author     Kip Macy <kmacy@FreeBSD.org>	2007-02-26 08:26:44 +0000
committer  Kip Macy <kmacy@FreeBSD.org>	2007-02-26 08:26:44 +0000
commit     fe68a9163151dfb00adbf9de32b482579f46af76 (patch)
tree       329dd33524baa0ad9fe43f498498a2d120152d0d /sys/kern
parent     772ad651bf7af82b57558e5a6268e3961e9c2ad8 (diff)
general LOCK_PROFILING cleanup
- only collect timestamps when a lock is contested - this reduces the
  overhead of collecting profiles from 20x to 5x
- remove unused function from subr_lock.c
- generalize cnt_hold and cnt_lock statistics to be kept for all locks
- NOTE: rwlock profiling generates invalid statistics (and most likely
  always has); someone familiar with that should review
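
The mechanics, sketched in user-space C (a minimal sketch with hypothetical stand-ins, not the kernel API): the failed-acquisition hook flags contention and reads the clock once, on the first failure, so the uncontested fast path never pays for a timestamp; the success hook only computes a wait time if one was started. nanoseconds() here models the kernel's profiling clock.

#include <stdint.h>
#include <time.h>

static uint64_t
nanoseconds(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ((uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec);
}

/* Called on each failed attempt: note the contention and start the
 * wait clock, but only on the first failure. */
static void
profile_obtain_lock_failed(int *contested, uint64_t *waittime)
{
	*contested = 1;
	if (*waittime == 0)
		*waittime = nanoseconds();
}

/* Called once the lock is held: waittime == 0 means the acquisition
 * was uncontested and no timestamp was ever taken. */
static void
profile_obtain_lock_success(int contested, uint64_t waittime)
{
	if (contested && waittime != 0) {
		uint64_t waited = nanoseconds() - waittime;

		(void)waited;	/* fold into the per-lock-point record */
	}
}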
Notes: svn path=/head/; revision=167012
Diffstat (limited to 'sys/kern')

 -rw-r--r--  sys/kern/kern_lock.c   | 29
 -rw-r--r--  sys/kern/kern_mutex.c  | 29
 -rw-r--r--  sys/kern/kern_rwlock.c | 14
 -rw-r--r--  sys/kern/kern_sx.c     | 16
 -rw-r--r--  sys/kern/sched_ule.c   |  2
 -rw-r--r--  sys/kern/subr_lock.c   | 61

6 files changed, 40 insertions, 111 deletions
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 2e010b7df899..e15cfa6f81fd 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -84,7 +84,7 @@ struct lock_class lock_class_lockmgr = {
#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
LK_SHARE_NONZERO | LK_WAIT_NONZERO)
-static int acquire(struct lock **lkpp, int extflags, int wanted);
+static int acquire(struct lock **lkpp, int extflags, int wanted, int *contested, uint64_t *waittime);
static int acquiredrain(struct lock *lkp, int extflags) ;
static __inline void
@@ -112,7 +112,7 @@ shareunlock(struct thread *td, struct lock *lkp, int decr) {
}
static int
-acquire(struct lock **lkpp, int extflags, int wanted)
+acquire(struct lock **lkpp, int extflags, int wanted, int *contested, uint64_t *waittime)
{
struct lock *lkp = *lkpp;
int error;
@@ -123,6 +123,9 @@ acquire(struct lock **lkpp, int extflags, int wanted)
if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted))
return EBUSY;
error = 0;
+ if ((lkp->lk_flags & wanted) != 0)
+ lock_profile_obtain_lock_failed(&lkp->lk_object, contested, waittime);
+
while ((lkp->lk_flags & wanted) != 0) {
CTR2(KTR_LOCK,
"acquire(): lkp == %p, lk_flags == 0x%x sleeping",
@@ -168,15 +171,15 @@ _lockmgr(struct lock *lkp, int flags, struct mtx *interlkp,
int error;
struct thread *thr;
int extflags, lockflags;
- uint64_t waitstart;
-
+ int contested = 0;
+ uint64_t waitstart = 0;
+
error = 0;
if (td == NULL)
thr = LK_KERNPROC;
else
thr = td;
- lock_profile_waitstart(&waitstart);
if ((flags & LK_INTERNAL) == 0)
mtx_lock(lkp->lk_interlock);
CTR6(KTR_LOCK,
@@ -228,12 +231,12 @@ _lockmgr(struct lock *lkp, int flags, struct mtx *interlkp,
lockflags = LK_HAVE_EXCL;
if (td != NULL && !(td->td_pflags & TDP_DEADLKTREAT))
lockflags |= LK_WANT_EXCL | LK_WANT_UPGRADE;
- error = acquire(&lkp, extflags, lockflags);
+ error = acquire(&lkp, extflags, lockflags, &contested, &waitstart);
if (error)
break;
sharelock(td, lkp, 1);
if (lkp->lk_sharecount == 1)
- lock_profile_obtain_lock_success(&lkp->lk_object, waitstart, file, line);
+ lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
#if defined(DEBUG_LOCKS)
stack_save(&lkp->lk_stack);
@@ -246,7 +249,7 @@ _lockmgr(struct lock *lkp, int flags, struct mtx *interlkp,
*/
sharelock(td, lkp, 1);
if (lkp->lk_sharecount == 1)
- lock_profile_obtain_lock_success(&lkp->lk_object, waitstart, file, line);
+ lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
/* FALLTHROUGH downgrade */
case LK_DOWNGRADE:
@@ -308,7 +311,7 @@ _lockmgr(struct lock *lkp, int flags, struct mtx *interlkp,
* drop to zero, then take exclusive lock.
*/
lkp->lk_flags |= LK_WANT_UPGRADE;
- error = acquire(&lkp, extflags, LK_SHARE_NONZERO);
+ error = acquire(&lkp, extflags, LK_SHARE_NONZERO, &contested, &waitstart);
lkp->lk_flags &= ~LK_WANT_UPGRADE;
if (error) {
@@ -322,7 +325,7 @@ _lockmgr(struct lock *lkp, int flags, struct mtx *interlkp,
lkp->lk_lockholder = thr;
lkp->lk_exclusivecount = 1;
COUNT(td, 1);
- lock_profile_obtain_lock_success(&lkp->lk_object, waitstart, file, line);
+ lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
#if defined(DEBUG_LOCKS)
stack_save(&lkp->lk_stack);
#endif
@@ -362,14 +365,14 @@ _lockmgr(struct lock *lkp, int flags, struct mtx *interlkp,
/*
* Try to acquire the want_exclusive flag.
*/
- error = acquire(&lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
+ error = acquire(&lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL), &contested, &waitstart);
if (error)
break;
lkp->lk_flags |= LK_WANT_EXCL;
/*
* Wait for shared locks and upgrades to finish.
*/
- error = acquire(&lkp, extflags, LK_HAVE_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO);
+ error = acquire(&lkp, extflags, LK_HAVE_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO, &contested, &waitstart);
lkp->lk_flags &= ~LK_WANT_EXCL;
if (error) {
if (lkp->lk_flags & LK_WAIT_NONZERO)
@@ -382,7 +385,7 @@ _lockmgr(struct lock *lkp, int flags, struct mtx *interlkp,
panic("lockmgr: non-zero exclusive count");
lkp->lk_exclusivecount = 1;
COUNT(td, 1);
- lock_profile_obtain_lock_success(&lkp->lk_object, waitstart, file, line);
+ lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
#if defined(DEBUG_LOCKS)
stack_save(&lkp->lk_stack);
#endif
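
All of the lockmgr hunks above follow one shape: a single contested/waitstart pair is threaded through every acquire() call, so however many wait rounds the acquisition takes, the success hook sees the accumulated result. A condensed fragment of that flow (names as in the diff; not compilable on its own):

	int contested = 0;
	uint64_t waitstart = 0;

	/* acquire() flags contention and starts the clock internally. */
	error = acquire(&lkp, extflags, lockflags, &contested, &waitstart);
	if (error == 0)
		lock_profile_obtain_lock_success(&lkp->lk_object, contested,
		    waitstart, file, line);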
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 55ae0f37030e..b3b652e00bb3 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -137,7 +137,6 @@ static inline void lock_profile_init(void) {;}
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{
- uint64_t waittime;
MPASS(curthread != NULL);
KASSERT(m->mtx_lock != MTX_DESTROYED,
@@ -148,13 +147,11 @@ _mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
file, line);
- lock_profile_waitstart(&waittime);
_get_sleep_lock(m, curthread, opts, file, line);
LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
line);
WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
curthread->td_locks++;
- lock_profile_obtain_lock_success(&m->mtx_object, waittime, file, line);
}
void
@@ -181,8 +178,6 @@ void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{
- uint64_t waittime;
-
MPASS(curthread != NULL);
KASSERT(m->mtx_lock != MTX_DESTROYED,
("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
@@ -191,12 +186,10 @@ _mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
m->mtx_object.lo_name, file, line));
WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
file, line);
- lock_profile_waitstart(&waittime);
_get_spin_lock(m, curthread, opts, file, line);
LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
line);
WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
- lock_profile_obtain_lock_success(&m->mtx_object, waittime, file, line);
}
void
@@ -225,9 +218,9 @@ _mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
- int rval;
+ int rval, contested = 0;
uint64_t waittime = 0;
-
+
MPASS(curthread != NULL);
KASSERT(m->mtx_lock != MTX_DESTROYED,
("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
@@ -247,7 +240,9 @@ _mtx_trylock(struct mtx *m, int opts, const char *file, int line)
WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
file, line);
curthread->td_locks++;
- lock_profile_obtain_lock_success(&m->mtx_object, waittime, file, line);
+ if (m->mtx_recurse == 0)
+ lock_profile_obtain_lock_success(&m->mtx_object, contested,
+ waittime, file, line);
}
@@ -271,8 +266,7 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
int cont_logged = 0;
#endif
uintptr_t v;
- int contested = 0;
-
+
if (mtx_owned(m)) {
KASSERT((m->mtx_object.lo_flags & LO_RECURSABLE) != 0,
("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
@@ -289,8 +283,7 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
"_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);
- while (!_obtain_lock(m, tid)) {
- lock_profile_obtain_lock_failed(&m->mtx_object, &contested);
+ while (!_obtain_lock(m, tid)) {
turnstile_lock(&m->mtx_object);
v = m->mtx_lock;
@@ -381,11 +374,6 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
m->mtx_object.lo_name, (void *)tid, file, line);
}
#endif
-#ifdef LOCK_PROFILING
- m->mtx_object.lo_profile_obj.lpo_contest_holding = 0;
- if (contested)
- m->mtx_object.lo_profile_obj.lpo_contest_locking++;
-#endif
return;
}
@@ -400,14 +388,13 @@ void
_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
int line)
{
+ int i = 0;
struct thread *td;
- int contested = 0, i = 0;
if (LOCK_LOG_TEST(&m->mtx_object, opts))
CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
while (!_obtain_lock(m, tid)) {
- lock_profile_obtain_lock_failed(&m->mtx_object, &contested);
/* Give interrupts a chance while we spin. */
spinlock_exit();
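
One detail in the mutex hunks: _mtx_trylock() now records a success sample only when m->mtx_recurse == 0. The shared success hook refuses to reset a running timer (see subr_lock.c below), so a recursive trylock would otherwise feed it a meaningless second sample. As a fragment, per the hunk above:

	if (rval != 0 && m->mtx_recurse == 0)
		lock_profile_obtain_lock_success(&m->mtx_object, contested,
		    waittime, file, line);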
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index 82535fe56d12..a59de60008fd 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -111,7 +111,6 @@ rw_sysinit(void *arg)
void
_rw_wlock(struct rwlock *rw, const char *file, int line)
{
- uint64_t waitstart;
MPASS(curthread != NULL);
KASSERT(rw_wowner(rw) != curthread,
@@ -119,9 +118,7 @@ _rw_wlock(struct rwlock *rw, const char *file, int line)
rw->rw_object.lo_name, file, line));
WITNESS_CHECKORDER(&rw->rw_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
line);
- lock_profile_waitstart(&waitstart);
__rw_wlock(rw, curthread, file, line);
- lock_profile_obtain_lock_success(&rw->rw_object, waitstart, file, line);
LOCK_LOG_LOCK("WLOCK", &rw->rw_object, 0, 0, file, line);
WITNESS_LOCK(&rw->rw_object, LOP_EXCLUSIVE, file, line);
curthread->td_locks++;
@@ -166,7 +163,6 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
* be blocked on the writer, and the writer would be blocked
* waiting for the reader to release its original read lock.
*/
- lock_profile_waitstart(&waitstart);
for (;;) {
/*
* Handle the easy case. If no other thread has a write
@@ -189,7 +185,7 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
MPASS((x & RW_LOCK_READ_WAITERS) == 0);
if (atomic_cmpset_acq_ptr(&rw->rw_lock, x,
x + RW_ONE_READER)) {
- lock_profile_obtain_lock_success(&rw->rw_object, waitstart, file, line);
+ lock_profile_obtain_lock_success(&rw->rw_object, contested, waitstart, file, line);
if (LOCK_LOG_TEST(&rw->rw_object, 0))
CTR4(KTR_LOCK,
"%s: %p succeed %p -> %p", __func__,
@@ -197,8 +193,8 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
(void *)(x + RW_ONE_READER));
break;
}
+ lock_profile_obtain_lock_failed(&rw->rw_object, &contested, &waitstart);
cpu_spinwait();
- lock_profile_obtain_lock_failed(&rw->rw_object, &contested);
continue;
}
@@ -247,7 +243,7 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
*/
owner = (struct thread *)RW_OWNER(x);
if (TD_IS_RUNNING(owner)) {
- lock_profile_obtain_lock_failed(&rw->rw_object, &contested);
+ lock_profile_obtain_lock_failed(&rw->rw_object, &contested, &waitstart);
turnstile_release(&rw->rw_object);
if (LOCK_LOG_TEST(&rw->rw_object, 0))
CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
@@ -411,7 +407,6 @@ _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
#ifdef SMP
volatile struct thread *owner;
#endif
- int contested;
uintptr_t v;
if (LOCK_LOG_TEST(&rw->rw_object, 0))
@@ -453,7 +448,6 @@ _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
}
turnstile_release(&rw->rw_object);
cpu_spinwait();
- lock_profile_obtain_lock_failed(&rw->rw_object, &contested);
continue;
}
@@ -467,7 +461,6 @@ _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
v | RW_LOCK_WRITE_WAITERS)) {
turnstile_release(&rw->rw_object);
cpu_spinwait();
- lock_profile_obtain_lock_failed(&rw->rw_object, &contested);
continue;
}
if (LOCK_LOG_TEST(&rw->rw_object, 0))
@@ -483,7 +476,6 @@ _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
*/
owner = (struct thread *)RW_OWNER(v);
if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
- lock_profile_obtain_lock_failed(&rw->rw_object, &contested);
turnstile_release(&rw->rw_object);
if (LOCK_LOG_TEST(&rw->rw_object, 0))
CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
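
In the rwlock read path the failed hook now runs before cpu_spinwait() and takes &waitstart, so the wait clock covers the spin; the write-lock path drops its per-iteration failed calls outright. A toy user-space model of the resulting read-loop ordering, with hypothetical stand-ins for the kernel primitives:

#include <stdatomic.h>
#include <stdint.h>

extern void profile_failed(int *contested, uint64_t *waittime);
extern void profile_success(int contested, uint64_t waittime);

/* lk >= 0: reader count; lk < 0: write-locked (toy convention). */
static void
read_lock(atomic_int *lk, int *contested, uint64_t *waittime)
{
	for (;;) {
		int v = atomic_load(lk);

		if (v >= 0 &&
		    atomic_compare_exchange_weak(lk, &v, v + 1)) {
			profile_success(*contested, *waittime);
			return;
		}
		/* Failed hook before the spin, as in the diff above. */
		profile_failed(contested, waittime);
		/* cpu_spinwait() equivalent would go here. */
	}
}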
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index ecab0e91a599..e0bff421c2d7 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -111,7 +111,7 @@ void
_sx_slock(struct sx *sx, const char *file, int line)
{
uint64_t waittime = 0;
- int contested;
+ int contested = 0;
mtx_lock(sx->sx_lock);
KASSERT(sx->sx_xholder != curthread,
@@ -122,11 +122,9 @@ _sx_slock(struct sx *sx, const char *file, int line)
/*
* Loop in case we lose the race for lock acquisition.
*/
- if (sx->sx_cnt < 0)
- lock_profile_waitstart(&waittime);
while (sx->sx_cnt < 0) {
sx->sx_shrd_wcnt++;
- lock_profile_obtain_lock_failed(&sx->sx_object, &contested);
+ lock_profile_obtain_lock_failed(&sx->sx_object, &contested, &waittime);
cv_wait(&sx->sx_shrd_cv, sx->sx_lock);
sx->sx_shrd_wcnt--;
}
@@ -135,7 +133,7 @@ _sx_slock(struct sx *sx, const char *file, int line)
sx->sx_cnt++;
if (sx->sx_cnt == 1)
- lock_profile_obtain_lock_success(&sx->sx_object, waittime, file, line);
+ lock_profile_obtain_lock_success(&sx->sx_object, contested, waittime, file, line);
LOCK_LOG_LOCK("SLOCK", &sx->sx_object, 0, 0, file, line);
WITNESS_LOCK(&sx->sx_object, 0, file, line);
@@ -166,7 +164,7 @@ _sx_try_slock(struct sx *sx, const char *file, int line)
void
_sx_xlock(struct sx *sx, const char *file, int line)
{
- int contested;
+ int contested = 0;
uint64_t waittime = 0;
mtx_lock(sx->sx_lock);
@@ -184,12 +182,10 @@ _sx_xlock(struct sx *sx, const char *file, int line)
WITNESS_CHECKORDER(&sx->sx_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
line);
- if (sx->sx_cnt)
- lock_profile_waitstart(&waittime);
/* Loop in case we lose the race for lock acquisition. */
while (sx->sx_cnt != 0) {
sx->sx_excl_wcnt++;
- lock_profile_obtain_lock_failed(&sx->sx_object, &contested);
+ lock_profile_obtain_lock_failed(&sx->sx_object, &contested, &waittime);
cv_wait(&sx->sx_excl_cv, sx->sx_lock);
sx->sx_excl_wcnt--;
}
@@ -200,7 +196,7 @@ _sx_xlock(struct sx *sx, const char *file, int line)
sx->sx_cnt--;
sx->sx_xholder = curthread;
- lock_profile_obtain_lock_success(&sx->sx_object, waittime, file, line);
+ lock_profile_obtain_lock_success(&sx->sx_object, contested, waittime, file, line);
LOCK_LOG_LOCK("XLOCK", &sx->sx_object, 0, 0, file, line);
WITNESS_LOCK(&sx->sx_object, LOP_EXCLUSIVE, file, line);
curthread->td_locks++;
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 4ed52168b23e..f52cbc84ced9 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -997,7 +997,7 @@ sched_setup(void *dummy)
tdq = &tdq_cpu[i];
tdq_setup(&tdq_cpu[i]);
}
- if (smp_topology == NULL) {
+ if (1) {
struct tdq_group *tdg;
struct tdq *tdq;
int cpus;
diff --git a/sys/kern/subr_lock.c b/sys/kern/subr_lock.c
index f55c85c5702f..8bd443150402 100644
--- a/sys/kern/subr_lock.c
+++ b/sys/kern/subr_lock.c
@@ -250,13 +250,17 @@ DB_SHOW_COMMAND(lock, db_show_lock)
#endif
#ifdef LOCK_PROFILING
-void _lock_profile_obtain_lock_success(struct lock_object *lo, uint64_t waittime, con\
-st char *file, int line)
+void _lock_profile_obtain_lock_success(struct lock_object *lo, int contested, uint64_t waittime, const char *file, int line)
{
struct lock_profile_object *l = &lo->lo_profile_obj;
/* don't reset the timer when/if recursing */
if (l->lpo_acqtime == 0) {
+ lo->lo_profile_obj.lpo_contest_holding = 0;
+
+ if (contested)
+ lo->lo_profile_obj.lpo_contest_locking++;
+
l->lpo_filename = file;
l->lpo_lineno = line;
l->lpo_acqtime = nanoseconds();
@@ -267,59 +271,6 @@ st char *file, int line)
}
}
-void _lock_profile_update_wait(struct lock_object *lo, uint64_t waitstart)
-{
- struct lock_profile_object *l = &lo->lo_profile_obj;
-
- if (lock_prof_enable && waitstart) {
- uint64_t now, waittime;
- struct lock_prof *mpp;
- u_int hash;
- const char *p = l->lpo_filename;
- int collision = 0;
- now = nanoseconds();
- if (now < waitstart)
- return;
- waittime = now - waitstart;
- hash = (l->lpo_namehash * 31 * 31 + (uintptr_t)p * 31 + l->lpo_lineno) & LPROF_HASH_MASK;
-
- mpp = &lprof_buf[hash];
- while (mpp->name != NULL) {
- if (mpp->line == l->lpo_lineno &&
- mpp->file == p &&
- mpp->namehash == l->lpo_namehash)
- break;
- /* If the lprof_hash entry is allocated to someone else, try the next one */
- collision = 1;
- CTR4(KTR_SPARE1, "Hash collision, %s:%d %s(%x)", mpp->file, mpp->line, mpp->name, mpp->namehash);
- hash = (hash + 1) & LPROF_HASH_MASK;
- mpp = &lprof_buf[hash];
- }
- if (mpp->name == NULL) {
- int buf;
-
- buf = atomic_fetchadd_int(&allocated_lprof_buf, 1);
- /* Just exit if we cannot get a trace buffer */
- if (buf >= LPROF_HASH_SIZE) {
- ++lock_prof_rejected;
- return;
- }
- mpp->file = p;
- mpp->line = l->lpo_lineno;
- mpp->namehash = l->lpo_namehash;
- mpp->type = l->lpo_type;
- mpp->name = lo->lo_name;
- if (collision)
- ++lock_prof_collisions;
- /* We might have raced someone else but who cares, they'll try again next time */
- ++lock_prof_records;
- }
- LPROF_LOCK(hash);
- mpp->cnt_wait += waittime;
- LPROF_UNLOCK(hash);
- }
-}
-
void _lock_profile_release_lock(struct lock_object *lo)
{
struct lock_profile_object *l = &lo->lo_profile_obj;
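
The net effect in subr_lock.c: the contest counters that kern_mutex.c used to maintain under #ifdef LOCK_PROFILING now live in the shared success hook, guarded by the same lpo_acqtime == 0 recursion check, which is what generalizes the cnt_hold/cnt_lock statistics to every lock class. A toy reduction (types and names illustrative, not the kernel structures):

#include <stdint.h>

struct profile_obj {
	uint64_t	acqtime;		/* 0 while unheld */
	int		contest_holding;
	int		contest_locking;
};

static void
obtain_lock_success(struct profile_obj *l, int contested, uint64_t now)
{
	if (l->acqtime == 0) {		/* don't reset when recursing */
		l->contest_holding = 0;
		if (contested)
			l->contest_locking++;
		l->acqtime = now;
	}
}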