diff options
author | Jeff Roberson <jeff@FreeBSD.org> | 2007-12-15 23:13:31 +0000 |
---|---|---|
committer | Jeff Roberson <jeff@FreeBSD.org> | 2007-12-15 23:13:31 +0000 |
commit | eea4f254fe8f66e4d58b9befe435010a2d485423 (patch) | |
tree | 5aa1ecb0fadd118191701a2b1c611fcee7216753 /sys/kern/subr_lock.c | |
parent | 4c908c35e08a0203c425a64e93de4d6344fb34fc (diff) |
- Re-implement lock profiling in such a way that it no longer breaks
the ABI when enabled. There is no longer an embedded lock_profile_object
in each lock. Instead a list of lock_profile_objects is kept per-thread
for each lock it may own. The cnt_hold statistic is now always 0 to
facilitate this.
- Support shared locking by tracking individual lock instances and
statistics in the per-thread per-instance lock_profile_object.
- Make the lock profiling hash table a per-cpu singly linked list with a
per-cpu static lock_prof allocator. This removes the need for an array
of spinlocks and reduces cache contention between cores.
- Use a separate hash for spinlocks and other locks so that only a
critical_enter() is required and not a spinlock_enter() to modify the
per-cpu tables.
- Count time spent spinning in the lock statistics.
- Remove the LOCK_PROFILE_SHARED option as it is always supported now.
- Specifically drop and release the scheduler locks in both schedulers
since we track owners now.
In collaboration with: Kip Macy
Sponsored by: Nokia
Notes
Notes:
svn path=/head/; revision=174629
Diffstat (limited to 'sys/kern/subr_lock.c')
-rw-r--r-- | sys/kern/subr_lock.c | 656 |
1 files changed, 434 insertions, 222 deletions
diff --git a/sys/kern/subr_lock.c b/sys/kern/subr_lock.c index ebb3c359aa42..19f363906058 100644 --- a/sys/kern/subr_lock.c +++ b/sys/kern/subr_lock.c @@ -40,17 +40,24 @@ __FBSDID("$FreeBSD$"); #include <sys/param.h> #include <sys/systm.h> +#include <sys/kernel.h> #include <sys/ktr.h> #include <sys/linker_set.h> #include <sys/lock.h> +#include <sys/lock_profile.h> +#include <sys/malloc.h> +#include <sys/pcpu.h> +#include <sys/proc.h> #include <sys/sbuf.h> +#include <sys/smp.h> #include <sys/sysctl.h> -#include <sys/lock_profile.h> #ifdef DDB #include <ddb/ddb.h> #endif +#include <machine/cpufunc.h> + CTASSERT(LOCK_CLASS_MAX == 15); struct lock_class *lock_classes[LOCK_CLASS_MAX + 1] = { @@ -62,136 +69,6 @@ struct lock_class *lock_classes[LOCK_CLASS_MAX + 1] = { &lock_class_lockmgr, }; -#ifdef LOCK_PROFILING -#include <machine/cpufunc.h> - -SYSCTL_NODE(_debug, OID_AUTO, lock, CTLFLAG_RD, NULL, "lock debugging"); -SYSCTL_NODE(_debug_lock, OID_AUTO, prof, CTLFLAG_RD, NULL, "lock profiling"); -int lock_prof_enable = 0; -SYSCTL_INT(_debug_lock_prof, OID_AUTO, enable, CTLFLAG_RW, - &lock_prof_enable, 0, "Enable lock profiling"); - -/* - * lprof_buf is a static pool of profiling records to avoid possible - * reentrance of the memory allocation functions. - * - * Note: NUM_LPROF_BUFFERS must be smaller than LPROF_HASH_SIZE. - */ -struct lock_prof lprof_buf[LPROF_HASH_SIZE]; -static int allocated_lprof_buf; -struct mtx lprof_locks[LPROF_LOCK_SIZE]; - - -/* SWAG: sbuf size = avg stat. 
line size * number of locks */ -#define LPROF_SBUF_SIZE 256 * 400 - -static int lock_prof_acquisitions; -SYSCTL_INT(_debug_lock_prof, OID_AUTO, acquisitions, CTLFLAG_RD, - &lock_prof_acquisitions, 0, "Number of lock acquistions recorded"); -static int lock_prof_records; -SYSCTL_INT(_debug_lock_prof, OID_AUTO, records, CTLFLAG_RD, - &lock_prof_records, 0, "Number of profiling records"); -static int lock_prof_maxrecords = LPROF_HASH_SIZE; -SYSCTL_INT(_debug_lock_prof, OID_AUTO, maxrecords, CTLFLAG_RD, - &lock_prof_maxrecords, 0, "Maximum number of profiling records"); -static int lock_prof_rejected; -SYSCTL_INT(_debug_lock_prof, OID_AUTO, rejected, CTLFLAG_RD, - &lock_prof_rejected, 0, "Number of rejected profiling records"); -static int lock_prof_hashsize = LPROF_HASH_SIZE; -SYSCTL_INT(_debug_lock_prof, OID_AUTO, hashsize, CTLFLAG_RD, - &lock_prof_hashsize, 0, "Hash size"); -static int lock_prof_collisions = 0; -SYSCTL_INT(_debug_lock_prof, OID_AUTO, collisions, CTLFLAG_RD, - &lock_prof_collisions, 0, "Number of hash collisions"); - -#ifndef USE_CPU_NANOSECONDS -u_int64_t -nanoseconds(void) -{ - struct timespec tv; - - nanotime(&tv); - return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec); -} -#endif - -static int -dump_lock_prof_stats(SYSCTL_HANDLER_ARGS) -{ - struct sbuf *sb; - int error, i; - static int multiplier = 1; - const char *p; - - if (allocated_lprof_buf == 0) - return (SYSCTL_OUT(req, "No locking recorded", - sizeof("No locking recorded"))); - -retry_sbufops: - sb = sbuf_new(NULL, NULL, LPROF_SBUF_SIZE * multiplier, SBUF_FIXEDLEN); - sbuf_printf(sb, "\n%6s %12s %12s %11s %5s %5s %12s %12s %s\n", - "max", "total", "wait_total", "count", "avg", "wait_avg", "cnt_hold", "cnt_lock", "name"); - for (i = 0; i < LPROF_HASH_SIZE; ++i) { - if (lprof_buf[i].name == NULL) - continue; - for (p = lprof_buf[i].file; - p != NULL && strncmp(p, "../", 3) == 0; p += 3) - /* nothing */ ; - sbuf_printf(sb, "%6ju %12ju %12ju %11ju %5ju %5ju %12ju %12ju %s:%d (%s:%s)\n", - 
lprof_buf[i].cnt_max / 1000, - lprof_buf[i].cnt_tot / 1000, - lprof_buf[i].cnt_wait / 1000, - lprof_buf[i].cnt_cur, - lprof_buf[i].cnt_cur == 0 ? (uintmax_t)0 : - lprof_buf[i].cnt_tot / (lprof_buf[i].cnt_cur * 1000), - lprof_buf[i].cnt_cur == 0 ? (uintmax_t)0 : - lprof_buf[i].cnt_wait / (lprof_buf[i].cnt_cur * 1000), - lprof_buf[i].cnt_contest_holding, - lprof_buf[i].cnt_contest_locking, - p, lprof_buf[i].line, - lprof_buf[i].type, - lprof_buf[i].name); - if (sbuf_overflowed(sb)) { - sbuf_delete(sb); - multiplier++; - goto retry_sbufops; - } - } - - sbuf_finish(sb); - error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1); - sbuf_delete(sb); - return (error); -} -static int -reset_lock_prof_stats(SYSCTL_HANDLER_ARGS) -{ - int error, v; - - if (allocated_lprof_buf == 0) - return (0); - - v = 0; - error = sysctl_handle_int(oidp, &v, 0, req); - if (error) - return (error); - if (req->newptr == NULL) - return (error); - if (v == 0) - return (0); - - bzero(lprof_buf, LPROF_HASH_SIZE*sizeof(*lprof_buf)); - allocated_lprof_buf = 0; - return (0); -} - -SYSCTL_PROC(_debug_lock_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD, - NULL, 0, dump_lock_prof_stats, "A", "Lock profiling statistics"); - -SYSCTL_PROC(_debug_lock_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW, - NULL, 0, reset_lock_prof_stats, "I", "Reset lock profiling statistics"); -#endif - void lock_init(struct lock_object *lock, struct lock_class *class, const char *name, const char *type, int flags) @@ -216,7 +93,6 @@ lock_init(struct lock_object *lock, struct lock_class *class, const char *name, lock->lo_flags |= flags | LO_INITIALIZED; LOCK_LOG_INIT(lock, 0); WITNESS_INIT(lock); - lock_profile_object_init(lock, class, name); } void @@ -224,7 +100,6 @@ lock_destroy(struct lock_object *lock) { KASSERT(lock_initalized(lock), ("lock %p is not initialized", lock)); - lock_profile_object_destroy(lock); WITNESS_DESTROY(lock); LOCK_LOG_DESTROY(lock, 0); lock->lo_flags &= ~LO_INITIALIZED; @@ -253,17 +128,376 @@ 
DB_SHOW_COMMAND(lock, db_show_lock) #endif #ifdef LOCK_PROFILING -void _lock_profile_obtain_lock_success(struct lock_object *lo, int contested, uint64_t waittime, const char *file, int line) + +/* + * One object per-thread for each lock the thread owns. Tracks individual + * lock instances. + */ +struct lock_profile_object { + LIST_ENTRY(lock_profile_object) lpo_link; + struct lock_object *lpo_obj; + const char *lpo_file; + int lpo_line; + uint16_t lpo_ref; + uint16_t lpo_cnt; + u_int64_t lpo_acqtime; + u_int64_t lpo_waittime; + u_int lpo_contest_locking; +}; + +/* + * One lock_prof for each (file, line, lock object) triple. + */ +struct lock_prof { + SLIST_ENTRY(lock_prof) link; + const char *file; + const char *name; + int line; + int ticks; + const char *type; + uintmax_t cnt_max; + uintmax_t cnt_tot; + uintmax_t cnt_wait; + uintmax_t cnt_cur; + uintmax_t cnt_contest_locking; +}; + +SLIST_HEAD(lphead, lock_prof); + +#define LPROF_HASH_SIZE 4096 +#define LPROF_HASH_MASK (LPROF_HASH_SIZE - 1) +#define LPROF_CACHE_SIZE 4096 + +/* + * Array of objects and profs for each type of object for each cpu. Spinlocks + * are handled seperately because a thread may be preempted and acquire a + * spinlock while in the lock profiling code of a non-spinlock. In this way + * we only need a critical section to protect the per-cpu lists. + */ +struct lock_prof_type { + struct lphead lpt_lpalloc; + struct lpohead lpt_lpoalloc; + struct lphead lpt_hash[LPROF_HASH_SIZE]; + struct lock_prof lpt_prof[LPROF_CACHE_SIZE]; + struct lock_profile_object lpt_objs[LPROF_CACHE_SIZE]; +}; + +struct lock_prof_cpu { + struct lock_prof_type lpc_types[2]; /* One for spin one for other. */ +}; + +struct lock_prof_cpu *lp_cpu[MAXCPU]; + +int lock_prof_enable = 0; + +/* SWAG: sbuf size = avg stat. 
line size * number of locks */ +#define LPROF_SBUF_SIZE 256 * 400 + +static int lock_prof_rejected; +static int lock_prof_skipspin; +static int lock_prof_skipcount; + +#ifndef USE_CPU_NANOSECONDS +u_int64_t +nanoseconds(void) { - struct lock_profile_object *l = &lo->lo_profile_obj; + struct bintime bt; + u_int64_t ns; + + binuptime(&bt); + /* From bintime2timespec */ + ns = bt.sec * (u_int64_t)1000000000; + ns += ((uint64_t)1000000000 * (uint32_t)(bt.frac >> 32)) >> 32; + return (ns); +} +#endif - lo->lo_profile_obj.lpo_contest_holding = 0; - - if (contested) - lo->lo_profile_obj.lpo_contest_locking++; +static void +lock_prof_init_type(struct lock_prof_type *type) +{ + int i; + + SLIST_INIT(&type->lpt_lpalloc); + LIST_INIT(&type->lpt_lpoalloc); + for (i = 0; i < LPROF_CACHE_SIZE; i++) { + SLIST_INSERT_HEAD(&type->lpt_lpalloc, &type->lpt_prof[i], + link); + LIST_INSERT_HEAD(&type->lpt_lpoalloc, &type->lpt_objs[i], + lpo_link); + } +} + +static void +lock_prof_init(void *arg) +{ + int cpu; + + for (cpu = 0; cpu <= mp_maxid; cpu++) { + lp_cpu[cpu] = malloc(sizeof(*lp_cpu[cpu]), M_DEVBUF, + M_WAITOK | M_ZERO); + lock_prof_init_type(&lp_cpu[cpu]->lpc_types[0]); + lock_prof_init_type(&lp_cpu[cpu]->lpc_types[1]); + } +} +SYSINIT(lockprof, SI_SUB_SMP, SI_ORDER_ANY, lock_prof_init, NULL); + +static void +lock_prof_reset(void) +{ + struct lock_prof_cpu *lpc; + int enabled, i, cpu; + + enabled = lock_prof_enable; + lock_prof_enable = 0; + for (cpu = 0; cpu <= mp_maxid; cpu++) { + lpc = lp_cpu[cpu]; + for (i = 0; i < LPROF_CACHE_SIZE; i++) { + LIST_REMOVE(&lpc->lpc_types[0].lpt_objs[i], lpo_link); + LIST_REMOVE(&lpc->lpc_types[1].lpt_objs[i], lpo_link); + } + bzero(lpc, sizeof(*lpc)); + lock_prof_init_type(&lpc->lpc_types[0]); + lock_prof_init_type(&lpc->lpc_types[1]); + } + lock_prof_enable = enabled; +} + +static void +lock_prof_output(struct lock_prof *lp, struct sbuf *sb) +{ + const char *p; + + for (p = lp->file; p != NULL && strncmp(p, "../", 3) == 0; p += 3); + 
sbuf_printf(sb, + "%6ju %12ju %12ju %11ju %5ju %5ju %12ju %12ju %s:%d (%s:%s)\n", + lp->cnt_max / 1000, lp->cnt_tot / 1000, + lp->cnt_wait / 1000, lp->cnt_cur, + lp->cnt_cur == 0 ? (uintmax_t)0 : + lp->cnt_tot / (lp->cnt_cur * 1000), + lp->cnt_cur == 0 ? (uintmax_t)0 : + lp->cnt_wait / (lp->cnt_cur * 1000), + (uintmax_t)0, lp->cnt_contest_locking, + p, lp->line, lp->type, lp->name); +} + +static void +lock_prof_sum(struct lock_prof *match, struct lock_prof *dst, int hash, + int spin, int t) +{ + struct lock_prof_type *type; + struct lock_prof *l; + int cpu; + + dst->file = match->file; + dst->line = match->line; + dst->type = match->type; + dst->name = match->name; + + for (cpu = 0; cpu <= mp_maxid; cpu++) { + if (lp_cpu[cpu] == NULL) + continue; + type = &lp_cpu[cpu]->lpc_types[spin]; + SLIST_FOREACH(l, &type->lpt_hash[hash], link) { + if (l->ticks == t) + continue; + if (l->file != match->file || l->line != match->line || + l->name != match->name || l->type != match->type) + continue; + l->ticks = t; + if (l->cnt_max > dst->cnt_max) + dst->cnt_max = l->cnt_max; + dst->cnt_tot += l->cnt_tot; + dst->cnt_wait += l->cnt_wait; + dst->cnt_cur += l->cnt_cur; + dst->cnt_contest_locking += l->cnt_contest_locking; + } + } - l->lpo_filename = file; - l->lpo_lineno = line; +} + +static void +lock_prof_type_stats(struct lock_prof_type *type, struct sbuf *sb, int spin, + int t) +{ + struct lock_prof *l; + int i; + + for (i = 0; i < LPROF_HASH_SIZE; ++i) { + SLIST_FOREACH(l, &type->lpt_hash[i], link) { + struct lock_prof lp = {}; + + if (l->ticks == t) + continue; + lock_prof_sum(l, &lp, i, spin, t); + lock_prof_output(&lp, sb); + if (sbuf_overflowed(sb)) + return; + } + } +} + +static int +dump_lock_prof_stats(SYSCTL_HANDLER_ARGS) +{ + static int multiplier = 1; + struct sbuf *sb; + int error, cpu, t; + +retry_sbufops: + sb = sbuf_new(NULL, NULL, LPROF_SBUF_SIZE * multiplier, SBUF_FIXEDLEN); + sbuf_printf(sb, "\n%6s %12s %12s %11s %5s %5s %12s %12s %s\n", + "max", "total", 
"wait_total", "count", "avg", "wait_avg", "cnt_hold", "cnt_lock", "name"); + t = ticks; + for (cpu = 0; cpu <= mp_maxid; cpu++) { + if (lp_cpu[cpu] == NULL) + continue; + lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[0], sb, 0, t); + lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[1], sb, 1, t); + if (sbuf_overflowed(sb)) { + sbuf_delete(sb); + multiplier++; + goto retry_sbufops; + } + } + + sbuf_finish(sb); + error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1); + sbuf_delete(sb); + return (error); +} + +static int +enable_lock_prof(SYSCTL_HANDLER_ARGS) +{ + int error, v; + + v = lock_prof_enable; + error = sysctl_handle_int(oidp, &v, v, req); + if (error) + return (error); + if (req->newptr == NULL) + return (error); + if (v == lock_prof_enable) + return (0); + if (v == 1) + lock_prof_reset(); + lock_prof_enable = !!v; + + return (0); +} + +static int +reset_lock_prof_stats(SYSCTL_HANDLER_ARGS) +{ + int error, v; + + v = 0; + error = sysctl_handle_int(oidp, &v, 0, req); + if (error) + return (error); + if (req->newptr == NULL) + return (error); + if (v == 0) + return (0); + lock_prof_reset(); + + return (0); +} + +static struct lock_prof * +lock_profile_lookup(struct lock_object *lo, int spin, const char *file, + int line) +{ + const char *unknown = "(unknown)"; + struct lock_prof_type *type; + struct lock_prof *lp; + struct lphead *head; + const char *p; + u_int hash; + + p = file; + if (p == NULL || *p == '\0') + p = unknown; + hash = (uintptr_t)lo->lo_name * 31 + (uintptr_t)p * 31 + line; + hash &= LPROF_HASH_MASK; + type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin]; + head = &type->lpt_hash[hash]; + SLIST_FOREACH(lp, head, link) { + if (lp->line == line && lp->file == p && + lp->name == lo->lo_name) + return (lp); + + } + lp = SLIST_FIRST(&type->lpt_lpalloc); + if (lp == NULL) { + lock_prof_rejected++; + return (lp); + } + SLIST_REMOVE_HEAD(&type->lpt_lpalloc, link); + lp->file = p; + lp->line = line; + lp->type = lo->lo_type; + lp->name = lo->lo_name; + 
SLIST_INSERT_HEAD(&type->lpt_hash[hash], lp, link); + return (lp); +} + +static struct lock_profile_object * +lock_profile_object_lookup(struct lock_object *lo, int spin, const char *file, + int line) +{ + struct lock_profile_object *l; + struct lock_prof_type *type; + struct lpohead *head; + + head = &curthread->td_lprof[spin]; + LIST_FOREACH(l, head, lpo_link) + if (l->lpo_obj == lo && l->lpo_file == file && + l->lpo_line == line) + return (l); + critical_enter(); + type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin]; + l = LIST_FIRST(&type->lpt_lpoalloc); + if (l == NULL) { + lock_prof_rejected++; + critical_exit(); + return (NULL); + } + LIST_REMOVE(l, lpo_link); + critical_exit(); + l->lpo_obj = lo; + l->lpo_file = file; + l->lpo_line = line; + l->lpo_cnt = 0; + LIST_INSERT_HEAD(head, l, lpo_link); + + return (l); +} + +void +lock_profile_obtain_lock_success(struct lock_object *lo, int contested, + uint64_t waittime, const char *file, int line) +{ + static int lock_prof_count; + struct lock_profile_object *l; + int spin; + + /* don't reset the timer when/if recursing */ + if (!lock_prof_enable || (lo->lo_flags & LO_NOPROFILE)) + return; + if (lock_prof_skipcount && + (++lock_prof_count % lock_prof_skipcount) == 0) + return; + spin = LOCK_CLASS(lo) == &lock_class_mtx_spin; + if (spin && lock_prof_skipspin == 1) + return; + l = lock_profile_object_lookup(lo, spin, file, line); + if (l == NULL) + return; + l->lpo_cnt++; + if (++l->lpo_ref > 1) + return; + l->lpo_contest_locking = contested; l->lpo_acqtime = nanoseconds(); if (waittime && (l->lpo_acqtime > waittime)) l->lpo_waittime = l->lpo_acqtime - waittime; @@ -271,87 +505,65 @@ void _lock_profile_obtain_lock_success(struct lock_object *lo, int contested, ui l->lpo_waittime = 0; } -void _lock_profile_release_lock(struct lock_object *lo) +void +lock_profile_release_lock(struct lock_object *lo) { - struct lock_profile_object *l = &lo->lo_profile_obj; - - if (l->lpo_acqtime) { - const char *unknown = "(unknown)"; - 
u_int64_t acqtime, now, waittime; - struct lock_prof *mpp; - u_int hash; - const char *p = l->lpo_filename; - int collision = 0; - - now = nanoseconds(); - acqtime = l->lpo_acqtime; - waittime = l->lpo_waittime; - if (now <= acqtime) - return; - if (p == NULL || *p == '\0') - p = unknown; - hash = (l->lpo_namehash * 31 * 31 + (uintptr_t)p * 31 + l->lpo_lineno) & LPROF_HASH_MASK; - mpp = &lprof_buf[hash]; - while (mpp->name != NULL) { - if (mpp->line == l->lpo_lineno && - mpp->file == p && - mpp->namehash == l->lpo_namehash) - break; - /* If the lprof_hash entry is allocated to someone - * else, try the next one - */ - collision = 1; - hash = (hash + 1) & LPROF_HASH_MASK; - mpp = &lprof_buf[hash]; - } - if (mpp->name == NULL) { - int buf; - - buf = atomic_fetchadd_int(&allocated_lprof_buf, 1); - /* Just exit if we cannot get a trace buffer */ - if (buf >= LPROF_HASH_SIZE) { - ++lock_prof_rejected; - return; - } - mpp->file = p; - mpp->line = l->lpo_lineno; - mpp->namehash = l->lpo_namehash; - mpp->type = l->lpo_type; - mpp->name = lo->lo_name; - - if (collision) - ++lock_prof_collisions; - - /* - * We might have raced someone else but who cares, - * they'll try again next time - */ - ++lock_prof_records; - } - LPROF_LOCK(hash); - /* - * Record if the lock has been held longer now than ever - * before. - */ - if (now - acqtime > mpp->cnt_max) - mpp->cnt_max = now - acqtime; - mpp->cnt_tot += now - acqtime; - mpp->cnt_wait += waittime; - mpp->cnt_cur++; - /* - * There's a small race, really we should cmpxchg - * 0 with the current value, but that would bill - * the contention to the wrong lock instance if - * it followed this also. 
- */ - mpp->cnt_contest_holding += l->lpo_contest_holding; - mpp->cnt_contest_locking += l->lpo_contest_locking; - LPROF_UNLOCK(hash); - - } - l->lpo_acqtime = 0; - l->lpo_waittime = 0; - l->lpo_contest_locking = 0; - l->lpo_contest_holding = 0; + struct lock_profile_object *l; + struct lock_prof_type *type; + struct lock_prof *lp; + u_int64_t holdtime; + struct lpohead *head; + int spin; + + if (!lock_prof_enable || (lo->lo_flags & LO_NOPROFILE)) + return; + spin = LOCK_CLASS(lo) == &lock_class_mtx_spin; + head = &curthread->td_lprof[spin]; + critical_enter(); + LIST_FOREACH(l, head, lpo_link) + if (l->lpo_obj == lo) + break; + if (l == NULL) + goto out; + if (--l->lpo_ref > 0) + goto out; + lp = lock_profile_lookup(lo, spin, l->lpo_file, l->lpo_line); + if (lp == NULL) + goto release; + holdtime = nanoseconds() - l->lpo_acqtime; + if (holdtime < 0) + goto release; + /* + * Record if the lock has been held longer now than ever + * before. + */ + if (holdtime > lp->cnt_max) + lp->cnt_max = holdtime; + lp->cnt_tot += holdtime; + lp->cnt_wait += l->lpo_waittime; + lp->cnt_contest_locking += l->lpo_contest_locking; + lp->cnt_cur += l->lpo_cnt; +release: + LIST_REMOVE(l, lpo_link); + type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin]; + LIST_INSERT_HEAD(&type->lpt_lpoalloc, l, lpo_link); +out: + critical_exit(); } + +SYSCTL_NODE(_debug, OID_AUTO, lock, CTLFLAG_RD, NULL, "lock debugging"); +SYSCTL_NODE(_debug_lock, OID_AUTO, prof, CTLFLAG_RD, NULL, "lock profiling"); +SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipspin, CTLFLAG_RW, + &lock_prof_skipspin, 0, "Skip profiling on spinlocks."); +SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipcount, CTLFLAG_RW, + &lock_prof_skipcount, 0, "Sample approximately every N lock acquisitions."); +SYSCTL_INT(_debug_lock_prof, OID_AUTO, rejected, CTLFLAG_RD, + &lock_prof_rejected, 0, "Number of rejected profiling records"); +SYSCTL_PROC(_debug_lock_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD, + NULL, 0, dump_lock_prof_stats, "A", "Lock 
profiling statistics"); +SYSCTL_PROC(_debug_lock_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW, + NULL, 0, reset_lock_prof_stats, "I", "Reset lock profiling statistics"); +SYSCTL_PROC(_debug_lock_prof, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW, + NULL, 0, enable_lock_prof, "I", "Enable lock profiling"); + #endif |