From 09bdec20a0cc763447fed84b48f33856288721d9 Mon Sep 17 00:00:00 2001
From: Mateusz Guzik
Date: Sat, 17 Mar 2018 19:26:33 +0000
Subject: locks: slightly depessimize lockstat

The slow path is always taken when lockstat is enabled. This induces
rdtsc (or other) calls to get the cycle count even when there was no
contention.

Still go to the slow path to not mess with the fast path, but avoid the
heavy lifting unless necessary.

This reduces sys and real time during -j 80 buildkernel:
before: 3651.84s user 1105.59s system 5394% cpu 1:28.18 total
after: 3685.99s user 975.74s system 5450% cpu 1:25.53 total
disabled: 3697.96s user 411.13s system 5261% cpu 1:18.10 total

So note this is still a significant hit.

LOCK_PROFILING results are not affected.
---
 sys/kern/kern_rwlock.c | 61 +++++++++++++++++++++++++++++---------------------
 1 file changed, 36 insertions(+), 25 deletions(-)

(limited to 'sys/kern/kern_rwlock.c')

diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index c15e24d07974..9afb756e74d3 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -438,7 +438,21 @@ __rw_rlock_hard(struct rwlock *rw, struct thread *td, uintptr_t v
 #endif
 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
 	uintptr_t state;
-	int doing_lockprof;
+	int doing_lockprof = 0;
+#endif
+
+#ifdef KDTRACE_HOOKS
+	if (LOCKSTAT_PROFILE_ENABLED(rw__acquire)) {
+		if (__rw_rlock_try(rw, td, &v, false LOCK_FILE_LINE_ARG))
+			goto out_lockstat;
+		doing_lockprof = 1;
+		all_time -= lockstat_nsecs(&rw->lock_object);
+		state = v;
+	}
+#endif
+#ifdef LOCK_PROFILING
+	doing_lockprof = 1;
+	state = v;
 #endif
 
 	if (SCHEDULER_STOPPED())
@@ -456,17 +470,6 @@ __rw_rlock_hard(struct rwlock *rw, struct thread *td, uintptr_t v
 		lock_profile_obtain_lock_failed(&rw->lock_object,
 		    &contested, &waittime);
 
-#ifdef LOCK_PROFILING
-	doing_lockprof = 1;
-	state = v;
-#elif defined(KDTRACE_HOOKS)
-	doing_lockprof = lockstat_enabled;
-	if (__predict_false(doing_lockprof)) {
-		all_time -= lockstat_nsecs(&rw->lock_object);
-		state = v;
-	}
-#endif
-
 	for (;;) {
 		if (__rw_rlock_try(rw, td, &v, false LOCK_FILE_LINE_ARG))
 			break;
@@ -615,6 +618,7 @@ retry_ts:
 		LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
 		    LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
 		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
+out_lockstat:
 #endif
 	/*
 	 * TODO: acquire "owner of record" here.  Here be turnstile dragons
@@ -892,10 +896,28 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF)
 #endif
 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
 	uintptr_t state;
-	int doing_lockprof;
+	int doing_lockprof = 0;
 #endif
 
 	tid = (uintptr_t)curthread;
+	rw = rwlock2rw(c);
+
+#ifdef KDTRACE_HOOKS
+	if (LOCKSTAT_PROFILE_ENABLED(rw__acquire)) {
+		while (v == RW_UNLOCKED) {
+			if (_rw_write_lock_fetch(rw, &v, tid))
+				goto out_lockstat;
+		}
+		doing_lockprof = 1;
+		all_time -= lockstat_nsecs(&rw->lock_object);
+		state = v;
+	}
+#endif
+#ifdef LOCK_PROFILING
+	doing_lockprof = 1;
+	state = v;
+#endif
+
 	if (SCHEDULER_STOPPED())
 		return;
 
@@ -904,7 +926,6 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF)
 #elif defined(KDTRACE_HOOKS)
 	lock_delay_arg_init(&lda, NULL);
 #endif
-	rw = rwlock2rw(c);
 
 	if (__predict_false(v == RW_UNLOCKED))
 		v = RW_READ_VALUE(rw);
@@ -929,17 +950,6 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF)
 		lock_profile_obtain_lock_failed(&rw->lock_object,
 		    &contested, &waittime);
 
-#ifdef LOCK_PROFILING
-	doing_lockprof = 1;
-	state = v;
-#elif defined(KDTRACE_HOOKS)
-	doing_lockprof = lockstat_enabled;
-	if (__predict_false(doing_lockprof)) {
-		all_time -= lockstat_nsecs(&rw->lock_object);
-		state = v;
-	}
-#endif
-
 	for (;;) {
 		if (v == RW_UNLOCKED) {
 			if (_rw_write_lock_fetch(rw, &v, tid))
@@ -1101,6 +1111,7 @@ retry_ts:
 		LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
 		    LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
 		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
+out_lockstat:
 #endif
 	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested,
 	    waittime, file, line, LOCKSTAT_WRITER);
--
cgit v1.2.3
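
For readers not steeped in the lockstat code, the following stand-alone C
sketch illustrates the pattern the patch applies in both __rw_rlock_hard()
and __rw_wlock_hard(): when the probe is enabled, retry the cheap acquire
once more before reading the clock, so an uncontended acquire never pays
for the timestamps. The names probe_enabled, timestamp_nsecs() and
lock_try() are hypothetical stand-ins for lockstat_enabled,
lockstat_nsecs() and the __rw_rlock_try()/_rw_write_lock_fetch()
primitives; this is an illustrative user-space sketch, not the kernel
implementation.

/*
 * Minimal sketch of "skip the heavy lifting unless contended".  All names
 * here are hypothetical stand-ins, not the kernel's lockstat machinery.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static _Atomic int probe_enabled;	/* stand-in for lockstat_enabled */

struct lock {
	atomic_uintptr_t state;		/* 0 == unlocked */
};

/* Stand-in for the cheap inlined try (__rw_rlock_try() et al.). */
static bool
lock_try(struct lock *l)
{
	uintptr_t expected = 0;

	return (atomic_compare_exchange_strong(&l->state, &expected, 1));
}

/* Stand-in for lockstat_nsecs(): the rdtsc/clock read the commit defers. */
static int64_t
timestamp_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ((int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec);
}

/*
 * Slow path.  The commit's key move: with the probe enabled, retry the
 * cheap acquire *before* touching the clock and jump past the accounting
 * on success, so uncontended acquires never read a timestamp.
 */
static void
lock_acquire_hard(struct lock *l)
{
	int64_t all_time = 0;
	int doing_prof = 0;

	if (probe_enabled) {
		if (lock_try(l))
			goto out_prof;		/* no contention, no timing */
		doing_prof = 1;
		all_time -= timestamp_nsecs();
	}

	while (!lock_try(l))
		;	/* spin; the real code blocks on a turnstile */

	if (doing_prof) {
		all_time += timestamp_nsecs();
		printf("contended acquire: %lld ns\n", (long long)all_time);
	}
out_prof:
	return;
}

int
main(void)
{
	struct lock l;

	atomic_init(&l.state, 0);
	probe_enabled = 1;
	lock_acquire_hard(&l);	/* uncontended: returns via the goto */
	return (0);
}

Built with any C11 compiler (e.g. cc -std=c11), the uncontended call in
main() returns through the goto without ever calling timestamp_nsecs(),
which mirrors the sys-time savings the before/after numbers above measure.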