diff options
author | Attilio Rao <attilio@FreeBSD.org> | 2007-11-26 22:37:35 +0000 |
---|---|---|
committer | Attilio Rao <attilio@FreeBSD.org> | 2007-11-26 22:37:35 +0000 |
commit | 49aead8a10c48a0ebc292d6cbb0d1c5623025e01 (patch) | |
tree | 54b27d85fcd529d57c33cc56cba5dec439d40f9b /sys/kern/kern_mutex.c | |
parent | 221a97c1942577c250240eb760378ef6ea8e7223 (diff) | |
download | src-49aead8a10c48a0ebc292d6cbb0d1c5623025e01.tar.gz src-49aead8a10c48a0ebc292d6cbb0d1c5623025e01.zip |
Simplify the adaptive spinning algorithm in rwlock and mutex:
currently, before spinning, the turnstile spinlock is acquired and the
waiters flag is set.
This is not strictly necessary, so just spin before acquiring the
spinlock and setting the flags.
This will simplify a lot of other functions too, as now we have the waiters
flag set only if there are actually waiters.
This should make the wakeup/sleep couplet faster under intensive mutex
workload.
This also fixes a bug in rw_try_upgrade() in the adaptive case, where
turnstile_lookup() will recurse on the ts_lock lock that will never be
really released [1].
[1] Reported by: jeff with Nokia help
Tested by: pho, kris (earlier, bugged version of rwlock part)
Discussed with: jhb [2], jeff
MFC after: 1 week
[2] John probably had a similar patch for mutexes against 6.x and/or 7.x
Notes
Notes:
svn path=/head/; revision=173960
Diffstat (limited to 'sys/kern/kern_mutex.c')
-rw-r--r-- | sys/kern/kern_mutex.c | 70 |
1 file changed, 41 insertions, 29 deletions
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c index cb054fd38c2b..56c6e000e20a 100644 --- a/sys/kern/kern_mutex.c +++ b/sys/kern/kern_mutex.c @@ -335,6 +335,31 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file, m->lock_object.lo_name, (void *)m->mtx_lock, file, line); while (!_obtain_lock(m, tid)) { +#ifdef ADAPTIVE_MUTEXES + /* + * If the owner is running on another CPU, spin until the + * owner stops running or the state of the lock changes. + */ + v = m->mtx_lock; + if (v != MTX_UNOWNED) { + owner = (struct thread *)(v & ~MTX_FLAGMASK); +#ifdef ADAPTIVE_GIANT + if (TD_IS_RUNNING(owner)) { +#else + if (m != &Giant && TD_IS_RUNNING(owner)) { +#endif + if (LOCK_LOG_TEST(&m->lock_object, 0)) + CTR3(KTR_LOCK, + "%s: spinning on %p held by %p", + __func__, m, owner); + while (mtx_owner(m) == owner && + TD_IS_RUNNING(owner)) + cpu_spinwait(); + continue; + } + } +#endif + ts = turnstile_trywait(&m->lock_object); v = m->mtx_lock; @@ -350,37 +375,34 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file, MPASS(v != MTX_CONTESTED); +#ifdef ADAPTIVE_MUTEXES /* - * If the mutex isn't already contested and a failure occurs - * setting the contested bit, the mutex was either released - * or the state of the MTX_RECURSED bit changed. + * If the current owner of the lock is executing on another + * CPU quit the hard path and try to spin. */ - if ((v & MTX_CONTESTED) == 0 && - !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) { + owner = (struct thread *)(v & ~MTX_FLAGMASK); +#ifdef ADAPTIVE_GIANT + if (TD_IS_RUNNING(owner)) { +#else + if (m != &Giant && TD_IS_RUNNING(owner)) { +#endif turnstile_cancel(ts); cpu_spinwait(); continue; } +#endif -#ifdef ADAPTIVE_MUTEXES /* - * If the current owner of the lock is executing on another - * CPU, spin instead of blocking. 
+ * If the mutex isn't already contested and a failure occurs + * setting the contested bit, the mutex was either released + * or the state of the MTX_RECURSED bit changed. */ - owner = (struct thread *)(v & ~MTX_FLAGMASK); -#ifdef ADAPTIVE_GIANT - if (TD_IS_RUNNING(owner)) -#else - if (m != &Giant && TD_IS_RUNNING(owner)) -#endif - { + if ((v & MTX_CONTESTED) == 0 && + !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) { turnstile_cancel(ts); - while (mtx_owner(m) == owner && TD_IS_RUNNING(owner)) { - cpu_spinwait(); - } + cpu_spinwait(); continue; } -#endif /* ADAPTIVE_MUTEXES */ /* * We definitely must sleep for this lock. @@ -589,17 +611,7 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line) if (LOCK_LOG_TEST(&m->lock_object, opts)) CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m); -#ifdef ADAPTIVE_MUTEXES - if (ts == NULL) { - _release_lock_quick(m); - if (LOCK_LOG_TEST(&m->lock_object, opts)) - CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m); - turnstile_chain_unlock(&m->lock_object); - return; - } -#else MPASS(ts != NULL); -#endif turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE); _release_lock_quick(m); /* |