aboutsummaryrefslogtreecommitdiff
path: root/sys/kern/kern_mutex.c
diff options
context:
space:
mode:
authorJohn Baldwin <jhb@FreeBSD.org>2005-01-05 21:13:27 +0000
committerJohn Baldwin <jhb@FreeBSD.org>2005-01-05 21:13:27 +0000
commit33fb8a386ea1cd596a885922ed141baab0fc053c (patch)
tree08a77e52d9c917cc5536ec8885e05c9cb42f5cd1 /sys/kern/kern_mutex.c
parent0027ba028a2fcdfa3556a9ab72c89ea5aa8c0d2a (diff)
downloadsrc-33fb8a386ea1cd596a885922ed141baab0fc053c.tar.gz
src-33fb8a386ea1cd596a885922ed141baab0fc053c.zip
Rework the optimization for spinlocks on UP to be slightly less drastic and
turn it back on. Specifically, the actual changes are now less intrusive in that the _get_spin_lock() and _rel_spin_lock() macros now have their contents changed for UP vs SMP kernels, which centralizes the changes. Also, UP kernels do not use _mtx_lock_spin() and no longer include it. The UP versions of the spin lock functions do not use any atomic operations, but simple compares and stores, which allow mtx_owned() to still work for spin locks while removing the overhead of atomic operations.

Tested on: i386, alpha
Notes
Notes: svn path=/head/; revision=139733
Diffstat (limited to 'sys/kern/kern_mutex.c')
-rw-r--r--sys/kern/kern_mutex.c10
1 file changed, 2 insertions, 8 deletions
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index af1406732f26..ed8f7ecae2e1 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -374,11 +374,7 @@ _mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
m->mtx_object.lo_name, file, line));
WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
file, line);
-#if defined(SMP) || LOCK_DEBUG > 0 || 1
_get_spin_lock(m, curthread, opts, file, line);
-#else
- critical_enter();
-#endif
LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
line);
WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
@@ -396,11 +392,7 @@ _mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
line);
mtx_assert(m, MA_OWNED);
-#if defined(SMP) || LOCK_DEBUG > 0 || 1
_rel_spin_lock(m);
-#else
- critical_exit();
-#endif
}
/*
@@ -573,6 +565,7 @@ _mtx_lock_sleep(struct mtx *m, struct thread *td, int opts, const char *file,
return;
}
+#ifdef SMP
/*
* _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
*
@@ -620,6 +613,7 @@ _mtx_lock_spin(struct mtx *m, struct thread *td, int opts, const char *file,
return;
}
+#endif /* SMP */
/*
* _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.