aboutsummaryrefslogtreecommitdiff
path: root/sys
diff options
context:
space:
mode:
authorAttilio Rao <attilio@FreeBSD.org>2008-02-06 00:04:09 +0000
committerAttilio Rao <attilio@FreeBSD.org>2008-02-06 00:04:09 +0000
commit13ddf72de77fc69e38a88a0ebcbac8cf796f8cfd (patch)
tree1b1f73dd58702a12d8c524882db74b0a9ff444d9 /sys
parent7a21dee3c28aa7b37d1a7292e34188cf51db7adf (diff)
downloadsrc-13ddf72de77fc69e38a88a0ebcbac8cf796f8cfd.tar.gz
src-13ddf72de77fc69e38a88a0ebcbac8cf796f8cfd.zip
Really, no explicit checks against lock_class_* objects should be
done in consumer code: using lock properties is much more appropriate. Fix current code doing these bogus checks. Note: Really, callouts are not usable by all !(LC_SPINLOCK | LC_SLEEPABLE) primitives, as rmlocks don't implement the generic lock layer functions, but they can be equipped for this, so the check is still valid. Tested by: matteo, kris (earlier version) Reviewed by: jhb
Notes
Notes: svn path=/head/; revision=176013
Diffstat (limited to 'sys')
-rw-r--r--sys/kern/kern_timeout.c | 4 ++--
-rw-r--r--sys/kern/subr_lock.c | 4 ++--
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c
index 143e38c7715f..d48bf2e94f34 100644
--- a/sys/kern/kern_timeout.c
+++ b/sys/kern/kern_timeout.c
@@ -639,8 +639,8 @@ _callout_init_lock(c, lock, flags)
("callout_init_lock: bad flags %d", flags));
KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
- KASSERT(lock == NULL || LOCK_CLASS(lock) == &lock_class_mtx_sleep ||
- LOCK_CLASS(lock) == &lock_class_rw, ("%s: invalid lock class",
+ KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
+ (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
__func__));
c->c_flags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
}
diff --git a/sys/kern/subr_lock.c b/sys/kern/subr_lock.c
index b236f8db903a..74a4148e9de4 100644
--- a/sys/kern/subr_lock.c
+++ b/sys/kern/subr_lock.c
@@ -494,7 +494,7 @@ lock_profile_obtain_lock_success(struct lock_object *lo, int contested,
if (lock_prof_skipcount &&
(++lock_prof_count % lock_prof_skipcount) != 0)
return;
- spin = LOCK_CLASS(lo) == &lock_class_mtx_spin;
+ spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
if (spin && lock_prof_skipspin == 1)
return;
l = lock_profile_object_lookup(lo, spin, file, line);
@@ -523,7 +523,7 @@ lock_profile_release_lock(struct lock_object *lo)
if (!lock_prof_enable || (lo->lo_flags & LO_NOPROFILE))
return;
- spin = LOCK_CLASS(lo) == &lock_class_mtx_spin;
+ spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
head = &curthread->td_lprof[spin];
critical_enter();
LIST_FOREACH(l, head, lpo_link)