author		Jeff Roberson <jeff@FreeBSD.org>	2019-12-16 20:15:04 +0000
committer	Jeff Roberson <jeff@FreeBSD.org>	2019-12-16 20:15:04 +0000
commit		a94ba188c3443d90bad4a1d29bb477a7760c7267 (patch)
tree		d15d0bf3a1e0cef48b7e615d7101a69dd0e45141 /sys/i386
parent		b5f20658ee91b62296384ec68cd1fc82a4fbe4bb (diff)
download	src-a94ba188c3443d90bad4a1d29bb477a7760c7267.tar.gz
		src-a94ba188c3443d90bad4a1d29bb477a7760c7267.zip
Repeat the spinlock_enter/exit pattern from amd64 on other architectures
to fix an assert violation introduced in r355784.  Without this,
spinlock_exit() may see owepreempt and switch before reducing the
spinlock count.  amd64 had been optimized to do a single critical
enter/exit regardless of the number of spinlocks, which avoided the
problem; that optimization had not been applied elsewhere.

Reported by:	emaste
Suggested by:	rlibby
Discussed with:	jhb, rlibby
Tested by:	manu (arm64)
Notes:
	svn path=/head/; revision=355819
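
To make the failure mode concrete, here is a small user-space C model of the
pattern this change repeats from amd64.  It is a sketch under stated
assumptions, not kernel code: spinlock_count stands in for
td_md.md_spinlock_count, critical_nesting for td_critnest, and the assert()
loosely models the r355784 assertion that a thread never switches while
holding spinlocks.  The key property is that critical_enter() runs only on
the 0 -> 1 transition and critical_exit() only on the 1 -> 0 transition, so
the critical section covers the entire window in which any spinlock is held.
With the old ordering (critical_exit() before the decrement), the assert
below would fire.

/*
 * Illustrative user-space model of the amd64-style pattern: a
 * per-thread recursion counter with a single critical enter/exit.
 */
#include <assert.h>
#include <stdio.h>

static int spinlock_count;	/* models td_md.md_spinlock_count */
static int critical_nesting;	/* models td_critnest */

static void
critical_enter(void)
{
	critical_nesting++;
}

static void
critical_exit(void)
{
	/*
	 * Leaving the outermost critical section is the point where a
	 * deferred preemption could switch threads; no spinlocks may
	 * still be held here (models the r355784-era assertion).
	 */
	assert(spinlock_count == 0);
	critical_nesting--;
}

static void
spinlock_enter(void)
{
	if (spinlock_count == 0) {
		spinlock_count = 1;
		critical_enter();	/* outermost acquire only */
	} else
		spinlock_count++;
}

static void
spinlock_exit(void)
{
	spinlock_count--;
	if (spinlock_count == 0)
		critical_exit();	/* count already 0: safe to switch */
}

int
main(void)
{
	spinlock_enter();
	spinlock_enter();	/* nested acquire: no extra critical_enter() */
	spinlock_exit();
	spinlock_exit();	/* outermost release: assertion holds */
	printf("count=%d nesting=%d\n", spinlock_count, critical_nesting);
	return (0);
}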
Diffstat (limited to 'sys/i386')
-rw-r--r--	sys/i386/i386/machdep.c	7
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/sys/i386/i386/machdep.c b/sys/i386/i386/machdep.c
index 60ebb3a3510e..b6a960ee0d10 100644
--- a/sys/i386/i386/machdep.c
+++ b/sys/i386/i386/machdep.c
@@ -2679,9 +2679,9 @@ spinlock_enter(void)
 		flags = intr_disable();
 		td->td_md.md_spinlock_count = 1;
 		td->td_md.md_saved_flags = flags;
+		critical_enter();
 	} else
 		td->td_md.md_spinlock_count++;
-	critical_enter();
 }
 
 void
@@ -2691,11 +2691,12 @@ spinlock_exit(void)
 	register_t flags;
 
 	td = curthread;
-	critical_exit();
 	flags = td->td_md.md_saved_flags;
 	td->td_md.md_spinlock_count--;
-	if (td->td_md.md_spinlock_count == 0)
+	if (td->td_md.md_spinlock_count == 0) {
+		critical_exit();
 		intr_restore(flags);
+	}
 }
 
 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
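
For reference, here is how the two i386 functions read with this change
applied.  The hunks above show only part of each function, so the
surrounding declarations are filled in from the usual machdep.c structure;
treat this as a best-effort reconstruction rather than a verbatim quote.

void
spinlock_enter(void)
{
	struct thread *td;
	register_t flags;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		flags = intr_disable();		/* outermost: mask interrupts */
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_flags = flags;
		critical_enter();		/* and pin to the CPU */
	} else
		td->td_md.md_spinlock_count++;
}

void
spinlock_exit(void)
{
	struct thread *td;
	register_t flags;

	td = curthread;
	flags = td->td_md.md_saved_flags;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0) {
		critical_exit();		/* count is 0 before any switch */
		intr_restore(flags);
	}
}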