about summary refs log tree commit diff
path: root/sys/kern/sched_ule.c
diff options
context:
space:
mode:
authorJeff Roberson <jeff@FreeBSD.org>2020-01-23 03:36:50 +0000
committerJeff Roberson <jeff@FreeBSD.org>2020-01-23 03:36:50 +0000
commit1eb13fce849fe14260a24656d532fd7700d92d6b (patch)
tree6d37bab4e711640eab98d04f9a7dccad30d05199 /sys/kern/sched_ule.c
parente87ad0ab37f5cb7d7bd40b5f8c8b43a83f516737 (diff)
downloadsrc-1eb13fce849fe14260a24656d532fd7700d92d6b.tar.gz
src-1eb13fce849fe14260a24656d532fd7700d92d6b.zip
Block the thread lock in sched_throw() and use cpu_switch() to unblock
it.  The introduction of lockless switch in r355784 created a race to
re-use the exiting thread that was only possible to hit on a hypervisor.

Reported/Tested by:	rlibby
Discussed with:		rlibby, jhb
Notes
Notes: svn path=/head/; revision=357014
Diffstat (limited to 'sys/kern/sched_ule.c')
-rw-r--r--sys/kern/sched_ule.c9
1 files changed, 7 insertions, 2 deletions
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 3c35179e2bee..7dc48a2336bf 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -2894,7 +2894,7 @@ sched_throw(struct thread *td)
struct thread *newtd;
struct tdq *tdq;
- if (td == NULL) {
+ if (__predict_false(td == NULL)) {
#ifdef SMP
PCPU_SET(sched, DPCPU_PTR(tdq));
#endif
@@ -2912,13 +2912,18 @@ sched_throw(struct thread *td)
tdq_load_rem(tdq, td);
td->td_lastcpu = td->td_oncpu;
td->td_oncpu = NOCPU;
+ thread_lock_block(td);
}
newtd = choosethread();
spinlock_enter();
TDQ_UNLOCK(tdq);
KASSERT(curthread->td_md.md_spinlock_count == 1,
("invalid count %d", curthread->td_md.md_spinlock_count));
- cpu_throw(td, newtd); /* doesn't return */
+ /* doesn't return */
+ if (__predict_false(td == NULL))
+ cpu_throw(td, newtd); /* doesn't return */
+ else
+ cpu_switch(td, newtd, TDQ_LOCKPTR(tdq));
}
/*