 sys/kern/sched_ule.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 27e8371dff2b..fd14fc4021a3 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -1749,19 +1749,19 @@ sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags)
*/
spinlock_enter();
thread_block_switch(td); /* This releases the lock on tdq. */
- TDQ_LOCK(tdn);
- tdq_add(tdn, td, flags);
- tdq_notify(tdn, td);
+
/*
- * After we unlock tdn the new cpu still can't switch into this
- * thread until we've unblocked it in cpu_switch(). The lock
- * pointers may match in the case of HTT cores. Don't unlock here
- * or we can deadlock when the other CPU runs the IPI handler.
+ * Acquire both run-queue locks before placing the thread on the new
+ * run-queue to avoid deadlocks created by placing a thread with a
+ * blocked lock on the run-queue of a remote processor. The deadlock
+ * occurs when a third processor attempts to lock the two queues in
+ * question while the target processor is spinning with its own
+ * run-queue lock held while waiting for the blocked lock to clear.
*/
- if (TDQ_LOCKPTR(tdn) != TDQ_LOCKPTR(tdq)) {
- TDQ_UNLOCK(tdn);
- TDQ_LOCK(tdq);
- }
+ tdq_lock_pair(tdn, tdq);
+ tdq_add(tdn, td, flags);
+ tdq_notify(tdn, td);
+ TDQ_UNLOCK(tdn);
spinlock_exit();
#endif
return (TDQ_LOCKPTR(tdn));
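
The change above relies on tdq_lock_pair() taking the two per-CPU run-queue locks in one consistent global order, which is what breaks the three-CPU cycle described in the new comment: every migration path agrees on which lock comes first. The helper's body is not part of this hunk; the following is only a minimal sketch of that lock-ordering idiom, built from the TDQ_LOCK()/TDQ_LOCKPTR() macros already visible in the diff, with a hypothetical name and an assumed order-by-lock-address rule.

/*
 * Sketch only: acquire two run-queue locks in a fixed order (lower
 * lock address first) so that concurrent migrations between any pair
 * of CPUs cannot deadlock.  HTT siblings may share a single lock, in
 * which case it is taken once.  The real tdq_lock_pair() in
 * sched_ule.c may differ in detail.
 */
static void
tdq_lock_pair_sketch(struct tdq *one, struct tdq *two)
{

	if (TDQ_LOCKPTR(one) == TDQ_LOCKPTR(two)) {
		TDQ_LOCK(one);
		return;
	}
	if (TDQ_LOCKPTR(one) < TDQ_LOCKPTR(two)) {
		TDQ_LOCK(one);
		TDQ_LOCK(two);
	} else {
		TDQ_LOCK(two);
		TDQ_LOCK(one);
	}
}

With both locks held, the switch path can safely call tdq_add() and tdq_notify() on the remote queue and then drop only the remote lock, which is exactly what the new code in this hunk does before spinlock_exit().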