diff options
author:    Julian Elischer <julian@FreeBSD.org>  2004-09-01 02:11:28 +0000
committer: Julian Elischer <julian@FreeBSD.org>  2004-09-01 02:11:28 +0000
commit:    2630e4c90c7b775eb0248be7594a699839363dda (patch)
tree:      16691683be7649ae0915de2c1fa1995b931091a3 /sys/kern/sched_ule.c
parent:    b443062227d14d9d43c2c1c5825952ac5b6417cb (diff)
download:  src-2630e4c90c7b775eb0248be7594a699839363dda.tar.gz
           src-2630e4c90c7b775eb0248be7594a699839363dda.zip
Give setrunqueue() and sched_add() more of a clue as to
where they are coming from and what is expected from them.
MFC after: 2 days
Notes:
svn path=/head/; revision=134586
Diffstat (limited to 'sys/kern/sched_ule.c'):
  sys/kern/sched_ule.c | 17
  1 file changed, 13 insertions, 4 deletions
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 0e88c7bba932..5582a40f0d90 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -1183,7 +1183,7 @@ sched_switch(struct thread *td, struct thread *newtd)
 		 * Don't allow the kse to migrate from a preemption.
 		 */
 		ke->ke_flags |= KEF_HOLD;
-		setrunqueue(td);
+		setrunqueue(td, SRQ_OURSELF|SRQ_YIELDING);
 	} else {
 		if (ke->ke_runq) {
 			kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
@@ -1281,7 +1281,7 @@ sched_wakeup(struct thread *td)
 		    td->td_kse, hzticks);
 		td->td_slptime = 0;
 	}
-	setrunqueue(td);
+	setrunqueue(td, SRQ_BORING);
 }
 
 /*
@@ -1581,10 +1581,19 @@ restart:
 }
 
 void
-sched_add(struct thread *td)
+sched_add(struct thread *td, int flags)
 {
-	sched_add_internal(td, 1);
+	/* let jeff work out how to map the flags better */
+	/* I'm open to suggestions */
+	if (flags & SRQ_YIELDING)
+		/*
+		 * Preempting during switching can be bad JUJU
+		 * especially for KSE processes
+		 */
+		sched_add_internal(td, 0);
+	else
+		sched_add_internal(td, 1);
 }
 
 static void