about summary refs log tree commit diff
diff options
context:
space:
mode:
author	David Xu <davidxu@FreeBSD.org>	2003-08-26 11:33:15 +0000
committer	David Xu <davidxu@FreeBSD.org>	2003-08-26 11:33:15 +0000
commit	ab2baa7254781a77b2d7173136078edb70fd605d (patch)
tree	4bb8e20b04742b61950c8bc397dd63f106ff3868
parent	6e433dcdea3c7ca411c6c5a2455d0437b81d331b (diff)
download	src-ab2baa7254781a77b2d7173136078edb70fd605d.tar.gz
src-ab2baa7254781a77b2d7173136078edb70fd605d.zip
Let SA process work under ULE scheduler, originally it would panic kernel.
Reviewed by: jeff
Notes
Notes: svn path=/head/; revision=119488
-rw-r--r--	sys/kern/kern_kse.c	19
-rw-r--r--	sys/kern/kern_thread.c	19
-rw-r--r--	sys/kern/sched_ule.c	35
3 files changed, 49 insertions, 24 deletions
diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c
index 2af36c4dd668..10831cf2f8c6 100644
--- a/sys/kern/kern_kse.c
+++ b/sys/kern/kern_kse.c
@@ -259,8 +259,7 @@ kse_unlink(struct kse *ke)
TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
kg->kg_idle_kses--;
}
- if (--kg->kg_kses == 0)
- ksegrp_unlink(kg);
+ --kg->kg_kses;
/*
* Aggregate stats from the KSE
*/
@@ -396,6 +395,7 @@ kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
struct thread *td2;
p = td->td_proc;
+
if (!(p->p_flag & P_SA))
return (EINVAL);
@@ -696,14 +696,18 @@ kse_create(struct thread *td, struct kse_create_args *uap)
kg_startzero, kg_endzero));
bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
+ PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
if (p->p_numksegrps >= max_groups_per_proc) {
mtx_unlock_spin(&sched_lock);
+ PROC_UNLOCK(p);
ksegrp_free(newkg);
return (EPROCLIM);
}
ksegrp_link(newkg, p);
+ sched_fork_ksegrp(kg, newkg);
mtx_unlock_spin(&sched_lock);
+ PROC_UNLOCK(p);
} else {
if (!first && ((td->td_flags & TDF_SA) ^ sa) != 0)
return (EINVAL);
@@ -748,6 +752,7 @@ kse_create(struct thread *td, struct kse_create_args *uap)
#endif
mtx_lock_spin(&sched_lock);
kse_link(newke, newkg);
+ sched_fork_kse(td->td_kse, newke);
/* Add engine */
kse_reassign(newke);
mtx_unlock_spin(&sched_lock);
@@ -1266,13 +1271,20 @@ thread_exit(void)
if (td->td_upcall)
upcall_remove(td);
+ sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
+ sched_exit_kse(FIRST_KSE_IN_PROC(p), ke);
ke->ke_state = KES_UNQUEUED;
ke->ke_thread = NULL;
/*
* Decide what to do with the KSE attached to this thread.
*/
- if (ke->ke_flags & KEF_EXIT)
+ if (ke->ke_flags & KEF_EXIT) {
kse_unlink(ke);
+ if (kg->kg_kses == 0) {
+ sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), kg);
+ ksegrp_unlink(kg);
+ }
+ }
else
kse_reassign(ke);
PROC_UNLOCK(p);
@@ -1484,6 +1496,7 @@ thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
td2->td_inhibitors = 0;
SIGFILLSET(td2->td_sigmask);
SIG_CANTMASK(td2->td_sigmask);
+ sched_fork_thread(td, td2);
return (td2); /* bogus.. should be a void function */
}
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 2af36c4dd668..10831cf2f8c6 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -259,8 +259,7 @@ kse_unlink(struct kse *ke)
TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
kg->kg_idle_kses--;
}
- if (--kg->kg_kses == 0)
- ksegrp_unlink(kg);
+ --kg->kg_kses;
/*
* Aggregate stats from the KSE
*/
@@ -396,6 +395,7 @@ kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
struct thread *td2;
p = td->td_proc;
+
if (!(p->p_flag & P_SA))
return (EINVAL);
@@ -696,14 +696,18 @@ kse_create(struct thread *td, struct kse_create_args *uap)
kg_startzero, kg_endzero));
bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
+ PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
if (p->p_numksegrps >= max_groups_per_proc) {
mtx_unlock_spin(&sched_lock);
+ PROC_UNLOCK(p);
ksegrp_free(newkg);
return (EPROCLIM);
}
ksegrp_link(newkg, p);
+ sched_fork_ksegrp(kg, newkg);
mtx_unlock_spin(&sched_lock);
+ PROC_UNLOCK(p);
} else {
if (!first && ((td->td_flags & TDF_SA) ^ sa) != 0)
return (EINVAL);
@@ -748,6 +752,7 @@ kse_create(struct thread *td, struct kse_create_args *uap)
#endif
mtx_lock_spin(&sched_lock);
kse_link(newke, newkg);
+ sched_fork_kse(td->td_kse, newke);
/* Add engine */
kse_reassign(newke);
mtx_unlock_spin(&sched_lock);
@@ -1266,13 +1271,20 @@ thread_exit(void)
if (td->td_upcall)
upcall_remove(td);
+ sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
+ sched_exit_kse(FIRST_KSE_IN_PROC(p), ke);
ke->ke_state = KES_UNQUEUED;
ke->ke_thread = NULL;
/*
* Decide what to do with the KSE attached to this thread.
*/
- if (ke->ke_flags & KEF_EXIT)
+ if (ke->ke_flags & KEF_EXIT) {
kse_unlink(ke);
+ if (kg->kg_kses == 0) {
+ sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), kg);
+ ksegrp_unlink(kg);
+ }
+ }
else
kse_reassign(ke);
PROC_UNLOCK(p);
@@ -1484,6 +1496,7 @@ thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
td2->td_inhibitors = 0;
SIGFILLSET(td2->td_sigmask);
SIG_CANTMASK(td2->td_sigmask);
+ sched_fork_thread(td, td2);
return (td2); /* bogus.. should be a void function */
}
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index b5b31ba2d330..319a38de6ef8 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -772,18 +772,12 @@ sched_pickcpu(void)
void
sched_prio(struct thread *td, u_char prio)
{
- struct kse *ke;
- struct runq *rq;
mtx_assert(&sched_lock, MA_OWNED);
- ke = td->td_kse;
- td->td_priority = prio;
-
if (TD_ON_RUNQ(td)) {
- rq = ke->ke_runq;
-
- runq_remove(rq, ke);
- runq_add(rq, ke);
+ adjustrunqueue(td, prio);
+ } else {
+ td->td_priority = prio;
}
}
@@ -802,15 +796,20 @@ sched_switchout(struct thread *td)
td->td_flags &= ~TDF_NEEDRESCHED;
if (TD_IS_RUNNING(td)) {
- /*
- * This queue is always correct except for idle threads which
- * have a higher priority due to priority propagation.
- */
- if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE &&
- ke->ke_thread->td_priority > PRI_MIN_IDLE)
- ke->ke_runq = KSEQ_SELF()->ksq_curr;
- runq_add(ke->ke_runq, ke);
- /* setrunqueue(td); */
+ if (td->td_proc->p_flag & P_SA) {
+ kseq_rem(KSEQ_CPU(ke->ke_cpu), ke);
+ setrunqueue(td);
+ } else {
+ /*
+ * This queue is always correct except for idle threads which
+ * have a higher priority due to priority propagation.
+ */
+ if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE &&
+ ke->ke_thread->td_priority > PRI_MIN_IDLE)
+ ke->ke_runq = KSEQ_SELF()->ksq_curr;
+ runq_add(ke->ke_runq, ke);
+ /* setrunqueue(td); */
+ }
return;
}
if (ke->ke_runq)