aboutsummaryrefslogtreecommitdiff
path: root/sys/kern/kern_fork.c
diff options
context:
space:
mode:
authorJohn Baldwin <jhb@FreeBSD.org>2001-12-18 00:27:18 +0000
committerJohn Baldwin <jhb@FreeBSD.org>2001-12-18 00:27:18 +0000
commit7e1f6dfe9d7ac65419d57b36dee19cd1a3e996f5 (patch)
treebd1f842c61588e8478e798dece6dff8b2be41310 /sys/kern/kern_fork.c
parent59f2fc2ca4bb8915d36d16365bc129a31fec0219 (diff)
downloadsrc-7e1f6dfe9d7ac65419d57b36dee19cd1a3e996f5.tar.gz
src-7e1f6dfe9d7ac65419d57b36dee19cd1a3e996f5.zip
Modify the critical section API as follows:
- The MD functions critical_enter/exit are renamed to start with a cpu_ prefix.
- MI wrapper functions critical_enter/exit maintain a per-thread nesting count and a per-thread critical section saved state set when entering a critical section while at nesting level 0 and restored when exiting to nesting level 0. This moves the saved state out of spin mutexes so that interlocking spin mutexes works properly.
- Most low-level MD code that used critical_enter/exit now use cpu_critical_enter/exit. MI code such as device drivers and spin mutexes use the MI wrappers. Note that since the MI wrappers store the state in the current thread, they do not have any return values or arguments.
- mtx_intr_enable() is replaced with a constant CRITICAL_FORK which is assigned to curthread->td_savecrit during fork_exit().

Tested on: i386, alpha
Notes
Notes: svn path=/head/; revision=88088
Diffstat (limited to 'sys/kern/kern_fork.c')
-rw-r--r--sys/kern/kern_fork.c9
1 files changed, 2 insertions, 7 deletions
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index bb52a34b8ef0..bc03078976f8 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -467,9 +467,6 @@ again:
if (p1->p_sflag & PS_PROFIL)
startprofclock(p2);
mtx_unlock_spin(&sched_lock);
- /*
- * We start off holding one spinlock after fork: sched_lock.
- */
PROC_LOCK(p1);
p2->p_ucred = crhold(p1->p_ucred);
p2->p_thread.td_ucred = crhold(p2->p_ucred); /* XXXKSE */
@@ -766,10 +763,8 @@ fork_exit(callout, arg, frame)
*/
sched_lock.mtx_lock = (uintptr_t)td;
sched_lock.mtx_recurse = 0;
- /*
- * XXX: We really shouldn't have to do this.
- */
- mtx_intr_enable(&sched_lock);
+ td->td_critnest = 1;
+ td->td_savecrit = CRITICAL_FORK;
CTR3(KTR_PROC, "fork_exit: new proc %p (pid %d, %s)", p, p->p_pid,
p->p_comm);
if (PCPU_GET(switchtime.tv_sec) == 0)