author | John Baldwin <jhb@FreeBSD.org> | 2005-04-04 21:53:56 +0000
---|---|---
committer | John Baldwin <jhb@FreeBSD.org> | 2005-04-04 21:53:56 +0000
commit | c6a37e84139a1c73d4ef46ce4fdf8598a0ebbf45 (patch) |
tree | d48c8aa642d31e026486326f9d281f4d5eff0bdb | /sys/amd64
parent | 426494536e36762242e1e977fd33b4b0c11c96c9 (diff) |
download | src-c6a37e84139a1c73d4ef46ce4fdf8598a0ebbf45.tar.gz, src-c6a37e84139a1c73d4ef46ce4fdf8598a0ebbf45.zip |
Divorce critical sections from spinlocks. Critical sections as denoted by
critical_enter() and critical_exit() are now solely a mechanism for
deferring kernel preemptions. They no longer have any effect on
interrupts. This means that standalone critical sections are now very
cheap, as in the common case they are simply unlocked integer increments
and decrements.
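To make the counter scheme concrete, here is a minimal, compilable user-space model — an illustrative sketch only, not the kernel's actual code (the real critical_enter()/critical_exit() operate on curthread's td_critnest in MI code; all model_* names here are invented):

```c
#include <stdio.h>

/* Stand-ins for the fields the real code keeps in struct thread. */
struct model_thread {
	int	td_critnest;	/* critical section nesting depth */
	int	td_owepreempt;	/* was a preemption deferred? */
};

static struct model_thread curthread_model;

static void
model_critical_enter(void)
{
	/* Common case: a plain, unlocked integer increment. */
	curthread_model.td_critnest++;
}

static void
model_critical_exit(void)
{
	/* Common case: a plain, unlocked integer decrement... */
	curthread_model.td_critnest--;
	/* ...plus a deferred-preemption check on the 1->0 transition. */
	if (curthread_model.td_critnest == 0 &&
	    curthread_model.td_owepreempt) {
		curthread_model.td_owepreempt = 0;
		printf("running the deferred preemption now\n");
	}
}

int
main(void)
{
	model_critical_enter();
	model_critical_enter();			/* sections nest freely */
	curthread_model.td_owepreempt = 1;	/* "interrupt" asks to preempt */
	model_critical_exit();			/* still nested: nothing happens */
	model_critical_exit();			/* outermost exit: preemption runs */
	return (0);
}
```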
Spin mutexes now use a separate KPI implemented in MD code: spinlock_enter()
and spinlock_exit(). This KPI is responsible for providing whatever MD
guarantees are needed to ensure that a thread holding a spin lock won't
be preempted by any other code that will try to lock the same lock. For
now all archs continue to block interrupts in a "spinlock section" as they
formerly did in all critical sections. Note that I've also taken this
opportunity to push a few things into MD code rather than MI. For example,
critical_fork_exit() no longer exists. Instead, MD code ensures that new
threads have the correct state when they are created. Also, we no longer
try to fix up the idle threads for APs in MI code. Instead, each arch sets
the initial curthread and adjusts the state of the idle thread it borrows
in order to perform the initial context switch.
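The actual amd64 spinlock_enter()/spinlock_exit() bodies appear in the machdep.c hunk of the diff below. As a rough stand-alone sketch of how a spin mutex composes with that KPI, the compilable user-space model below stubs out intr_disable()/intr_restore() and critical_enter()/critical_exit() (on real amd64 the former save RFLAGS and clear PSL_I) and reduces the mutex itself to a bare test-and-set word — a model of the layering, not the real kern/kern_mutex.c:

```c
#include <stdatomic.h>
#include <stdint.h>

/* Stand-ins for the MD per-thread fields added by this change. */
struct model_thread {
	int		md_spinlock_count;	/* spinlock nesting depth */
	uint64_t	md_saved_flags;		/* flags at outermost enter */
};

static struct model_thread td;			/* stands in for curthread */

/* Stubs; the real MD versions manipulate the CPU interrupt flag. */
static uint64_t	intr_disable(void) { return (0); }
static void	intr_restore(uint64_t flags) { (void)flags; }
static void	critical_enter(void) { }
static void	critical_exit(void) { }

static void
spinlock_enter(void)
{
	/* Only the outermost acquisition actually disables interrupts. */
	if (td.md_spinlock_count == 0)
		td.md_saved_flags = intr_disable();
	td.md_spinlock_count++;
	critical_enter();
}

static void
spinlock_exit(void)
{
	critical_exit();
	td.md_spinlock_count--;
	if (td.md_spinlock_count == 0)
		intr_restore(td.md_saved_flags);
}

/* A spin mutex then wraps the KPI around its spin loop. */
static atomic_flag lock_word = ATOMIC_FLAG_INIT;

static void
model_mtx_lock_spin(void)
{
	spinlock_enter();
	while (atomic_flag_test_and_set(&lock_word))
		;	/* spin; the lock holder cannot be preempted */
}

static void
model_mtx_unlock_spin(void)
{
	atomic_flag_clear(&lock_word);
	spinlock_exit();
}

int
main(void)
{
	model_mtx_lock_spin();
	model_mtx_unlock_spin();
	return (0);
}
```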
This change is largely a big NOP, but the cleaner separation it provides
will allow for more efficient alternative locking schemes in other parts
of the kernel (for example, bare critical sections rather than per-CPU
spin mutexes for per-CPU data).
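As a hedged sketch of that payoff: per-CPU state that is never touched from interrupt context could drop its per-CPU spin mutex for a bare critical section, since only preemption and migration need to be excluded and interrupts are no longer blocked. This is a hypothetical kernel-context fragment (counts and count_event() are invented names, and it only builds as part of a kernel compile):

```c
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pcpu.h>

static long counts[MAXCPU];	/* hypothetical per-CPU event counters */

static void
count_event(void)
{
	critical_enter();	/* cheap now: defers preemption only */
	counts[curcpu]++;	/* no migration or preemption here */
	critical_exit();
}
```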
Reviewed by: grehan, cognet, arch@, others
Tested on: i386, alpha, sparc64, powerpc, arm, possibly more
Notes:
svn path=/head/; revision=144637
Diffstat (limited to 'sys/amd64')
-rw-r--r-- | sys/amd64/amd64/critical.c | 48
-rw-r--r-- | sys/amd64/amd64/machdep.c | 24
-rw-r--r-- | sys/amd64/amd64/mp_machdep.c | 16
-rw-r--r-- | sys/amd64/amd64/vm_machdep.c | 8
-rw-r--r-- | sys/amd64/include/critical.h | 87
-rw-r--r-- | sys/amd64/include/proc.h | 3
6 files changed, 50 insertions, 136 deletions
diff --git a/sys/amd64/amd64/critical.c b/sys/amd64/amd64/critical.c
deleted file mode 100644
index 925a91ca2865..000000000000
--- a/sys/amd64/amd64/critical.c
+++ /dev/null
@@ -1,48 +0,0 @@
-/*-
- * Copyright (c) 2002 Matthew Dillon.  All Rights Reserved.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 4. Neither the name of the University nor the names of its contributors
- *    may be used to endorse or promote products derived from this software
- *    without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
- * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
- * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
-
-#include <sys/param.h>
-#include <sys/systm.h>
-#include <sys/proc.h>
-
-#include <machine/critical.h>
-#include <machine/psl.h>
-
-/*
- * cpu_critical_fork_exit() - cleanup after fork
- *
- *	Enable interrupts in the saved copy of eflags.
- */
-void
-cpu_critical_fork_exit(void)
-{
-
-	curthread->td_md.md_savecrit = read_rflags() | PSL_I;
-}
diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c
index da9e5fd834b5..9ee122951f92 100644
--- a/sys/amd64/amd64/machdep.c
+++ b/sys/amd64/amd64/machdep.c
@@ -1302,6 +1302,30 @@ cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
 	pcpu->pc_acpi_id = 0xffffffff;
 }
 
+void
+spinlock_enter(void)
+{
+	struct thread *td;
+
+	td = curthread;
+	if (td->td_md.md_spinlock_count == 0)
+		td->td_md.md_saved_flags = intr_disable();
+	td->td_md.md_spinlock_count++;
+	critical_enter();
+}
+
+void
+spinlock_exit(void)
+{
+	struct thread *td;
+
+	td = curthread;
+	critical_exit();
+	td->td_md.md_spinlock_count--;
+	if (td->td_md.md_spinlock_count == 0)
+		intr_restore(td->td_md.md_saved_flags);
+}
+
 /*
  * Construct a PCB from a trapframe. This is called from kdb_trap() where
  * we want to start a backtrace from the function that caused us to enter
diff --git a/sys/amd64/amd64/mp_machdep.c b/sys/amd64/amd64/mp_machdep.c
index fa6723bb4d8d..aab6e8b55eae 100644
--- a/sys/amd64/amd64/mp_machdep.c
+++ b/sys/amd64/amd64/mp_machdep.c
@@ -452,6 +452,10 @@ init_secondary(void)
 		panic("cpuid mismatch! boom!!");
 	}
 
+	/* Initialize curthread. */
+	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
+	PCPU_SET(curthread, PCPU_GET(idlethread));
+
 	mtx_lock_spin(&ap_boot_mtx);
 
 	/* Init local apic for irq's */
@@ -490,6 +494,18 @@ init_secondary(void)
 	/* ok, now grab sched_lock and enter the scheduler */
 	mtx_lock_spin(&sched_lock);
 
+	/*
+	 * Correct spinlock nesting.  The idle thread context that we are
+	 * borrowing was created so that it would start out with a single
+	 * spin lock (sched_lock) held in fork_trampoline().  Since we've
+	 * explicitly acquired locks in this function, the nesting count
+	 * is now 2 rather than 1.  Since we are nested, calling
+	 * spinlock_exit() will simply adjust the counts without allowing
+	 * spin lock using code to interrupt us.
+	 */
+	spinlock_exit();
+	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
+
 	binuptime(PCPU_PTR(switchtime));
 	PCPU_SET(switchticks, ticks);
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
index 9b9981234aef..dd1e4bf35266 100644
--- a/sys/amd64/amd64/vm_machdep.c
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -163,6 +163,10 @@ cpu_fork(td1, p2, td2, flags)
 	 * pcb2->pcb_[fg]sbase: cloned above
 	 */
 
+	/* Setup to release sched_lock in fork_exit(). */
+	td2->td_md.md_spinlock_count = 1;
+	td2->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
+
 	/*
 	 * Now, cpu_switch() can schedule the new process.
 	 * pcb_rsp is loaded pointing to the cpu_switch() stack frame
@@ -294,6 +298,10 @@ cpu_set_upcall(struct thread *td, struct thread *td0)
 	 * pcb2->pcb_onfault: cloned above (always NULL here?).
 	 * pcb2->pcb_[fg]sbase: cloned above
 	 */
+
+	/* Setup to release sched_lock in fork_exit(). */
+	td->td_md.md_spinlock_count = 1;
+	td->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
 }
 
 /*
diff --git a/sys/amd64/include/critical.h b/sys/amd64/include/critical.h
deleted file mode 100644
index ac85f2f452c2..000000000000
--- a/sys/amd64/include/critical.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*-
- * Copyright (c) 2002 Matthew Dillon.  All Rights Reserved.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 4. Neither the name of the University nor the names of its contributors
- *    may be used to endorse or promote products derived from this software
- *    without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
- * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
- * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file contains prototypes and high-level inlines related to
- * machine-level critical function support:
- *
- *	 cpu_critical_enter()		- inlined
- *	 cpu_critical_exit()		- inlined
- *	 cpu_critical_fork_exit()	- prototyped
- *	 related support functions residing
- *	 in <arch>/<arch>/critical.c	- prototyped
- *
- * $FreeBSD$
- */
-
-#ifndef _MACHINE_CRITICAL_H_
-#define _MACHINE_CRITICAL_H_
-
-__BEGIN_DECLS
-
-/*
- * Prototypes - see <arch>/<arch>/critical.c
- */
-void cpu_critical_fork_exit(void);
-
-#ifdef __CC_SUPPORTS___INLINE
-
-/*
- * cpu_critical_enter:
- *
- *	This routine is called from critical_enter() on the 0->1 transition
- *	of td_critnest, prior to it being incremented to 1.
- */
-static __inline void
-cpu_critical_enter(struct thread *td)
-{
-
-	td->td_md.md_savecrit = intr_disable();
-}
-
-/*
- * cpu_critical_exit:
- *
- *	This routine is called from critical_exit() on a 1->0 transition
- *	of td_critnest, after it has been decremented to 0.  We are
- *	exiting the last critical section.
- */
-static __inline void
-cpu_critical_exit(struct thread *td)
-{
-	intr_restore(td->td_md.md_savecrit);
-}
-
-#else /* !__CC_SUPPORTS___INLINE */
-
-void cpu_critical_enter(struct thread *td);
-void cpu_critical_exit(struct thread *td);
-
-#endif /* __CC_SUPPORTS___INLINE */
-
-__END_DECLS
-
-#endif /* !_MACHINE_CRITICAL_H_ */
-
diff --git a/sys/amd64/include/proc.h b/sys/amd64/include/proc.h
index b019455e5cd1..ba6354b68dce 100644
--- a/sys/amd64/include/proc.h
+++ b/sys/amd64/include/proc.h
@@ -37,7 +37,8 @@
  * Machine-dependent part of the proc structure for AMD64.
  */
 struct mdthread {
-	register_t md_savecrit;
+	int	md_spinlock_count;	/* (k) */
+	register_t md_saved_flags;	/* (k) */
 };
 
 struct mdproc {