diff options
-rw-r--r-- | sys/kern/subr_sleepqueue.c | 69 |
-rw-r--r-- | sys/kern/subr_turnstile.c | 75 |
2 files changed, 106 insertions(+), 38 deletions(-)
diff --git a/sys/kern/subr_sleepqueue.c b/sys/kern/subr_sleepqueue.c index 276d0aa14a47..f15598ea71ab 100644 --- a/sys/kern/subr_sleepqueue.c +++ b/sys/kern/subr_sleepqueue.c @@ -70,7 +70,6 @@ __FBSDID("$FreeBSD$"); #include <sys/lock.h> #include <sys/kernel.h> #include <sys/ktr.h> -#include <sys/malloc.h> #include <sys/mutex.h> #include <sys/proc.h> #include <sys/sched.h> @@ -78,6 +77,8 @@ __FBSDID("$FreeBSD$"); #include <sys/sleepqueue.h> #include <sys/sysctl.h> +#include <vm/uma.h> + #ifdef DDB #include <ddb/ddb.h> #endif @@ -142,8 +143,7 @@ SYSCTL_UINT(_debug_sleepq, OID_AUTO, max_depth, CTLFLAG_RD, &sleepq_max_depth, 0, "maxmimum depth achieved of a single chain"); #endif static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE]; - -static MALLOC_DEFINE(M_SLEEPQUEUE, "sleepqueue", "sleep queues"); +static uma_zone_t sleepq_zone; /* * Prototypes for non-exported routines. @@ -151,9 +151,14 @@ static MALLOC_DEFINE(M_SLEEPQUEUE, "sleepqueue", "sleep queues"); static int sleepq_catch_signals(void *wchan); static int sleepq_check_signals(void); static int sleepq_check_timeout(void); +#ifdef INVARIANTS +static void sleepq_dtor(void *mem, int size, void *arg); +#endif +static int sleepq_init(void *mem, int size, int flags); +static void sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, + int pri); static void sleepq_switch(void *wchan); static void sleepq_timeout(void *arg); -static void sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri); /* * Early initialization of sleep queues that is called from the sleepinit() @@ -184,23 +189,24 @@ init_sleepqueues(void) NULL); #endif } + sleepq_zone = uma_zcreate("SLEEPQUEUE", sizeof(struct sleepqueue), +#ifdef INVARIANTS + NULL, sleepq_dtor, sleepq_init, NULL, UMA_ALIGN_CACHE, 0); +#else + NULL, NULL, sleepq_init, NULL, UMA_ALIGN_CACHE, 0); +#endif + thread0.td_sleepqueue = sleepq_alloc(); } /* - * Malloc and initialize a new sleep queue for a new thread. 
+ * Get a sleep queue for a new thread. */ struct sleepqueue * sleepq_alloc(void) { - struct sleepqueue *sq; - int i; - sq = malloc(sizeof(struct sleepqueue), M_SLEEPQUEUE, M_WAITOK | M_ZERO); - for (i = 0; i < NR_SLEEPQS; i++) - TAILQ_INIT(&sq->sq_blocked[i]); - LIST_INIT(&sq->sq_free); - return (sq); + return (uma_zalloc(sleepq_zone, M_WAITOK)); } /* @@ -209,12 +215,8 @@ sleepq_alloc(void) void sleepq_free(struct sleepqueue *sq) { - int i; - MPASS(sq != NULL); - for (i = 0; i < NR_SLEEPQS; i++) - MPASS(TAILQ_EMPTY(&sq->sq_blocked[i])); - free(sq, M_SLEEPQUEUE); + uma_zfree(sleepq_zone, sq); } /* @@ -666,6 +668,39 @@ sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri) setrunnable(td); } +#ifdef INVARIANTS +/* + * UMA zone item deallocator. + */ +static void +sleepq_dtor(void *mem, int size, void *arg) +{ + struct sleepqueue *sq; + int i; + + sq = mem; + for (i = 0; i < NR_SLEEPQS; i++) + MPASS(TAILQ_EMPTY(&sq->sq_blocked[i])); +} +#endif + +/* + * UMA zone item initializer. + */ +static int +sleepq_init(void *mem, int size, int flags) +{ + struct sleepqueue *sq; + int i; + + bzero(mem, size); + sq = mem; + for (i = 0; i < NR_SLEEPQS; i++) + TAILQ_INIT(&sq->sq_blocked[i]); + LIST_INIT(&sq->sq_free); + return (0); +} + /* * Find the highest priority thread sleeping on a wait channel and resume it. */ diff --git a/sys/kern/subr_turnstile.c b/sys/kern/subr_turnstile.c index 1bbdf835e78b..4435751543ca 100644 --- a/sys/kern/subr_turnstile.c +++ b/sys/kern/subr_turnstile.c @@ -46,14 +46,14 @@ * chain. Each chain contains a spin mutex that protects all of the * turnstiles in the chain. * - * Each time a thread is created, a turnstile is malloc'd and attached to - * that thread. When a thread blocks on a lock, if it is the first thread - * to block, it lends its turnstile to the lock. If the lock already has - * a turnstile, then it gives its turnstile to the lock's turnstile's free - * list. 
When a thread is woken up, it takes a turnstile from the free list - * if there are any other waiters. If it is the only thread blocked on the - * lock, then it reclaims the turnstile associated with the lock and removes - * it from the hash table. + * Each time a thread is created, a turnstile is allocated from a UMA zone + * and attached to that thread. When a thread blocks on a lock, if it is the + * first thread to block, it lends its turnstile to the lock. If the lock + * already has a turnstile, then it gives its turnstile to the lock's + * turnstile's free list. When a thread is woken up, it takes a turnstile from + * the free list if there are any other waiters. If it is the only thread + * blocked on the lock, then it reclaims the turnstile associated with the lock + * and removes it from the hash table. */ #include <sys/cdefs.h> @@ -67,7 +67,6 @@ __FBSDID("$FreeBSD$"); #include <sys/kernel.h> #include <sys/ktr.h> #include <sys/lock.h> -#include <sys/malloc.h> #include <sys/mutex.h> #include <sys/proc.h> #include <sys/queue.h> @@ -75,6 +74,8 @@ __FBSDID("$FreeBSD$"); #include <sys/sysctl.h> #include <sys/turnstile.h> +#include <vm/uma.h> + #ifdef DDB #include <sys/kdb.h> #include <ddb/ddb.h> @@ -143,8 +144,7 @@ SYSCTL_UINT(_debug_turnstile, OID_AUTO, max_depth, CTLFLAG_RD, #endif static struct mtx td_contested_lock; static struct turnstile_chain turnstile_chains[TC_TABLESIZE]; - -static MALLOC_DEFINE(M_TURNSTILE, "turnstiles", "turnstiles"); +static uma_zone_t turnstile_zone; /* * Prototypes for non-exported routines. 
@@ -158,6 +158,10 @@ static int turnstile_adjust_thread(struct turnstile *ts, struct thread *td); static struct thread *turnstile_first_waiter(struct turnstile *ts); static void turnstile_setowner(struct turnstile *ts, struct thread *owner); +#ifdef INVARIANTS +static void turnstile_dtor(void *mem, int size, void *arg); +#endif +static int turnstile_init(void *mem, int size, int flags); /* * Walks the chain of turnstiles and their owners to propagate the priority @@ -379,6 +383,12 @@ static void init_turnstile0(void *dummy) { + turnstile_zone = uma_zcreate("TURNSTILE", sizeof(struct turnstile), +#ifdef INVARIANTS + NULL, turnstile_dtor, turnstile_init, NULL, UMA_ALIGN_CACHE, 0); +#else + NULL, NULL, turnstile_init, NULL, UMA_ALIGN_CACHE, 0); +#endif thread0.td_turnstile = turnstile_alloc(); } SYSINIT(turnstile0, SI_SUB_LOCK, SI_ORDER_ANY, init_turnstile0, NULL); @@ -447,20 +457,47 @@ turnstile_setowner(struct turnstile *ts, struct thread *owner) LIST_INSERT_HEAD(&owner->td_contested, ts, ts_link); } +#ifdef INVARIANTS /* - * Malloc a turnstile for a new thread, initialize it and return it. + * UMA zone item deallocator. */ -struct turnstile * -turnstile_alloc(void) +static void +turnstile_dtor(void *mem, int size, void *arg) +{ + struct turnstile *ts; + + ts = mem; + MPASS(TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE])); + MPASS(TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE])); + MPASS(TAILQ_EMPTY(&ts->ts_pending)); +} +#endif + +/* + * UMA zone item initializer. + */ +static int +turnstile_init(void *mem, int size, int flags) { struct turnstile *ts; - ts = malloc(sizeof(struct turnstile), M_TURNSTILE, M_WAITOK | M_ZERO); + bzero(mem, size); + ts = mem; TAILQ_INIT(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]); TAILQ_INIT(&ts->ts_blocked[TS_SHARED_QUEUE]); TAILQ_INIT(&ts->ts_pending); LIST_INIT(&ts->ts_free); - return (ts); + return (0); +} + +/* + * Get a turnstile for a new thread. 
+ */ +struct turnstile * +turnstile_alloc(void) +{ + + return (uma_zalloc(turnstile_zone, M_WAITOK)); } /* @@ -470,11 +507,7 @@ void turnstile_free(struct turnstile *ts) { - MPASS(ts != NULL); - MPASS(TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE])); - MPASS(TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE])); - MPASS(TAILQ_EMPTY(&ts->ts_pending)); - free(ts, M_TURNSTILE); + uma_zfree(turnstile_zone, ts); } /* |