diff options
author | Robert Watson <rwatson@FreeBSD.org> | 2007-04-04 09:11:34 +0000 |
---|---|---|
committer | Robert Watson <rwatson@FreeBSD.org> | 2007-04-04 09:11:34 +0000 |
commit | 5e3f7694b189584ae30219566fcc6c4c3d059305 (patch) | |
tree | 8805b4674ed3429ddf05f19fd5eb0813fb7884aa /sys/kern/kern_event.c | |
parent | f2f72fff6e246e95be9fea218268c6c609b6294b (diff) | |
download | src-5e3f7694b189584ae30219566fcc6c4c3d059305.tar.gz src-5e3f7694b189584ae30219566fcc6c4c3d059305.zip |
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are important for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
Notes
Notes:
svn path=/head/; revision=168355
Diffstat (limited to 'sys/kern/kern_event.c')
-rw-r--r-- | sys/kern/kern_event.c | 18 |
1 files changed, 9 insertions, 9 deletions
diff --git a/sys/kern/kern_event.c b/sys/kern/kern_event.c index 8e2a10da6fba..87706a1fa7d4 100644 --- a/sys/kern/kern_event.c +++ b/sys/kern/kern_event.c @@ -527,15 +527,15 @@ kqueue(struct thread *td, struct kqueue_args *uap) knlist_init(&kq->kq_sel.si_note, &kq->kq_lock, NULL, NULL, NULL); TASK_INIT(&kq->kq_task, 0, kqueue_task, kq); - FILEDESC_LOCK_FAST(fdp); + FILEDESC_XLOCK(fdp); SLIST_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list); - FILEDESC_UNLOCK_FAST(fdp); + FILEDESC_XUNLOCK(fdp); FILE_LOCK(fp); fp->f_flag = FREAD | FWRITE; fp->f_type = DTYPE_KQUEUE; - fp->f_ops = &kqueueops; fp->f_data = kq; + fp->f_ops = &kqueueops; FILE_UNLOCK(fp); fdrop(fp, td); @@ -1493,9 +1493,9 @@ kqueue_close(struct file *fp, struct thread *td) KQ_UNLOCK(kq); - FILEDESC_LOCK_FAST(fdp); + FILEDESC_XLOCK(fdp); SLIST_REMOVE(&fdp->fd_kqlist, kq, kqueue, kq_list); - FILEDESC_UNLOCK_FAST(fdp); + FILEDESC_XUNLOCK(fdp); knlist_destroy(&kq->kq_sel.si_note); mtx_destroy(&kq->kq_lock); @@ -1781,9 +1781,9 @@ again: /* need to reaquire lock since we have dropped it */ } /* - * remove all knotes referencing a specified fd - * must be called with FILEDESC lock. This prevents a race where a new fd - * comes along and occupies the entry and we attach a knote to the fd. + * Remove all knotes referencing a specified fd must be called with FILEDESC + * lock. This prevents a race where a new fd comes along and occupies the + * entry and we attach a knote to the fd. */ void knote_fdclose(struct thread *td, int fd) @@ -1793,7 +1793,7 @@ knote_fdclose(struct thread *td, int fd) struct knote *kn; int influx; - FILEDESC_LOCK_ASSERT(fdp, MA_OWNED); + FILEDESC_XLOCK_ASSERT(fdp); /* * We shouldn't have to worry about new kevents appearing on fd |