author     John Baldwin <jhb@FreeBSD.org>  2001-05-21 18:47:17 +0000
committer  John Baldwin <jhb@FreeBSD.org>  2001-05-21 18:47:17 +0000
commit     270b041d95c6b7cb83d52c869973ff029521540f (patch)
tree       16822830b92d8ffc7f2991279241b871aff98c14
parent     d8aad40c887d356320a70a4c4e5d628a8f13f931 (diff)
- Assert that the vm mutex is held in pipe_free_kmem().
- Don't release the vm mutex early in pipespace(); instead, hold it across
  vm_object_deallocate() if vm_map_find() returns an error and across
  pipe_free_kmem() if vm_map_find() succeeds.
- Add an XXX above a zfree(): since zalloc already has its own locking,
  one would hope that zfree() wouldn't need the vm lock.
Notes:
    svn path=/head/; revision=76940
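The change amounts to a caller-holds-the-lock convention: pipespace() keeps
vm_mtx held across pipe_free_kmem() (and across vm_object_deallocate() on the
error path), while pipe_free_kmem() no longer takes the lock itself and only
asserts ownership with mtx_assert(&vm_mtx, MA_OWNED). The following is a
minimal userland sketch of that convention, not kernel code: it assumes a
pthread mutex standing in for vm_mtx, the names free_kmem_locked() and
resize_buffer() are invented for illustration, and the trylock-based check is
only a crude analogue of mtx_assert(). Compile with -pthread.

#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t vm_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Callee: like pipe_free_kmem() after this change, it does not acquire the
 * lock itself, it only checks that the caller already holds it.  For a
 * default (non-recursive) mutex, pthread_mutex_trylock() returns EBUSY when
 * the mutex is already locked, which serves here as a rough stand-in for
 * mtx_assert(&vm_mtx, MA_OWNED).
 */
static void
free_kmem_locked(void)
{
        assert(pthread_mutex_trylock(&vm_lock) == EBUSY);
        printf("freeing old buffer with the lock held\n");
}

/*
 * Caller: like pipespace() after this change, it keeps the lock held across
 * the callee and only drops it once the callee has returned.
 */
static void
resize_buffer(void)
{
        pthread_mutex_lock(&vm_lock);
        free_kmem_locked();
        pthread_mutex_unlock(&vm_lock);  /* released only after the callee */
}

int
main(void)
{
        resize_buffer();
        return (0);
}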
-rw-r--r--  sys/kern/sys_pipe.c  7
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/sys/kern/sys_pipe.c b/sys/kern/sys_pipe.c
index a78844898909..8c34a9aff3e5 100644
--- a/sys/kern/sys_pipe.c
+++ b/sys/kern/sys_pipe.c
@@ -266,15 +266,16 @@ pipespace(cpipe, size)
         error = vm_map_find(kernel_map, object, 0,
                 (vm_offset_t *) &buffer, size, 1,
                 VM_PROT_ALL, VM_PROT_ALL, 0);
-        mtx_unlock(&vm_mtx);
 
         if (error != KERN_SUCCESS) {
                 vm_object_deallocate(object);
+                mtx_unlock(&vm_mtx);
                 return (ENOMEM);
         }
 
         /* free old resources if we're resizing */
         pipe_free_kmem(cpipe);
+        mtx_unlock(&vm_mtx);
         cpipe->pipe_buffer.object = object;
         cpipe->pipe_buffer.buffer = buffer;
         cpipe->pipe_buffer.size = size;
@@ -1151,6 +1152,7 @@ pipe_free_kmem(cpipe)
         struct pipe *cpipe;
 {
 
+        mtx_assert(&vm_mtx, MA_OWNED);
         if (cpipe->pipe_buffer.buffer != NULL) {
                 if (cpipe->pipe_buffer.size > PIPE_SIZE)
                         --nbigpipe;
@@ -1212,6 +1214,9 @@ pipeclose(cpipe)
                  */
                 mtx_lock(&vm_mtx);
                 pipe_free_kmem(cpipe);
+                /* XXX: erm, doesn't zalloc already have its own locks and
+                 * not need the giant vm lock?
+                 */
                 zfree(pipe_zone, cpipe);
                 mtx_unlock(&vm_mtx);
         }