path: root/sys/vm/vm_glue.c
author     Alan Cox <alc@FreeBSD.org>  2005-12-16 18:34:14 +0000
committer  Alan Cox <alc@FreeBSD.org>  2005-12-16 18:34:14 +0000
commit     da61b9a69efce729d27cb9882c32c74d01cfebf0 (patch)
tree       aa11bb9f1f197a97830817243044264d50c17670 /sys/vm/vm_glue.c
parent     6ba9ec2d090f762965de08dfc0dcbe0f6a19145d (diff)
download   src-da61b9a69efce729d27cb9882c32c74d01cfebf0.tar.gz
           src-da61b9a69efce729d27cb9882c32c74d01cfebf0.zip
Use sf_buf_alloc() instead of vm_map_find() on exec_map to create the
ephemeral mappings that are used as the source for three copy operations
from kernel space to user space. There are two reasons for making this
change: (1) Under heavy load exec_map can fill up, causing vm_map_find()
to fail. When it fails, the nascent process is aborted (SIGABRT), whereas
this reimplementation using sf_buf_alloc() sleeps. (2) Although it is
possible to sleep on vm_map_find()'s failure until address space becomes
available (see kmem_alloc_wait()), using sf_buf_alloc() is faster.
Furthermore, the reimplementation uses a CPU private mapping, avoiding a
TLB shootdown on multiprocessors.

Problem uncovered by:	kris@
Reviewed by:	tegge@
MFC after:	3 weeks
Notes:
    svn path=/head/; revision=153485
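To illustrate the new interface, here is a minimal sketch of how an image
activator might copy part of a file's VM object out to user space using the
helpers added below. The function copy_header_to_user() and its parameters
(hdr_offset, uaddr, len) are hypothetical and not part of this commit;
sf_buf_kva() and copyout() are existing kernel interfaces, and len is
assumed not to cross a page boundary.

/*
 * Hypothetical caller (illustration only): pin and map one page of the
 * executable's VM object with a CPU-private sf_buf, copy a portion of it
 * to user space, then release the mapping.
 */
static int
copy_header_to_user(vm_object_t object, vm_ooffset_t hdr_offset,
    void *uaddr, size_t len)
{
	struct sf_buf *sf;
	int error;

	/* Pin the page and obtain a CPU-private kernel mapping for it. */
	sf = vm_imgact_map_page(object, hdr_offset);
	if (sf == NULL)
		return (EIO);

	/* Copy from the ephemeral kernel mapping to the user address. */
	error = copyout((char *)sf_buf_kva(sf) + (hdr_offset & PAGE_MASK),
	    uaddr, len);

	/* Drop the mapping, unpin the thread, and unhold the page. */
	vm_imgact_unmap_page(sf);
	return (error);
}

Because the mapping is CPU private and the thread stays pinned between
vm_imgact_map_page() and vm_imgact_unmap_page(), no cross-CPU TLB
invalidation is needed, which is the speedup the commit message refers to.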
Diffstat (limited to 'sys/vm/vm_glue.c')
-rw-r--r--	sys/vm/vm_glue.c	72
1 file changed, 72 insertions(+), 0 deletions(-)
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 9deb3630d52c..3843ecd5714b 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -70,6 +70,8 @@ __FBSDID("$FreeBSD$");
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
+#include <sys/sched.h>
+#include <sys/sf_buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
@@ -239,6 +241,76 @@ vsunlock(void *addr, size_t len)
VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}
+/*
+ * Pin the page contained within the given object at the given offset. If the
+ * page is not resident, allocate and load it using the given object's pager.
+ * Return the pinned page if successful; otherwise, return NULL.
+ */
+static vm_page_t
+vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
+{
+	vm_page_t m, ma[1];
+	vm_pindex_t pindex;
+	int rv;
+
+	VM_OBJECT_LOCK(object);
+	pindex = OFF_TO_IDX(offset);
+	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
+	if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
+		ma[0] = m;
+		rv = vm_pager_get_pages(object, ma, 1, 0);
+		m = vm_page_lookup(object, pindex);
+		if (m == NULL)
+			goto out;
+		if (m->valid == 0 || rv != VM_PAGER_OK) {
+			vm_page_lock_queues();
+			vm_page_free(m);
+			vm_page_unlock_queues();
+			m = NULL;
+			goto out;
+		}
+	}
+	vm_page_lock_queues();
+	vm_page_hold(m);
+	vm_page_wakeup(m);
+	vm_page_unlock_queues();
+out:
+	VM_OBJECT_UNLOCK(object);
+	return (m);
+}
+
+/*
+ * Return a CPU private mapping to the page at the given offset within the
+ * given object. The page is pinned before it is mapped.
+ */
+struct sf_buf *
+vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
+{
+	vm_page_t m;
+
+	m = vm_imgact_hold_page(object, offset);
+	if (m == NULL)
+		return (NULL);
+	sched_pin();
+	return (sf_buf_alloc(m, SFB_CPUPRIVATE));
+}
+
+/*
+ * Destroy the given CPU private mapping and unpin the page that it mapped.
+ */
+void
+vm_imgact_unmap_page(struct sf_buf *sf)
+{
+	vm_page_t m;
+
+	m = sf_buf_page(sf);
+	sf_buf_free(sf);
+	sched_unpin();
+	vm_page_lock_queues();
+	vm_page_unhold(m);
+	vm_page_unlock_queues();
+}
+
#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif