aboutsummaryrefslogtreecommitdiff
path: root/sys/vm/vm_pageout.c
diff options
context:
space:
mode:
authorKonstantin Belousov <kib@FreeBSD.org>2010-05-06 04:57:33 +0000
committerKonstantin Belousov <kib@FreeBSD.org>2010-05-06 04:57:33 +0000
commit8c6162468b8575f42092c138f9885feea2097ce0 (patch)
treeb0708fbfab991d9936b1dbd4c8788203ed87c301 /sys/vm/vm_pageout.c
parentd238560c7c0bf4d10d4c23d4a61c8476b881ee78 (diff)
downloadsrc-8c6162468b8575f42092c138f9885feea2097ce0.tar.gz
src-8c6162468b8575f42092c138f9885feea2097ce0.zip
Add a helper function vm_pageout_page_lock(), similar to tegge's
vm_pageout_fallback_object_lock(), to obtain the page lock while having page queue lock locked, and still maintain the page position in a queue. Use the helper to lock the page in the pageout daemon and contig launder iterators instead of skipping the page if its lock is contested. Skipping locked pages easily causes pagedaemon or launder to not make progress with page cleaning. Proposed and reviewed by: alc
Notes
Notes: svn path=/head/; revision=207694
Diffstat (limited to 'sys/vm/vm_pageout.c')
-rw-r--r--sys/vm/vm_pageout.c72
1 file changed, 60 insertions, 12 deletions
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 9921a84b6bbe..a84b6d409653 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -215,6 +215,17 @@ static void vm_req_vmdaemon(int req);
#endif
static void vm_pageout_page_stats(void);
+static void
+vm_pageout_init_marker(vm_page_t marker, u_short queue)
+{
+
+ bzero(marker, sizeof(*marker));
+ marker->flags = PG_FICTITIOUS | PG_MARKER;
+ marker->oflags = VPO_BUSY;
+ marker->queue = queue;
+ marker->wire_count = 1;
+}
+
/*
* vm_pageout_fallback_object_lock:
*
@@ -237,16 +248,8 @@ vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
u_short queue;
vm_object_t object;
- /*
- * Initialize our marker
- */
- bzero(&marker, sizeof(marker));
- marker.flags = PG_FICTITIOUS | PG_MARKER;
- marker.oflags = VPO_BUSY;
- marker.queue = m->queue;
- marker.wire_count = 1;
-
queue = m->queue;
+ vm_pageout_init_marker(&marker, queue);
object = m->object;
TAILQ_INSERT_AFTER(&vm_page_queues[queue].pl,
@@ -268,6 +271,43 @@ vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
}
/*
+ * Lock the page while holding the page queue lock. Use marker page
+ * to detect page queue changes and maintain notion of next page on
+ * page queue. Return TRUE if no changes were detected, FALSE
+ * otherwise. The page is locked on return. The page queue lock might
+ * be dropped and reacquired.
+ *
+ * This function depends on normal struct vm_page being type stable.
+ */
+boolean_t
+vm_pageout_page_lock(vm_page_t m, vm_page_t *next)
+{
+ struct vm_page marker;
+ boolean_t unchanged;
+ u_short queue;
+
+ vm_page_lock_assert(m, MA_NOTOWNED);
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+
+ if (vm_page_trylock(m))
+ return (TRUE);
+
+ queue = m->queue;
+ vm_pageout_init_marker(&marker, queue);
+
+ TAILQ_INSERT_AFTER(&vm_page_queues[queue].pl, m, &marker, pageq);
+ vm_page_unlock_queues();
+ vm_page_lock(m);
+ vm_page_lock_queues();
+
+ /* Page queue might have changed. */
+ *next = TAILQ_NEXT(&marker, pageq);
+ unchanged = (m->queue == queue && &marker == TAILQ_NEXT(m, pageq));
+ TAILQ_REMOVE(&vm_page_queues[queue].pl, &marker, pageq);
+ return (unchanged);
+}
+
+/*
* vm_pageout_clean:
*
* Clean the page and remove it from the laundry.
@@ -777,7 +817,11 @@ rescan0:
if (m->flags & PG_MARKER)
continue;
- if (!vm_page_trylock(m)) {
+ /*
+ * Lock the page.
+ */
+ if (!vm_pageout_page_lock(m, &next)) {
+ vm_page_unlock(m);
addl_page_shortage++;
continue;
}
@@ -1112,7 +1156,9 @@ unlock_and_continue:
m = next;
continue;
}
- if (!vm_page_trylock(m) || (object = m->object) == NULL) {
+ if (!vm_pageout_page_lock(m, &next) ||
+ (object = m->object) == NULL) {
+ vm_page_unlock(m);
m = next;
continue;
}
@@ -1375,7 +1421,9 @@ vm_pageout_page_stats()
continue;
}
vm_page_lock_assert(m, MA_NOTOWNED);
- if (vm_page_trylock(m) == 0 || (object = m->object) == NULL) {
+ if (!vm_pageout_page_lock(m, &next) ||
+ (object = m->object) == NULL) {
+ vm_page_unlock(m);
m = next;
continue;
}