aboutsummaryrefslogtreecommitdiff
path: root/sys/vm/vm_page.c
diff options
context:
space:
mode:
authorMark Johnston <markj@FreeBSD.org>2019-07-29 22:01:28 +0000
committerMark Johnston <markj@FreeBSD.org>2019-07-29 22:01:28 +0000
commit98549e2dc6fb0c38fef2a5357b10c4eb99674d9d (patch)
tree936bb59b20e13c4792fdc1b407ad6aee087773c8 /sys/vm/vm_page.c
parent724450761630cc0b3d8991ec2de00a8ceb507384 (diff)
downloadsrc-98549e2dc6fb0c38fef2a5357b10c4eb99674d9d.tar.gz
src-98549e2dc6fb0c38fef2a5357b10c4eb99674d9d.zip
Centralize the logic in vfs_vmio_unwire() and sendfile_free_page().
Both of these functions atomically unwire a page, optionally attempt to free the page, and enqueue or requeue the page. Add functions vm_page_release() and vm_page_release_locked() to perform the same task. The latter must be called with the page's object lock held. As a side effect of this refactoring, the buffer cache will no longer attempt to free mapped pages when completing direct I/O. This is consistent with the handling of pages by sendfile(SF_NOCACHE). Reviewed by: alc, kib MFC after: 2 weeks Sponsored by: Netflix Differential Revision: https://reviews.freebsd.org/D20986
Notes
Notes: svn path=/head/; revision=350431
Diffstat (limited to 'sys/vm/vm_page.c')
-rw-r--r-- sys/vm/vm_page.c | 97
1 file changed, 80 insertions(+), 17 deletions(-)
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 3d80dcda16d0..26398a7a7408 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -3747,29 +3747,92 @@ vm_page_unswappable(vm_page_t m)
vm_page_enqueue(m, PQ_UNSWAPPABLE);
}
+static void
+vm_page_release_toq(vm_page_t m, int flags)
+{
+
+ /*
+ * Use a check of the valid bits to determine whether we should
+ * accelerate reclamation of the page. The object lock might not be
+ * held here, in which case the check is racy. At worst we will either
+ * accelerate reclamation of a valid page and violate LRU, or
+ * unnecessarily defer reclamation of an invalid page.
+ *
+	 * If we were asked to not cache the page, place it near the head of the
+	 * inactive queue so that it is reclaimed sooner.
+ */
+ if ((flags & (VPR_TRYFREE | VPR_NOREUSE)) != 0 || m->valid == 0)
+ vm_page_deactivate_noreuse(m);
+ else if (vm_page_active(m))
+ vm_page_reference(m);
+ else
+ vm_page_deactivate(m);
+}
+
/*
- * Attempt to free the page. If it cannot be freed, do nothing. Returns true
- * if the page is freed and false otherwise.
- *
- * The page must be managed. The page and its containing object must be
- * locked.
+ * Unwire a page and either attempt to free it or re-add it to the page queues.
*/
-bool
-vm_page_try_to_free(vm_page_t m)
+void
+vm_page_release(vm_page_t m, int flags)
+{
+ vm_object_t object;
+ bool freed;
+
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+ ("vm_page_release: page %p is unmanaged", m));
+
+ vm_page_lock(m);
+ if (m->object != NULL)
+ VM_OBJECT_ASSERT_UNLOCKED(m->object);
+ if (vm_page_unwire_noq(m)) {
+ if ((object = m->object) == NULL) {
+ vm_page_free(m);
+ } else {
+ freed = false;
+ if ((flags & VPR_TRYFREE) != 0 && !vm_page_busied(m) &&
+ /* Depends on type stability. */
+ VM_OBJECT_TRYWLOCK(object)) {
+ /*
+ * Only free unmapped pages. The busy test from
+ * before the object was locked cannot be relied
+ * upon.
+ */
+ if ((object->ref_count == 0 ||
+ !pmap_page_is_mapped(m)) && m->dirty == 0 &&
+ !vm_page_busied(m)) {
+ vm_page_free(m);
+ freed = true;
+ }
+ VM_OBJECT_WUNLOCK(object);
+ }
+
+ if (!freed)
+ vm_page_release_toq(m, flags);
+ }
+ }
+ vm_page_unlock(m);
+}
+
+/* See vm_page_release(). */
+void
+vm_page_release_locked(vm_page_t m, int flags)
{
- vm_page_assert_locked(m);
VM_OBJECT_ASSERT_WLOCKED(m->object);
- KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("page %p is unmanaged", m));
- if (m->dirty != 0 || vm_page_wired(m) || vm_page_busied(m))
- return (false);
- if (m->object->ref_count != 0) {
- pmap_remove_all(m);
- if (m->dirty != 0)
- return (false);
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+ ("vm_page_release_locked: page %p is unmanaged", m));
+
+ vm_page_lock(m);
+ if (vm_page_unwire_noq(m)) {
+ if ((flags & VPR_TRYFREE) != 0 &&
+ (m->object->ref_count == 0 || !pmap_page_is_mapped(m)) &&
+ m->dirty == 0 && !vm_page_busied(m)) {
+ vm_page_free(m);
+ } else {
+ vm_page_release_toq(m, flags);
+ }
}
- vm_page_free(m);
- return (true);
+ vm_page_unlock(m);
}
/*