diff options
author | Mark Johnston <markj@FreeBSD.org> | 2019-07-29 22:01:28 +0000 |
---|---|---|
committer | Mark Johnston <markj@FreeBSD.org> | 2019-07-29 22:01:28 +0000 |
commit | 98549e2dc6fb0c38fef2a5357b10c4eb99674d9d (patch) | |
tree | 936bb59b20e13c4792fdc1b407ad6aee087773c8 /sys/vm | |
parent | 724450761630cc0b3d8991ec2de00a8ceb507384 (diff) | |
download | src-98549e2dc6fb0c38fef2a5357b10c4eb99674d9d.tar.gz src-98549e2dc6fb0c38fef2a5357b10c4eb99674d9d.zip |
Centralize the logic in vfs_vmio_unwire() and sendfile_free_page().
Both of these functions atomically unwire a page, optionally attempt
to free the page, and enqueue or requeue the page. Add functions
vm_page_release() and vm_page_release_locked() to perform the same task.
The latter must be called with the page's object lock held.
As a side effect of this refactoring, the buffer cache will no longer
attempt to free mapped pages when completing direct I/O. This is
consistent with the handling of pages by sendfile(SF_NOCACHE).
Reviewed by: alc, kib
MFC after: 2 weeks
Sponsored by: Netflix
Differential Revision: https://reviews.freebsd.org/D20986
Notes
Notes:
svn path=/head/; revision=350431
Diffstat (limited to 'sys/vm')
-rw-r--r-- | sys/vm/vm_page.c | 97 | ||||
-rw-r--r-- | sys/vm/vm_page.h | 7 |
2 files changed, 85 insertions(+), 19 deletions(-)
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c index 3d80dcda16d0..26398a7a7408 100644 --- a/sys/vm/vm_page.c +++ b/sys/vm/vm_page.c @@ -3747,29 +3747,92 @@ vm_page_unswappable(vm_page_t m) vm_page_enqueue(m, PQ_UNSWAPPABLE); } +static void +vm_page_release_toq(vm_page_t m, int flags) +{ + + /* + * Use a check of the valid bits to determine whether we should + * accelerate reclamation of the page. The object lock might not be + * held here, in which case the check is racy. At worst we will either + * accelerate reclamation of a valid page and violate LRU, or + * unnecessarily defer reclamation of an invalid page. + * + * If we were asked to not cache the page, place it near the head of the + * inactive queue so that is reclaimed sooner. + */ + if ((flags & (VPR_TRYFREE | VPR_NOREUSE)) != 0 || m->valid == 0) + vm_page_deactivate_noreuse(m); + else if (vm_page_active(m)) + vm_page_reference(m); + else + vm_page_deactivate(m); +} + /* - * Attempt to free the page. If it cannot be freed, do nothing. Returns true - * if the page is freed and false otherwise. - * - * The page must be managed. The page and its containing object must be - * locked. + * Unwire a page and either attempt to free it or re-add it to the page queues. */ -bool -vm_page_try_to_free(vm_page_t m) +void +vm_page_release(vm_page_t m, int flags) +{ + vm_object_t object; + bool freed; + + KASSERT((m->oflags & VPO_UNMANAGED) == 0, + ("vm_page_release: page %p is unmanaged", m)); + + vm_page_lock(m); + if (m->object != NULL) + VM_OBJECT_ASSERT_UNLOCKED(m->object); + if (vm_page_unwire_noq(m)) { + if ((object = m->object) == NULL) { + vm_page_free(m); + } else { + freed = false; + if ((flags & VPR_TRYFREE) != 0 && !vm_page_busied(m) && + /* Depends on type stability. */ + VM_OBJECT_TRYWLOCK(object)) { + /* + * Only free unmapped pages. The busy test from + * before the object was locked cannot be relied + * upon. 
+ */ + if ((object->ref_count == 0 || + !pmap_page_is_mapped(m)) && m->dirty == 0 && + !vm_page_busied(m)) { + vm_page_free(m); + freed = true; + } + VM_OBJECT_WUNLOCK(object); + } + + if (!freed) + vm_page_release_toq(m, flags); + } + } + vm_page_unlock(m); +} + +/* See vm_page_release(). */ +void +vm_page_release_locked(vm_page_t m, int flags) { - vm_page_assert_locked(m); VM_OBJECT_ASSERT_WLOCKED(m->object); - KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("page %p is unmanaged", m)); - if (m->dirty != 0 || vm_page_wired(m) || vm_page_busied(m)) - return (false); - if (m->object->ref_count != 0) { - pmap_remove_all(m); - if (m->dirty != 0) - return (false); + KASSERT((m->oflags & VPO_UNMANAGED) == 0, + ("vm_page_release_locked: page %p is unmanaged", m)); + + vm_page_lock(m); + if (vm_page_unwire_noq(m)) { + if ((flags & VPR_TRYFREE) != 0 && + (m->object->ref_count == 0 || !pmap_page_is_mapped(m)) && + m->dirty == 0 && !vm_page_busied(m)) { + vm_page_free(m); + } else { + vm_page_release_toq(m, flags); + } } - vm_page_free(m); - return (true); + vm_page_unlock(m); } /* diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h index 3edde63abb9b..58f181d599c5 100644 --- a/sys/vm/vm_page.h +++ b/sys/vm/vm_page.h @@ -562,8 +562,12 @@ bool vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low, bool vm_page_reclaim_contig_domain(int domain, int req, u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary); void vm_page_reference(vm_page_t m); +#define VPR_TRYFREE 0x01 +#define VPR_NOREUSE 0x02 +void vm_page_release(vm_page_t m, int flags); +void vm_page_release_locked(vm_page_t m, int flags); bool vm_page_remove(vm_page_t); -int vm_page_rename (vm_page_t, vm_object_t, vm_pindex_t); +int vm_page_rename(vm_page_t, vm_object_t, vm_pindex_t); vm_page_t vm_page_replace(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex); void vm_page_requeue(vm_page_t m); @@ -574,7 +578,6 @@ void vm_page_set_valid_range(vm_page_t m, int base, int size); int 
vm_page_sleep_if_busy(vm_page_t m, const char *msg); vm_offset_t vm_page_startup(vm_offset_t vaddr); void vm_page_sunbusy(vm_page_t m); -bool vm_page_try_to_free(vm_page_t m); int vm_page_trysbusy(vm_page_t m); void vm_page_unhold_pages(vm_page_t *ma, int count); void vm_page_unswappable(vm_page_t m); |