From 0ffc7ed7e310f5810af6c04137678d0bef00f2f0 Mon Sep 17 00:00:00 2001
From: Mark Johnston
Date: Sat, 30 Sep 2017 23:41:28 +0000
Subject: Have uiomove_object_page() keep accessed pages in the active queue.

Previously, uiomove_object_page() would maintain LRU by requeuing the
accessed page. This involves acquiring one of the heavily contended page
queue locks. Moreover, it is unnecessarily expensive for pages in the
active queue.

As of r254304 the page daemon continually performs a slow scan of the
active queue, with the effect that unreferenced pages are gradually
moved to the inactive queue, from which they can be reclaimed. Prior to
that revision, the active queue was scanned only during shortages of
free and inactive pages, meaning that unreferenced pages could get
"stuck" in the queue. Thus, tmpfs was required to use the inactive queue
and requeue pages in order to maintain LRU. Now that this is no longer
the case, tmpfs I/O operations can use the active queue and avoid the
page queue locks in most cases, instead setting PGA_REFERENCED on
referenced pages to provide pseudo-LRU.

Reviewed by:	alc (previous version)
MFC after:	2 weeks
---
 sys/kern/uipc_shm.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

(limited to 'sys/kern/uipc_shm.c')

diff --git a/sys/kern/uipc_shm.c b/sys/kern/uipc_shm.c
index 33e0ddfd65fb..6f74f45cc74b 100644
--- a/sys/kern/uipc_shm.c
+++ b/sys/kern/uipc_shm.c
@@ -209,12 +209,10 @@ uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
 	}
 	vm_page_lock(m);
 	vm_page_hold(m);
-	if (m->queue == PQ_NONE) {
-		vm_page_deactivate(m);
-	} else {
-		/* Requeue to maintain LRU ordering. */
-		vm_page_requeue(m);
-	}
+	if (m->queue != PQ_ACTIVE)
+		vm_page_activate(m);
+	else
+		vm_page_reference(m);
 	vm_page_unlock(m);
 	VM_OBJECT_WUNLOCK(obj);
 	error = uiomove_fromphys(&m, offset, tlen, uio);
-- 
cgit v1.2.3
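
For readers skimming the hunk, the annotated excerpt below sketches how the
modified block of uiomove_object_page() reads after this change. It restates
only the lines visible in the diff above, with comments added; the rest of the
function is omitted rather than reconstructed, and the comments paraphrase the
commit message rather than document guaranteed VM behavior.

	vm_page_lock(m);
	vm_page_hold(m);
	/*
	 * Pseudo-LRU: a page that is not already in the active queue is
	 * activated, which takes a page queue lock once.  A page that is
	 * already active only has PGA_REFERENCED set via vm_page_reference(),
	 * avoiding the page queue locks on the common repeated-access path.
	 * The page daemon's continual slow scan of the active queue (r254304)
	 * then gradually moves pages that stop being referenced toward the
	 * inactive queue, preserving approximate LRU ordering.
	 */
	if (m->queue != PQ_ACTIVE)
		vm_page_activate(m);
	else
		vm_page_reference(m);
	vm_page_unlock(m);
	VM_OBJECT_WUNLOCK(obj);
	error = uiomove_fromphys(&m, offset, tlen, uio);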