author     Jeff Roberson <jeff@FreeBSD.org>    2019-12-15 03:15:06 +0000
committer  Jeff Roberson <jeff@FreeBSD.org>    2019-12-15 03:15:06 +0000
commit     a80817786433b8c78023e5086809bd673e704fd1 (patch)
tree       c99d966314ef679adeaa8e12bb2cb88becf174ef /sys/kern
parent     d966c7615f74abea52e6f12981f93f16c5b4d421 (diff)
Add a deferred free mechanism for freeing swap space that does not require
an exclusive object lock.

Previously swap space was freed on a best effort basis when a page that had
valid swap was dirtied, thus invalidating the swap copy.  This may be done
inconsistently and requires the object lock, which is not always convenient.

Instead, track when swap space is present.  The first dirty is responsible
for deleting the space or setting PGA_SWAP_FREE, which will trigger
background scans to free the swap space.

Simplify the locking in vm_fault_dirty() now that we can reliably identify
the first dirty.

Discussed with:		alc, kib, markj
Differential Revision:	https://reviews.freebsd.org/D22654
Notes:
    svn path=/head/; revision=355765
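
As a rough sketch of the deferred-free idea described above (an illustration
only, not the kernel's implementation: page_first_dirty() and page_has_swap()
are hypothetical names, and the real logic is presumably centralized in
vm_page_set_dirty(), which the hunks below switch to), the first dirtying of a
page either frees the stale swap copy immediately when the exclusive object
lock happens to be held, or sets PGA_SWAP_FREE so a background scan can free
it later:

	#include <sys/param.h>
	#include <sys/lock.h>
	#include <sys/rwlock.h>

	#include <vm/vm.h>
	#include <vm/vm_object.h>
	#include <vm/vm_page.h>
	#include <vm/vm_pager.h>

	/*
	 * Stand-in predicate (hypothetical): the real check would ask the
	 * swap pager whether the page still has an assigned swap block.
	 */
	static bool
	page_has_swap(vm_page_t m __unused)
	{
		return (true);
	}

	/*
	 * Hypothetical helper invoked on the first dirtying of a page.
	 */
	static void
	page_first_dirty(vm_page_t m)
	{
		vm_object_t obj;

		obj = m->object;
		if (!page_has_swap(m))
			return;
		if (VM_OBJECT_WOWNED(obj)) {
			/* Exclusive object lock held: free the stale swap copy now. */
			vm_pager_page_unswapped(m);
		} else {
			/*
			 * No exclusive object lock: mark the page and let a
			 * background scan that sees PGA_SWAP_FREE release the
			 * swap space later.
			 */
			vm_page_aflag_set(m, PGA_SWAP_FREE);
		}
	}

The hunks below are the caller side of that change: the shm code stops pairing
vm_page_dirty() with vm_pager_page_unswapped() under the object write lock and
instead calls vm_page_set_dirty(), trading immediate swap reclamation for
staying off the exclusive object lock in the dirty path.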
Diffstat (limited to 'sys/kern')
-rw-r--r--	sys/kern/uipc_shm.c	16
1 file changed, 6 insertions, 10 deletions
diff --git a/sys/kern/uipc_shm.c b/sys/kern/uipc_shm.c
index 1891284503ab..63b460f369f5 100644
--- a/sys/kern/uipc_shm.c
+++ b/sys/kern/uipc_shm.c
@@ -198,7 +198,7 @@ uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
* type object.
*/
rv = vm_page_grab_valid(&m, obj, idx,
- VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOBUSY);
+ VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY);
if (rv != VM_PAGER_OK) {
VM_OBJECT_WUNLOCK(obj);
printf("uiomove_object: vm_obj %p idx %jd pager error %d\n",
@@ -207,13 +207,10 @@ uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
}
VM_OBJECT_WUNLOCK(obj);
error = uiomove_fromphys(&m, offset, tlen, uio);
- if (uio->uio_rw == UIO_WRITE && error == 0) {
- VM_OBJECT_WLOCK(obj);
- vm_page_dirty(m);
- vm_pager_page_unswapped(m);
- VM_OBJECT_WUNLOCK(obj);
- }
- vm_page_unwire(m, PQ_ACTIVE);
+ if (uio->uio_rw == UIO_WRITE && error == 0)
+ vm_page_set_dirty(m);
+ vm_page_aflag_set(m, PGA_REFERENCED);
+ vm_page_sunbusy(m);
return (error);
}
@@ -527,9 +524,8 @@ retry:
pmap_zero_page_area(m, base, PAGE_SIZE - base);
KASSERT(vm_page_all_valid(m),
("shm_dotruncate: page %p is invalid", m));
- vm_page_dirty(m);
+ vm_page_set_dirty(m);
vm_page_xunbusy(m);
- vm_pager_page_unswapped(m);
}
}
delta = IDX_TO_OFF(object->size - nobjsize);