author     Jeff Roberson <jeff@FreeBSD.org>  2019-10-15 03:35:11 +0000
committer  Jeff Roberson <jeff@FreeBSD.org>  2019-10-15 03:35:11 +0000
commit     63e9755548e4feebf798686ab8bce0cdaaaf7b46 (patch)
tree       73004f9ecd43d157304327e6d0feb4ddf93012af /sys/kern/vfs_bio.c
parent     f44e7436797617b6c6a42a280befb312f1ebf50f (diff)
(1/6) Replace busy checks with acquires where it is trivial to do so.
This is the first in a series of patches that promotes the page busy
field to a first class lock that no longer requires the object lock for
consistency.

Reviewed by:	kib, markj
Tested by:	pho
Sponsored by:	Netflix, Intel
Differential Revision:	https://reviews.freebsd.org/D21548
Notes:
    svn path=/head/; revision=353535
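The idiom this series replaces: a caller that needed a page free of
exclusive busiers would loop, sleeping on the busy state, and then
operate holding no busy reference of its own; the replacement takes a
shared busy reference for the duration of the access. A minimal sketch
of the two patterns, assuming the FreeBSD page KPIs as of this commit
(the helper functions are hypothetical; the caller holds the object
write lock):

	#include <sys/param.h>
	#include <vm/vm.h>
	#include <vm/vm_page.h>

	/* Old pattern: wait out any exclusive busier, then proceed. */
	static void
	wait_not_xbusied(vm_page_t m)
	{
		/* vm_page_sleep_if_xbusy() drops the object lock to sleep. */
		while (vm_page_xbusied(m))
			vm_page_sleep_if_xbusy(m, "vbpage");
		/* No reference held; only the object lock pins the state. */
	}

	/* New pattern: hold a shared busy reference across the access. */
	static void
	access_sbusied(vm_page_t m)
	{
		vm_page_busy_acquire(m, VM_ALLOC_SBUSY);
		/* ... inspect or partially invalidate the page ... */
		vm_page_sunbusy(m);
	}

Once later patches in the series stop requiring the object lock for
busy-state consistency, the held reference, not the lock, is what keeps
exclusive busiers out.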
Diffstat (limited to 'sys/kern/vfs_bio.c')
-rw-r--r--  sys/kern/vfs_bio.c | 48
1 file changed, 23 insertions(+), 25 deletions(-)
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 8e14592b402d..baeaf2e32dc0 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -2945,10 +2945,10 @@ vfs_vmio_invalidate(struct buf *bp)
presid = resid > (PAGE_SIZE - poffset) ?
(PAGE_SIZE - poffset) : resid;
KASSERT(presid >= 0, ("brelse: extra page"));
- while (vm_page_xbusied(m))
- vm_page_sleep_if_xbusy(m, "mbncsh");
+ vm_page_busy_acquire(m, VM_ALLOC_SBUSY);
if (pmap_page_wired_mappings(m) == 0)
vm_page_set_invalid(m, poffset, presid);
+ vm_page_sunbusy(m);
vm_page_release_locked(m, flags);
resid -= presid;
poffset = 0;
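
The behavioral shift in the hunk above is easy to miss: the old loop
only verified that no exclusive busier existed at the instant of the
check, leaning on the object lock to keep things that way, while the
new code holds a shared busy across vm_page_set_invalid() and releases
it afterwards. A hedged sketch of the resulting shape (helper name
hypothetical; the caller holds the object write lock, as
vfs_vmio_invalidate() does):

	#include <sys/param.h>
	#include <vm/vm.h>
	#include <vm/pmap.h>
	#include <vm/vm_page.h>

	/* Invalidate part of a page unless it is wired into some pmap. */
	static void
	invalidate_unless_wired(vm_page_t m, int poffset, int presid)
	{
		vm_page_busy_acquire(m, VM_ALLOC_SBUSY);
		if (pmap_page_wired_mappings(m) == 0)
			vm_page_set_invalid(m, poffset, presid);
		vm_page_sunbusy(m);
	}
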
@@ -3651,7 +3651,7 @@ vfs_clean_pages_dirty_buf(struct buf *bp)
("vfs_clean_pages_dirty_buf: no buffer offset"));
VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
- vfs_drain_busy_pages(bp);
+ vfs_busy_pages_acquire(bp);
vfs_setdirty_locked_object(bp);
for (i = 0; i < bp->b_npages; i++) {
noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
@@ -3663,6 +3663,7 @@ vfs_clean_pages_dirty_buf(struct buf *bp)
/* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
foff = noff;
}
+ vfs_busy_pages_release(bp);
VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
}
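
Note the asymmetry these two hunks fix together: vfs_drain_busy_pages()
returned holding no references (it shared-busied pages only to bridge
its own sleeps and dropped them before returning), so the dirty-page
walk that followed ran without busy protection. vfs_busy_pages_acquire()
instead returns with every page shared-busied, which is why the matching
vfs_busy_pages_release() call now appears before the object lock is
dropped. The calling pattern, sketched under the same assumptions as
above (obj standing for bp->b_bufobj->bo_object):

	VM_OBJECT_WLOCK(obj);
	vfs_busy_pages_acquire(bp);	/* sbusy bp->b_pages[0..b_npages) */
	/* ... walk the pages; none can become exclusive-busy meanwhile ... */
	vfs_busy_pages_release(bp);	/* sunbusy each page */
	VM_OBJECT_WUNLOCK(obj);
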
@@ -4559,28 +4560,25 @@ vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off, vm_page_t m)
}
/*
- * Ensure that all buffer pages are not exclusive busied. If any page is
- * exclusive busy, drain it.
+ * Acquire a shared busy on all pages in the buf.
*/
void
-vfs_drain_busy_pages(struct buf *bp)
+vfs_busy_pages_acquire(struct buf *bp)
{
- vm_page_t m;
- int i, last_busied;
+ int i;
VM_OBJECT_ASSERT_WLOCKED(bp->b_bufobj->bo_object);
- last_busied = 0;
- for (i = 0; i < bp->b_npages; i++) {
- m = bp->b_pages[i];
- if (vm_page_xbusied(m)) {
- for (; last_busied < i; last_busied++)
- vm_page_sbusy(bp->b_pages[last_busied]);
- while (vm_page_xbusied(m)) {
- vm_page_sleep_if_xbusy(m, "vbpage");
- }
- }
- }
- for (i = 0; i < last_busied; i++)
+ for (i = 0; i < bp->b_npages; i++)
+ vm_page_busy_acquire(bp->b_pages[i], VM_ALLOC_SBUSY);
+}
+
+void
+vfs_busy_pages_release(struct buf *bp)
+{
+ int i;
+
+ VM_OBJECT_ASSERT_WLOCKED(bp->b_bufobj->bo_object);
+ for (i = 0; i < bp->b_npages; i++)
vm_page_sunbusy(bp->b_pages[i]);
}
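
The deleted drain logic was as intricate as it was because sleeping on
a busy page drops the object lock: before each sleep it had to
shared-busy every page already scanned, or an exclusive busier could
slip in behind the scan while the lock was out. vm_page_busy_acquire()
folds the try/sleep/retry cycle into a single call, so a flat loop
suffices. Roughly what that call subsumes, expressed in the older KPIs
(an illustrative simplification, not the real lockless implementation):

	for (;;) {
		if (vm_page_trysbusy(m))
			break;
		/* Drops and retakes the object lock around the sleep. */
		vm_page_sleep_if_xbusy(m, "vbpage");
	}

The same consolidation shows up in vfs_busy_pages() below: the
paging-in-progress count is charged once for the whole buf via
vm_object_pip_add(obj, bp->b_npages) rather than once per page, and the
per-page loop keeps only a vm_page_assert_sbusied() as a cheap
invariant check.
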
@@ -4613,17 +4611,17 @@ vfs_busy_pages(struct buf *bp, int clear_modify)
KASSERT(bp->b_offset != NOOFFSET,
("vfs_busy_pages: no buffer offset"));
VM_OBJECT_WLOCK(obj);
- vfs_drain_busy_pages(bp);
+ if ((bp->b_flags & B_CLUSTER) == 0) {
+ vm_object_pip_add(obj, bp->b_npages);
+ vfs_busy_pages_acquire(bp);
+ }
if (bp->b_bufsize != 0)
vfs_setdirty_locked_object(bp);
bogus = false;
for (i = 0; i < bp->b_npages; i++) {
m = bp->b_pages[i];
- if ((bp->b_flags & B_CLUSTER) == 0) {
- vm_object_pip_add(obj, 1);
- vm_page_sbusy(m);
- }
+ vm_page_assert_sbusied(m);
/*
* When readying a buffer for a read ( i.e
* clear_modify == 0 ), it is important to do