aboutsummaryrefslogtreecommitdiff
path: root/sys/vm/vm_page.c
diff options
context:
space:
mode:
authorAlan Cox <alc@FreeBSD.org>2009-05-13 05:39:39 +0000
committerAlan Cox <alc@FreeBSD.org>2009-05-13 05:39:39 +0000
commit1c1b26f276f85b086bc6e579c452e52231c234ac (patch)
treede540785bb7f52f8c0b6353b22a06e54fff788d1 /sys/vm/vm_page.c
parentb1f26c738c3a2433f1759099fceb31d741e4f462 (diff)
downloadsrc-1c1b26f276f85b086bc6e579c452e52231c234ac.tar.gz
src-1c1b26f276f85b086bc6e579c452e52231c234ac.zip
Eliminate page queues locking from bufdone_finish() through the
following changes:

Rename vfs_page_set_valid() to vfs_page_set_validclean() to reflect
what this function actually does.  Suggested by: tegge

Introduce a new version of vfs_page_set_valid() that does no more than
what the function's name implies.  Specifically, it does not update the
page's dirty mask, and thus it does not require the page queues lock to
be held.

Update two of the three callers to the old vfs_page_set_valid() to call
vfs_page_set_validclean() instead because they actually require the
page's dirty mask to be cleared.

Introduce vm_page_set_valid().

Reviewed by: tegge
Notes
Notes: svn path=/head/; revision=192034
Diffstat (limited to 'sys/vm/vm_page.c')
-rw-r--r--sys/vm/vm_page.c45
1 file changed, 45 insertions, 0 deletions
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 8befdd56705a..f0f90425a13a 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -1852,6 +1852,51 @@ vm_page_bits(int base, int size)
}
/*
+ * vm_page_set_valid:
+ *
+ * Sets portions of a page valid. The arguments are expected
+ * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
+ * of any partial chunks touched by the range. The invalid portion of
+ * such chunks will be zeroed.
+ *
+ * Unlike vm_page_set_validclean(), this function does not update the
+ * page's dirty mask, so the page queues lock need not be held.
+ *
+ * (base + size) must be less than or equal to PAGE_SIZE.
+ */
+void
+vm_page_set_valid(vm_page_t m, int base, int size)
+{
+ int endoff, frag;
+
+ VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ if (size == 0) /* handle degenerate case */
+ return;
+
+ /*
+ * If the base is not DEV_BSIZE aligned and the valid
+ * bit is clear, we have to zero out a portion of the
+ * first block.
+ */
+ /* frag = DEV_BSIZE-aligned start of the block containing base */
+ if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
+ (m->valid & (1 << (base >> DEV_BSHIFT))) == 0)
+ pmap_zero_page_area(m, frag, base - frag);
+
+ /*
+ * If the ending offset is not DEV_BSIZE aligned and the
+ * valid bit is clear, we have to zero out a portion of
+ * the last block.
+ */
+ endoff = base + size;
+ if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
+ (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0)
+ pmap_zero_page_area(m, endoff,
+ DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
+
+ /*
+ * Set valid bits inclusive of any overlap.
+ */
+ m->valid |= vm_page_bits(base, size);
+}
+
+/*
* vm_page_set_validclean:
*
* Sets portions of a page valid and clean. The arguments are expected