Diffstat (limited to 'sys')
-rw-r--r--  sys/fs/nwfs/nwfs_io.c    |  2
-rw-r--r--  sys/fs/smbfs/smbfs_io.c  |  2
-rw-r--r--  sys/kern/uipc_syscalls.c |  2
-rw-r--r--  sys/kern/vfs_bio.c       |  2
-rw-r--r--  sys/nfsclient/nfs_bio.c  |  2
-rw-r--r--  sys/vm/swap_pager.c      | 21
-rw-r--r--  sys/vm/vm_object.c       | 10
-rw-r--r--  sys/vm/vm_page.c         | 11
-rw-r--r--  sys/vm/vm_page.h         | 12
-rw-r--r--  sys/vm/vnode_pager.c     |  2
10 files changed, 35 insertions, 31 deletions
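
In short, this change moves the WANTED and SWAPINPROG bits out of the page's
flags field, which is synchronized by the global page queues lock, into a new
oflags field synchronized by the lock on the page's containing object. A
minimal sketch of the two idioms follows (not part of the diff; the caller and
the "pgwait" wmesg are hypothetical, but the pattern is taken from the
vm_page_sleep_if_busy() hunk below):

	/*
	 * Before: setting PG_WANTED required the page queues lock, even
	 * though the msleep() itself sleeps on the object mutex.
	 */
	vm_page_lock_queues();
	vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
	vm_page_unlock_queues();
	msleep(m, VM_OBJECT_MTX(m->object), PVM, "pgwait", 0);

	/*
	 * After: VPO_WANTED lives in m->oflags, protected by the object
	 * lock the sleeper already holds, so the page queues lock is only
	 * taken for the PG_REFERENCED hint.
	 */
	vm_page_lock_queues();
	vm_page_flag_set(m, PG_REFERENCED);
	vm_page_unlock_queues();
	m->oflags |= VPO_WANTED;
	msleep(m, VM_OBJECT_MTX(m->object), PVM, "pgwait", 0);

On paths that already hold the object lock for other reasons, such as I/O
completion, the page queues lock acquisition disappears entirely, as the
uipc_syscalls.c and vfs_bio.c hunks below show.
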
diff --git a/sys/fs/nwfs/nwfs_io.c b/sys/fs/nwfs/nwfs_io.c
index b74cd4a4f488..3071fe1f78ae 100644
--- a/sys/fs/nwfs/nwfs_io.c
+++ b/sys/fs/nwfs/nwfs_io.c
@@ -483,7 +483,7 @@ nwfs_getpages(ap)
* now tell them that it is ok to use.
*/
if (!error) {
- if (m->flags & PG_WANTED)
+ if (m->oflags & VPO_WANTED)
vm_page_activate(m);
else
vm_page_deactivate(m);
diff --git a/sys/fs/smbfs/smbfs_io.c b/sys/fs/smbfs/smbfs_io.c
index 756ea012e0ae..0f8ed49004ee 100644
--- a/sys/fs/smbfs/smbfs_io.c
+++ b/sys/fs/smbfs/smbfs_io.c
@@ -551,7 +551,7 @@ smbfs_getpages(ap)
* now tell them that it is ok to use.
*/
if (!error) {
- if (m->flags & PG_WANTED)
+ if (m->oflags & VPO_WANTED)
vm_page_activate(m);
else
vm_page_deactivate(m);
diff --git a/sys/kern/uipc_syscalls.c b/sys/kern/uipc_syscalls.c
index 4ec0cf95ff89..df224c8be74e 100644
--- a/sys/kern/uipc_syscalls.c
+++ b/sys/kern/uipc_syscalls.c
@@ -2086,9 +2086,7 @@ retry_lookup:
VOP_UNLOCK(vp, 0, td);
VFS_UNLOCK_GIANT(vfslocked);
VM_OBJECT_LOCK(obj);
- vm_page_lock_queues();
vm_page_io_finish(pg);
- vm_page_unlock_queues();
if (!error)
VM_OBJECT_UNLOCK(obj);
mbstat.sf_iocnt++;
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index d027749c3f02..37ef55213a1e 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -3299,7 +3299,6 @@ vfs_unbusy_pages(struct buf *bp)
obj = bp->b_bufobj->bo_object;
VM_OBJECT_LOCK(obj);
- vm_page_lock_queues();
for (i = 0; i < bp->b_npages; i++) {
m = bp->b_pages[i];
if (m == bogus_page) {
@@ -3313,7 +3312,6 @@ vfs_unbusy_pages(struct buf *bp)
vm_object_pip_subtract(obj, 1);
vm_page_io_finish(m);
}
- vm_page_unlock_queues();
vm_object_pip_wakeupn(obj, 0);
VM_OBJECT_UNLOCK(obj);
}
diff --git a/sys/nfsclient/nfs_bio.c b/sys/nfsclient/nfs_bio.c
index 1fd25c5a8dde..445f7fdf7c5f 100644
--- a/sys/nfsclient/nfs_bio.c
+++ b/sys/nfsclient/nfs_bio.c
@@ -240,7 +240,7 @@ nfs_getpages(struct vop_getpages_args *ap)
* now tell them that it is ok to use.
*/
if (!error) {
- if (m->flags & PG_WANTED)
+ if (m->oflags & VPO_WANTED)
vm_page_activate(m);
else
vm_page_deactivate(m);
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 23281e3c2a67..4e4e4db3bf88 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -1056,16 +1056,14 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
bp->b_pager.pg_reqpage = reqpage - i;
VM_OBJECT_LOCK(object);
- vm_page_lock_queues();
{
int k;
for (k = i; k < j; ++k) {
bp->b_pages[k - i] = m[k];
- vm_page_flag_set(m[k], PG_SWAPINPROG);
+ m[k]->oflags |= VPO_SWAPINPROG;
}
}
- vm_page_unlock_queues();
bp->b_npages = j - i;
cnt.v_swapin++;
@@ -1093,14 +1091,15 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
swp_pager_strategy(bp);
/*
- * wait for the page we want to complete. PG_SWAPINPROG is always
+ * wait for the page we want to complete. VPO_SWAPINPROG is always
* cleared on completion. If an I/O error occurs, SWAPBLK_NONE
* is set in the meta-data.
*/
VM_OBJECT_LOCK(object);
- while ((mreq->flags & PG_SWAPINPROG) != 0) {
+ while ((mreq->oflags & VPO_SWAPINPROG) != 0) {
+ mreq->oflags |= VPO_WANTED;
vm_page_lock_queues();
- vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED);
+ vm_page_flag_set(mreq, PG_REFERENCED);
vm_page_unlock_queues();
cnt.v_intrans++;
if (msleep(mreq, VM_OBJECT_MTX(object), PSWP, "swread", hz*20)) {
@@ -1282,9 +1281,7 @@ swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
vm_page_dirty(mreq);
rtvals[i+j] = VM_PAGER_OK;
- vm_page_lock_queues();
- vm_page_flag_set(mreq, PG_SWAPINPROG);
- vm_page_unlock_queues();
+ mreq->oflags |= VPO_SWAPINPROG;
bp->b_pages[j] = mreq;
}
VM_OBJECT_UNLOCK(object);
@@ -1399,7 +1396,7 @@ swp_pager_async_iodone(struct buf *bp)
for (i = 0; i < bp->b_npages; ++i) {
vm_page_t m = bp->b_pages[i];
- vm_page_flag_clear(m, PG_SWAPINPROG);
+ m->oflags &= ~VPO_SWAPINPROG;
if (bp->b_ioflags & BIO_ERROR) {
/*
@@ -1418,7 +1415,7 @@ swp_pager_async_iodone(struct buf *bp)
* not match anything ).
*
* We have to wake specifically requested pages
- * up too because we cleared PG_SWAPINPROG and
+ * up too because we cleared VPO_SWAPINPROG and
* someone may be waiting for that.
*
* NOTE: for reads, m->dirty will probably
@@ -1472,7 +1469,7 @@ swp_pager_async_iodone(struct buf *bp)
/*
* We have to wake specifically requested pages
- * up too because we cleared PG_SWAPINPROG and
+ * up too because we cleared VPO_SWAPINPROG and
* could be waiting for it in getpages. However,
* be sure to not unbusy getpages specifically
* requested page - getpages expects it to be
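
The swap pager hunks above are two halves of one handshake. A condensed sketch
(simplified from the diff, not verbatim) of how the waiter and the I/O
completion now pair up under the object lock:

	/* Waiter, in swap_pager_getpages(); the object lock is held. */
	while ((mreq->oflags & VPO_SWAPINPROG) != 0) {
		mreq->oflags |= VPO_WANTED;
		msleep(mreq, VM_OBJECT_MTX(object), PSWP, "swread", hz * 20);
	}

	/* Completion, in swp_pager_async_iodone(); also under the object lock. */
	m->oflags &= ~VPO_SWAPINPROG;
	/*
	 * The wakeup reaches the sleeper through vm_page_flash(), which
	 * (per the vm_page.c hunk below) clears VPO_WANTED and wakeup()s m.
	 */
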
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 57a6c49af7b9..fd34f409974b 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -1153,10 +1153,11 @@ shadowlookup:
goto unlock_tobject;
}
if ((m->flags & PG_BUSY) || m->busy) {
- vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
+ vm_page_flag_set(m, PG_REFERENCED);
vm_page_unlock_queues();
if (object != tobject)
VM_OBJECT_UNLOCK(object);
+ m->oflags |= VPO_WANTED;
msleep(m, VM_OBJECT_MTX(tobject), PDROP | PVM, "madvpo", 0);
VM_OBJECT_LOCK(object);
goto relookup;
@@ -1341,9 +1342,10 @@ vm_object_split(vm_map_entry_t entry)
* not be changed by this operation.
*/
if ((m->flags & PG_BUSY) || m->busy) {
- vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
+ vm_page_flag_set(m, PG_REFERENCED);
vm_page_unlock_queues();
VM_OBJECT_UNLOCK(new_object);
+ m->oflags |= VPO_WANTED;
msleep(m, VM_OBJECT_MTX(orig_object), PDROP | PVM, "spltwt", 0);
VM_OBJECT_LOCK(new_object);
VM_OBJECT_LOCK(orig_object);
@@ -1476,10 +1478,10 @@ vm_object_backing_scan(vm_object_t object, int op)
} else if (op & OBSC_COLLAPSE_WAIT) {
if ((p->flags & PG_BUSY) || p->busy) {
vm_page_lock_queues();
- vm_page_flag_set(p,
- PG_WANTED | PG_REFERENCED);
+ vm_page_flag_set(p, PG_REFERENCED);
vm_page_unlock_queues();
VM_OBJECT_UNLOCK(object);
+ p->oflags |= VPO_WANTED;
msleep(p, VM_OBJECT_MTX(backing_object),
PDROP | PVM, "vmocol", 0);
VM_OBJECT_LOCK(object);
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 00314930e973..c34b6f2c598f 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -387,8 +387,8 @@ vm_page_flash(vm_page_t m)
{
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
- if (m->flags & PG_WANTED) {
- vm_page_flag_clear(m, PG_WANTED);
+ if (m->oflags & VPO_WANTED) {
+ m->oflags &= ~VPO_WANTED;
wakeup(m);
}
}
@@ -423,7 +423,6 @@ vm_page_io_finish(vm_page_t m)
{
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
- mtx_assert(&vm_page_queue_mtx, MA_OWNED);
m->busy--;
if (m->busy == 0)
vm_page_flash(m);
@@ -500,7 +499,7 @@ vm_page_sleep_if_busy(vm_page_t m, int also_m_busy, const char *msg)
if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
if (!mtx_owned(&vm_page_queue_mtx))
vm_page_lock_queues();
- vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
+ vm_page_flag_set(m, PG_REFERENCED);
vm_page_unlock_queues();
/*
@@ -510,6 +509,7 @@ vm_page_sleep_if_busy(vm_page_t m, int also_m_busy, const char *msg)
* such that even if m->object changes, we can re-lock
* it.
*/
+ m->oflags |= VPO_WANTED;
msleep(m, VM_OBJECT_MTX(m->object), PVM, msg, 0);
return (TRUE);
}
@@ -1480,8 +1480,9 @@ retrylookup:
if ((m = vm_page_lookup(object, pindex)) != NULL) {
vm_page_lock_queues();
if (m->busy || (m->flags & PG_BUSY)) {
- vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
+ vm_page_flag_set(m, PG_REFERENCED);
vm_page_unlock_queues();
+ m->oflags |= VPO_WANTED;
msleep(m, VM_OBJECT_MTX(m->object), PVM, "pgrbwt", 0);
if ((allocflags & VM_ALLOC_RETRY) == 0)
return (NULL);
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 2a30f3a10056..428aee5322f4 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -116,6 +116,7 @@ struct vm_page {
u_short wire_count; /* wired down maps refs (P) */
u_int cow; /* page cow mapping count */
short hold_count; /* page hold count */
+ u_short oflags; /* page flags (O) */
u_char act_count; /* page usage count */
u_char busy; /* page busy count (O) */
/* NOTE that these must support one bit per DEV_BSIZE in a page!!! */
@@ -135,6 +136,15 @@ struct vm_page {
#endif
};
+/*
+ * Page flags stored in oflags:
+ *
+ * Access to these page flags is synchronized by the lock on the object
+ * containing the page (O).
+ */
+#define VPO_WANTED 0x0002 /* someone is waiting for page */
+#define VPO_SWAPINPROG 0x0200 /* swap I/O in progress on page */
+
/* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */
#if PAGE_SIZE == 32768
#ifdef CTASSERT
@@ -210,14 +220,12 @@ extern struct pq_coloring page_queue_coloring;
* the object, and such pages are also not on any PQ queue.
*/
#define PG_BUSY 0x0001 /* page is in transit (O) */
-#define PG_WANTED 0x0002 /* someone is waiting for page (O) */
#define PG_WINATCFLS 0x0004 /* flush dirty page on inactive q */
#define PG_FICTITIOUS 0x0008 /* physical page doesn't exist (O) */
#define PG_WRITEABLE 0x0010 /* page is mapped writeable */
#define PG_ZERO 0x0040 /* page is zeroed */
#define PG_REFERENCED 0x0080 /* page has been referenced */
#define PG_CLEANCHK 0x0100 /* page will be checked for cleaning */
-#define PG_SWAPINPROG 0x0200 /* swap I/O in progress on page */
#define PG_NOSYNC 0x0400 /* do not collect for syncer */
#define PG_UNMANAGED 0x0800 /* No PV management for page */
#define PG_MARKER 0x1000 /* special queue marker page */
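
Per the (O) annotation added above, oflags may only be read or written with
the containing object's lock held; vm_page_flash() and vm_page_io_finish() in
the vm_page.c hunks assert exactly that. A hypothetical helper (not in the
tree, shown only to make the rule explicit):

	static __inline void
	vm_page_set_wanted(vm_page_t m)
	{

		VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
		m->oflags |= VPO_WANTED;
	}
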
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index 8532bbf2fe76..a8f9912a4b95 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -956,7 +956,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
* now tell them that it is ok to use
*/
if (!error) {
- if (mt->flags & PG_WANTED)
+ if (mt->oflags & VPO_WANTED)
vm_page_activate(mt);
else
vm_page_deactivate(mt);