about summary refs log tree commit diff
diff options
context:
space:
mode:
authorAlan Cox <alc@FreeBSD.org>2003-02-03 00:05:11 +0000
committerAlan Cox <alc@FreeBSD.org>2003-02-03 00:05:11 +0000
commitca380469a2183243988e25a79a168bedee3463da (patch)
tree27895aed38265a24ae57cc4e15e1c1fbec5404bf
parente61838b12895109cb183cb78ac1119991ab45847 (diff)
downloadsrc-ca380469a2183243988e25a79a168bedee3463da.tar.gz
src-ca380469a2183243988e25a79a168bedee3463da.zip
- Make allpmaps static.
- Use atomic subtract to update the global wired pages count. (See also vm/vm_page.c revision 1.233.)
- Assert that the page queue lock is held in pmap_remove_entry().
Notes
Notes: svn path=/head/; revision=110254
-rw-r--r--sys/amd64/amd64/pmap.c7
-rw-r--r--sys/i386/i386/pmap.c7
2 files changed, 8 insertions, 6 deletions
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 37e0eb8a19fd..40d97b817d42 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -151,7 +151,7 @@ static int protection_codes[8];
struct pmap kernel_pmap_store;
LIST_HEAD(pmaplist, pmap);
-struct pmaplist allpmaps;
+static struct pmaplist allpmaps;
vm_offset_t avail_start; /* PA of first available physical page */
vm_offset_t avail_end; /* PA of last available physical page */
@@ -1180,7 +1180,7 @@ _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m)
if (m->wire_count == 0) {
vm_page_busy(m);
vm_page_free_zero(m);
- --cnt.v_wire_count;
+ atomic_subtract_int(&cnt.v_wire_count, 1);
}
return 1;
}
@@ -1353,7 +1353,7 @@ pmap_release_free_page(pmap_t pmap, vm_page_t p)
pmap->pm_ptphint = NULL;
p->wire_count--;
- cnt.v_wire_count--;
+ atomic_subtract_int(&cnt.v_wire_count, 1);
vm_page_free_zero(p);
vm_page_unlock_queues();
return 1;
@@ -1639,6 +1639,7 @@ pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
int s;
s = splvm();
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if (m->md.pv_list_count < pmap->pm_stats.resident_count) {
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
if (pmap == pv->pv_pmap && va == pv->pv_va)
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 37e0eb8a19fd..40d97b817d42 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -151,7 +151,7 @@ static int protection_codes[8];
struct pmap kernel_pmap_store;
LIST_HEAD(pmaplist, pmap);
-struct pmaplist allpmaps;
+static struct pmaplist allpmaps;
vm_offset_t avail_start; /* PA of first available physical page */
vm_offset_t avail_end; /* PA of last available physical page */
@@ -1180,7 +1180,7 @@ _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m)
if (m->wire_count == 0) {
vm_page_busy(m);
vm_page_free_zero(m);
- --cnt.v_wire_count;
+ atomic_subtract_int(&cnt.v_wire_count, 1);
}
return 1;
}
@@ -1353,7 +1353,7 @@ pmap_release_free_page(pmap_t pmap, vm_page_t p)
pmap->pm_ptphint = NULL;
p->wire_count--;
- cnt.v_wire_count--;
+ atomic_subtract_int(&cnt.v_wire_count, 1);
vm_page_free_zero(p);
vm_page_unlock_queues();
return 1;
@@ -1639,6 +1639,7 @@ pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
int s;
s = splvm();
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if (m->md.pv_list_count < pmap->pm_stats.resident_count) {
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
if (pmap == pv->pv_pmap && va == pv->pv_va)