about summary refs log tree commit diff
path: root/sys/i386
diff options
context:
space:
mode:
author	Alan Cox <alc@FreeBSD.org>	2006-04-12 04:22:52 +0000
committer	Alan Cox <alc@FreeBSD.org>	2006-04-12 04:22:52 +0000
commit	826c207263eb927231ec49a23407424eff7bad88 (patch)
tree	eeb6525f020e547b58a26d7c6e3b96038c99d223 /sys/i386
parent	8511b981f65035ef4125e41addccd882e7179bcf (diff)
downloadsrc-826c207263eb927231ec49a23407424eff7bad88.tar.gz
src-826c207263eb927231ec49a23407424eff7bad88.zip
Retire pmap_track_modified(). We no longer need it because we do not
create managed mappings within the clean submap. To prevent regressions,
add assertions blocking the creation of managed mappings within the
clean submap.

Reviewed by: tegge
Notes
Notes: svn path=/head/; revision=157680
Diffstat (limited to 'sys/i386')
-rw-r--r--sys/i386/i386/pmap.c52
1 file changed, 10 insertions(+), 42 deletions(-)
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 8b7cba1bc2c4..96a399db7888 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -512,20 +512,6 @@ pmap_init(void)
* Low level helper routines.....
***************************************************/
-
-/*
- * this routine defines the region(s) of memory that should
- * not be tested for the modified bit.
- */
-static PMAP_INLINE int
-pmap_track_modified(vm_offset_t va)
-{
- if ((va < kmi.clean_sva) || (va >= kmi.clean_eva))
- return 1;
- else
- return 0;
-}
-
#ifdef SMP
/*
* For SMP, these functions have to use the IPI mechanism for coherence.
@@ -1512,8 +1498,7 @@ retry:
KASSERT((tpte & PG_RW),
("get_pv_entry: modified page not writable: va: %#x, pte: %#jx",
va, (uintmax_t)tpte));
- if (pmap_track_modified(va))
- vm_page_dirty(m);
+ vm_page_dirty(m);
}
pmap_invalidate_page(pmap, va);
TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
@@ -1638,8 +1623,7 @@ pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va)
KASSERT((oldpte & PG_RW),
("pmap_remove_pte: modified page not writable: va: %#x, pte: %#jx",
va, (uintmax_t)oldpte));
- if (pmap_track_modified(va))
- vm_page_dirty(m);
+ vm_page_dirty(m);
}
if (oldpte & PG_A)
vm_page_flag_set(m, PG_REFERENCED);
@@ -1810,8 +1794,7 @@ pmap_remove_all(vm_page_t m)
KASSERT((tpte & PG_RW),
("pmap_remove_all: modified page not writable: va: %#x, pte: %#jx",
pv->pv_va, (uintmax_t)tpte));
- if (pmap_track_modified(pv->pv_va))
- vm_page_dirty(m);
+ vm_page_dirty(m);
}
pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
@@ -1895,8 +1878,7 @@ retry:
vm_page_flag_set(m, PG_REFERENCED);
pbits &= ~PG_A;
}
- if ((pbits & PG_M) != 0 &&
- pmap_track_modified(sva)) {
+ if ((pbits & PG_M) != 0) {
if (m == NULL)
m = PHYS_TO_VM_PAGE(*pte);
vm_page_dirty(m);
@@ -2058,6 +2040,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
* Enter on the PV list if part of our managed memory.
*/
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
+ KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
+ ("pmap_enter: managed mapping within the clean submap"));
pmap_insert_entry(pmap, va, m);
pa |= PG_MANAGED;
}
@@ -2100,8 +2084,7 @@ validate:
KASSERT((origpte & PG_RW),
("pmap_enter: modified page not writable: va: %#x, pte: %#jx",
va, (uintmax_t)origpte));
- if ((origpte & PG_MANAGED) &&
- pmap_track_modified(va))
+ if ((origpte & PG_MANAGED) != 0)
vm_page_dirty(om);
if ((prot & VM_PROT_WRITE) == 0)
invlva = TRUE;
@@ -2132,6 +2115,9 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
pt_entry_t *pte;
vm_paddr_t pa;
+ KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
+ (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
+ ("pmap_enter_quick: managed mapping within the clean submap"));
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
PMAP_LOCK(pmap);
@@ -2705,13 +2691,6 @@ pmap_is_modified(vm_page_t m)
sched_pin();
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
- /*
- * if the bit being tested is the modified bit, then
- * mark clean_map and ptes as never
- * modified.
- */
- if (!pmap_track_modified(pv->pv_va))
- continue;
PMAP_LOCK(pv->pv_pmap);
pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
rv = (*pte & PG_M) != 0;
@@ -2767,14 +2746,6 @@ pmap_clear_ptes(vm_page_t m, int bit)
* setting RO do we need to clear the VAC?
*/
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
- /*
- * don't write protect pager mappings
- */
- if (bit == PG_RW) {
- if (!pmap_track_modified(pv->pv_va))
- continue;
- }
-
PMAP_LOCK(pv->pv_pmap);
pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
retry:
@@ -2857,9 +2828,6 @@ pmap_ts_referenced(vm_page_t m)
TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
- if (!pmap_track_modified(pv->pv_va))
- continue;
-
PMAP_LOCK(pv->pv_pmap);
pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);