-rw-r--r--  sys/sparc64/sparc64/pmap.c  1297
1 file changed, 867 insertions, 430 deletions
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index fe0e6eeea102..723bd46da5dc 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -66,6 +66,7 @@
#include "opt_msgbuf.h"
#include <sys/param.h>
+#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
@@ -87,7 +88,9 @@
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>
+#include <machine/cache.h>
#include <machine/frame.h>
+#include <machine/md_var.h>
#include <machine/pv.h>
#include <machine/tlb.h>
#include <machine/tte.h>
@@ -95,11 +98,9 @@
#define PMAP_DEBUG
-#define PMAP_LOCK(pm)
-#define PMAP_UNLOCK(pm)
-
-#define dcache_global_flush(pa)
-#define icache_global_flush(pa)
+#ifndef PMAP_SHPGPERPROC
+#define PMAP_SHPGPERPROC 200
+#endif
struct mem_region {
vm_offset_t mr_start;
@@ -124,12 +125,15 @@ vm_offset_t msgbuf_phys;
vm_offset_t avail_start;
vm_offset_t avail_end;
+int pmap_pagedaemon_waken;
+
/*
 * Map of physical memory regions.
*/
vm_offset_t phys_avail[128];
static struct mem_region mra[128];
-static struct ofw_map oma[128];
+static struct ofw_map translations[128];
+static int translations_size;
/*
* First and last available kernel virtual addresses.
@@ -139,10 +143,15 @@ vm_offset_t virtual_end;
vm_offset_t kernel_vm_end;
/*
- * Kernel pmap handle and associated storage.
+ * The locked kernel page the kernel binary was loaded into. This will need
+ * to become a list later.
+ */
+vm_offset_t kernel_page;
+
+/*
+ * Kernel pmap.
*/
-pmap_t kernel_pmap;
-static struct pmap kernel_pmap_store;
+struct pmap kernel_pmap_store;
/*
* Map of free and in use hardware contexts and index of first potentially
@@ -152,12 +161,24 @@ static char pmap_context_map[PMAP_CONTEXT_MAX];
static u_int pmap_context_base;
/*
- * Virtual addresses of free space for temporary mappings. Used for copying
- * and zeroing physical pages.
+ * Virtual addresses of free space for temporary mappings. Used for copying,
+ * zeroing and mapping physical pages for /dev/mem accesses.
*/
static vm_offset_t CADDR1;
static vm_offset_t CADDR2;
+static boolean_t pmap_initialized = FALSE;
+
+/* Convert a tte data field into a page mask */
+static vm_offset_t pmap_page_masks[] = {
+ PAGE_MASK_8K,
+ PAGE_MASK_64K,
+ PAGE_MASK_512K,
+ PAGE_MASK_4M
+};
+
+#define PMAP_TD_GET_MASK(d) pmap_page_masks[TD_GET_SIZE((d))]
+
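For illustration, a stand-alone sketch of that lookup with assumed encodings (the real TD_GET_SIZE and TD_GET_PA macros are defined in machine/tte.h): the size field of a tte data word selects the mask that keeps the in-page offset of the virtual address, which is how pmap_extract() and pmap_kextract() below rebuild a physical address.

#include <stdio.h>

/* Page masks for 8K, 64K, 512K and 4M pages, indexed by the size field. */
static const unsigned long page_masks[] = {
	(1UL << 13) - 1,
	(1UL << 16) - 1,
	(1UL << 19) - 1,
	(1UL << 22) - 1,
};

/* Mirrors TD_GET_PA(d) | (va & PMAP_TD_GET_MASK(d)). */
static unsigned long
extract(unsigned long pa_base, int size_field, unsigned long va)
{
	return (pa_base | (va & page_masks[size_field]));
}

int
main(void)
{
	/* A va inside a 4M mapping keeps its low 22 bits as the offset. */
	printf("%#lx\n", extract(0x40000000UL, 3, 0xf0123456UL));
	return (0);
}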
/*
* Allocate and free hardware context numbers.
*/
@@ -173,12 +194,19 @@ static vm_offset_t pmap_bootstrap_alloc(vm_size_t size);
* Quick sort callout for comparing memory regions.
*/
static int mr_cmp(const void *a, const void *b);
+static int om_cmp(const void *a, const void *b);
static int
mr_cmp(const void *a, const void *b)
{
return ((const struct mem_region *)a)->mr_start -
((const struct mem_region *)b)->mr_start;
}
+static int
+om_cmp(const void *a, const void *b)
+{
+ return ((const struct ofw_map *)a)->om_start -
+ ((const struct ofw_map *)b)->om_start;
+}
/*
* Bootstrap the system enough to run with virtual memory.
@@ -187,8 +215,8 @@ void
pmap_bootstrap(vm_offset_t ekva)
{
struct pmap *pm;
- struct stte *stp;
struct tte tte;
+ struct tte *tp;
vm_offset_t off;
vm_offset_t pa;
vm_offset_t va;
@@ -205,6 +233,10 @@ pmap_bootstrap(vm_offset_t ekva)
virtual_avail = roundup2(ekva, PAGE_SIZE_4M);
virtual_end = VM_MAX_KERNEL_ADDRESS;
+ /* Look up the page the kernel binary was loaded into. */
+ kernel_page = TD_GET_PA(ldxa(TLB_DAR_SLOT(TLB_SLOT_KERNEL),
+ ASI_DTLB_DATA_ACCESS_REG));
+
/*
* Find out what physical memory is available from the prom and
* initialize the phys_avail array. This must be done before
@@ -223,7 +255,7 @@ pmap_bootstrap(vm_offset_t ekva)
panic("pmap_bootstrap: getprop /memory/available");
sz /= sizeof(*mra);
CTR0(KTR_PMAP, "pmap_bootstrap: physical memory");
- qsort(mra, sz, sizeof *mra, mr_cmp);
+ qsort(mra, sz, sizeof (*mra), mr_cmp);
for (i = 0, j = 0; i < sz; i++, j += 2) {
CTR2(KTR_PMAP, "start=%#lx size=%#lx", mra[i].mr_start,
mra[i].mr_size);
@@ -238,7 +270,7 @@ pmap_bootstrap(vm_offset_t ekva)
if (pa & PAGE_MASK_4M)
panic("pmap_bootstrap: tsb unaligned\n");
tsb_kernel_phys = pa;
- tsb_kernel = (struct stte *)virtual_avail;
+ tsb_kernel = (struct tte *)virtual_avail;
virtual_avail += KVA_PAGES * PAGE_SIZE_4M;
for (i = 0; i < KVA_PAGES; i++) {
va = (vm_offset_t)tsb_kernel + i * PAGE_SIZE_4M;
@@ -253,12 +285,10 @@ pmap_bootstrap(vm_offset_t ekva)
/*
* Load the tsb registers.
*/
- stxa(AA_DMMU_TSB, ASI_DMMU,
- (vm_offset_t)tsb_kernel >> (STTE_SHIFT - TTE_SHIFT));
- stxa(AA_IMMU_TSB, ASI_IMMU,
- (vm_offset_t)tsb_kernel >> (STTE_SHIFT - TTE_SHIFT));
+ stxa(AA_DMMU_TSB, ASI_DMMU, (vm_offset_t)tsb_kernel);
+ stxa(AA_IMMU_TSB, ASI_IMMU, (vm_offset_t)tsb_kernel);
membar(Sync);
- flush(va);
+ flush(tsb_kernel);
/*
* Allocate the message buffer.
@@ -272,29 +302,32 @@ pmap_bootstrap(vm_offset_t ekva)
panic("pmap_bootstrap: finddevice /virtual-memory");
if ((sz = OF_getproplen(vmem, "translations")) == -1)
panic("pmap_bootstrap: getproplen translations");
- if (sizeof(oma) < sz)
- panic("pmap_bootstrap: oma too small");
- bzero(oma, sz);
- if (OF_getprop(vmem, "translations", oma, sz) == -1)
+ if (sizeof(translations) < sz)
+ panic("pmap_bootstrap: translations too small");
+ bzero(translations, sz);
+ if (OF_getprop(vmem, "translations", translations, sz) == -1)
panic("pmap_bootstrap: getprop /virtual-memory/translations");
- sz /= sizeof(*oma);
+ sz /= sizeof(*translations);
+ translations_size = sz;
CTR0(KTR_PMAP, "pmap_bootstrap: translations");
+ qsort(translations, sz, sizeof (*translations), om_cmp);
for (i = 0; i < sz; i++) {
CTR4(KTR_PMAP,
"translation: start=%#lx size=%#lx tte=%#lx pa=%#lx",
- oma[i].om_start, oma[i].om_size, oma[i].om_tte,
- TD_PA(oma[i].om_tte));
- if (oma[i].om_start < 0xf0000000) /* XXX!!! */
+ translations[i].om_start, translations[i].om_size,
+ translations[i].om_tte, TD_GET_PA(translations[i].om_tte));
+ if (translations[i].om_start < 0xf0000000) /* XXX!!! */
continue;
- for (off = 0; off < oma[i].om_size; off += PAGE_SIZE) {
- va = oma[i].om_start + off;
- tte.tte_data = oma[i].om_tte + off;
+ for (off = 0; off < translations[i].om_size;
+ off += PAGE_SIZE) {
+ va = translations[i].om_start + off;
+ tte.tte_data = translations[i].om_tte + off;
tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(va);
- stp = tsb_kvtostte(va);
+ tp = tsb_kvtotte(va);
CTR4(KTR_PMAP,
- "mapping: va=%#lx stp=%p tte=%#lx pa=%#lx",
- va, stp, tte.tte_data, TD_PA(tte.tte_data));
- stp->st_tte = tte;
+ "mapping: va=%#lx tp=%p tte=%#lx pa=%#lx",
+ va, tp, tte.tte_data, TD_GET_PA(tte.tte_data));
+ *tp = tte;
}
}
@@ -305,6 +338,7 @@ pmap_bootstrap(vm_offset_t ekva)
for (i = 0; phys_avail[i + 2] != 0; i += 2)
;
avail_end = phys_avail[i + 1];
+ Maxmem = sparc64_btop(avail_end);
/*
* Allocate virtual address space for copying and zeroing pages of
@@ -322,28 +356,17 @@ pmap_bootstrap(vm_offset_t ekva)
virtual_avail += round_page(MSGBUF_SIZE);
/*
- * Allocate physical memory for the heads of the stte alias chains.
- */
- sz = round_page(((avail_end - avail_start) >> PAGE_SHIFT) *
- sizeof(struct pv_head));
- pv_table = pmap_bootstrap_alloc(sz);
- /* XXX */
- avail_start += sz;
- for (i = 0; i < sz; i += sizeof(struct pv_head))
- pvh_set_first(pv_table + i, 0);
-
- /*
* Initialize the kernel pmap (which is statically allocated).
*/
- pm = &kernel_pmap_store;
+ pm = kernel_pmap;
pm->pm_context = TLB_CTX_KERNEL;
pm->pm_active = ~0;
pm->pm_count = 1;
- kernel_pmap = pm;
+ TAILQ_INIT(&pm->pm_pvlist);
/*
* Set the secondary context to be the kernel context (needed for
- * fp block operations in the kernel).
+ * fp block operations in the kernel and the cache code).
*/
stxa(AA_DMMU_SCXR, ASI_DMMU, TLB_CTX_KERNEL);
membar(Sync);
@@ -372,55 +395,148 @@ pmap_bootstrap_alloc(vm_size_t size)
}
/*
- * Allocate a hardware context number from the context map.
+ * Initialize the pmap module.
*/
-static u_int
-pmap_context_alloc(void)
+void
+pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
{
- u_int i;
+ vm_offset_t addr;
+ vm_size_t size;
+ int result;
+ int i;
- i = pmap_context_base;
- do {
- if (pmap_context_map[i] == 0) {
- pmap_context_map[i] = 1;
- pmap_context_base = (i + 1) & (PMAP_CONTEXT_MAX - 1);
- return (i);
- }
- } while ((i = (i + 1) & (PMAP_CONTEXT_MAX - 1)) != pmap_context_base);
- panic("pmap_context_alloc");
+ for (i = 0; i < vm_page_array_size; i++) {
+ vm_page_t m;
+
+ m = &vm_page_array[i];
+ TAILQ_INIT(&m->md.pv_list);
+ m->md.pv_list_count = 0;
+ }
+
+ for (i = 0; i < translations_size; i++) {
+ addr = translations[i].om_start;
+ size = translations[i].om_size;
+ if (addr < 0xf0000000) /* XXX */
+ continue;
+ result = vm_map_find(kernel_map, NULL, 0, &addr, size, TRUE,
+ VM_PROT_ALL, VM_PROT_ALL, 0);
+ if (result != KERN_SUCCESS || addr != translations[i].om_start)
+ panic("pmap_init: vm_map_find");
+ }
+
+ pvzone = &pvzone_store;
+ pvinit = (struct pv_entry *)kmem_alloc(kernel_map,
+ vm_page_array_size * sizeof (struct pv_entry));
+ zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry), pvinit,
+ vm_page_array_size);
+ pmap_initialized = TRUE;
}
/*
- * Free a hardware context number back to the context map.
+ * Initialize the address space (zone) for the pv_entries. Set a
+ * high water mark so that the system can recover from excessive
+ * numbers of pv entries.
*/
-static void
-pmap_context_destroy(u_int i)
+void
+pmap_init2(void)
{
+ int shpgperproc;
- pmap_context_map[i] = 0;
+ shpgperproc = PMAP_SHPGPERPROC;
+ TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
+ pv_entry_max = shpgperproc * maxproc + vm_page_array_size;
+ pv_entry_high_water = 9 * (pv_entry_max / 10);
+ zinitna(pvzone, &pvzone_obj, NULL, 0, pv_entry_max, ZONE_INTERRUPT, 1);
}
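As a rough illustration of the sizing above, with assumed values for the tunable, maxproc and the page array (all three are machine- and configuration-dependent):

#include <stdio.h>

int
main(void)
{
	/* Assumed inputs: the tunable default, maxproc of 1044 and
	 * 2GB of RAM in 8K pages; real systems will differ. */
	long shpgperproc = 200;
	long maxproc = 1044;
	long vm_page_array_size = 262144;

	long pv_entry_max = shpgperproc * maxproc + vm_page_array_size;
	long pv_entry_high_water = 9 * (pv_entry_max / 10);

	printf("pv_entry_max=%ld high_water=%ld\n",
	    pv_entry_max, pv_entry_high_water);
	return (0);
}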
/*
- * Map a range of physical addresses into kernel virtual address space.
- *
- * The value passed in *virt is a suggested virtual address for the mapping.
- * Architectures which can support a direct-mapped physical to virtual region
- * can return the appropriate address within that region, leaving '*virt'
- * unchanged. We cannot and therefore do not; *virt is updated with the
- * first usable address after the mapped region.
+ * Extract the physical page address associated with the given
+ * map/virtual_address pair.
*/
vm_offset_t
-pmap_map(vm_offset_t *virt, vm_offset_t pa_start, vm_offset_t pa_end, int prot)
+pmap_extract(pmap_t pm, vm_offset_t va)
{
- vm_offset_t sva;
- vm_offset_t va;
+ struct tte *tp;
+ u_long d;
- sva = *virt;
- va = sva;
- for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
- pmap_kenter(va, pa_start);
- *virt = va;
- return (sva);
+ if (pm == kernel_pmap)
+ return (pmap_kextract(va));
+ tp = tsb_tte_lookup(pm, va);
+ if (tp == NULL)
+ return (0);
+ else {
+ d = tp->tte_data;
+ return (TD_GET_PA(d) | (va & PMAP_TD_GET_MASK(d)));
+ }
+}
+
+/*
+ * Extract the physical page address associated with the given kernel virtual
+ * address.
+ */
+vm_offset_t
+pmap_kextract(vm_offset_t va)
+{
+ struct tte *tp;
+ u_long d;
+
+ if (va >= KERNBASE && va < KERNBASE + PAGE_SIZE_4M)
+ return (kernel_page + (va & PAGE_MASK_4M));
+ tp = tsb_kvtotte(va);
+ d = tp->tte_data;
+ if ((d & TD_V) == 0)
+ return (0);
+ return (TD_GET_PA(d) | (va & PMAP_TD_GET_MASK(d)));
+}
+
+int
+pmap_cache_enter(vm_page_t m, vm_offset_t va)
+{
+ struct tte *tp;
+ vm_offset_t pa;
+ pv_entry_t pv;
+ int c;
+ int i;
+
+ CTR2(KTR_PMAP, "pmap_cache_enter: m=%p va=%#lx", m, va);
+ for (i = 0, c = 0; i < DCACHE_COLORS; i++) {
+ if (i != DCACHE_COLOR(va))
+ c += m->md.colors[i];
+ }
+ m->md.colors[DCACHE_COLOR(va)]++;
+ if (c == 0) {
+ CTR0(KTR_PMAP, "pmap_cache_enter: cacheable");
+ return (1);
+ }
+ else if (c != 1) {
+ CTR0(KTR_PMAP, "pmap_cache_enter: already uncacheable");
+ return (0);
+ }
+ CTR0(KTR_PMAP, "pmap_cache_enter: marking uncacheable");
+ if ((m->flags & PG_UNMANAGED) != 0)
+ panic("pmap_cache_enter: non-managed page");
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+ if ((tp = tsb_tte_lookup(pv->pv_pmap, pv->pv_va)) != NULL) {
+ atomic_clear_long(&tp->tte_data, TD_CV);
+ tlb_page_demap(TLB_DTLB | TLB_ITLB,
+ pv->pv_pmap->pm_context, pv->pv_va);
+ }
+ }
+ pa = VM_PAGE_TO_PHYS(m);
+ dcache_inval_phys(pa, pa + PAGE_SIZE);
+ return (0);
+}
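For illustration, a minimal user-space sketch of the color bookkeeping above, assuming two data cache colors selected by bit 13 of the virtual address (the real DCACHE_COLORS and DCACHE_COLOR definitions come from machine/cache.h and may differ): a new mapping stays cacheable only while no existing mapping of the page uses a different color.

#include <stdio.h>

#define DCACHE_COLORS		2	/* illustrative value */
#define DCACHE_COLOR(va)	(((va) >> 13) & (DCACHE_COLORS - 1))

struct page_md {
	int colors[DCACHE_COLORS];	/* mappings per virtual color */
};

/* Returns 1 if a new mapping at va may be entered cacheable. */
static int
cache_enter(struct page_md *md, unsigned long va)
{
	int c, i;

	for (i = 0, c = 0; i < DCACHE_COLORS; i++)
		if (i != DCACHE_COLOR(va))
			c += md->colors[i];
	md->colors[DCACHE_COLOR(va)]++;
	return (c == 0);
}

int
main(void)
{
	struct page_md md = { { 0 } };

	printf("%d\n", cache_enter(&md, 0x2000));	/* 1: first mapping */
	printf("%d\n", cache_enter(&md, 0x6000));	/* 1: same color, no alias */
	printf("%d\n", cache_enter(&md, 0x4000));	/* 0: conflicting color */
	return (0);
}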
+
+void
+pmap_cache_remove(vm_page_t m, vm_offset_t va)
+{
+
+ CTR3(KTR_PMAP, "pmap_cache_remove: m=%p va=%#lx c=%d", m, va,
+ m->md.colors[DCACHE_COLOR(va)]);
+ KASSERT(m->md.colors[DCACHE_COLOR(va)] > 0,
+ ("pmap_cache_remove: no mappings %d <= 0",
+ m->md.colors[DCACHE_COLOR(va)]));
+ m->md.colors[DCACHE_COLOR(va)]--;
}
/*
@@ -429,16 +545,18 @@ pmap_map(vm_offset_t *virt, vm_offset_t pa_start, vm_offset_t pa_end, int prot)
void
pmap_kenter(vm_offset_t va, vm_offset_t pa)
{
- struct stte *stp;
struct tte tte;
+ struct tte *tp;
tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(va);
tte.tte_data = TD_V | TD_8K | TD_VA_LOW(va) | TD_PA(pa) |
TD_REF | TD_SW | TD_CP | TD_P | TD_W;
- stp = tsb_kvtostte(va);
- CTR4(KTR_PMAP, "pmap_kenter: va=%#lx pa=%#lx stp=%p data=%#lx",
- va, pa, stp, stp->st_tte.tte_data);
- stp->st_tte = tte;
+ tp = tsb_kvtotte(va);
+ CTR4(KTR_PMAP, "pmap_kenter: va=%#lx pa=%#lx tp=%p data=%#lx",
+ va, pa, tp, tp->tte_data);
+ if ((tp->tte_data & TD_V) != 0)
+ tlb_page_demap(TLB_DTLB, TLB_CTX_KERNEL, va);
+ *tp = tte;
}
/*
@@ -449,18 +567,18 @@ pmap_kenter(vm_offset_t va, vm_offset_t pa)
void
pmap_kenter_flags(vm_offset_t va, vm_offset_t pa, u_long flags)
{
- struct stte *stp;
struct tte tte;
+ struct tte *tp;
tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(va);
- if ((flags & TD_W) != 0)
- flags |= TD_SW;
tte.tte_data = TD_V | TD_8K | TD_VA_LOW(va) | TD_PA(pa) |
TD_REF | TD_P | flags;
- stp = tsb_kvtostte(va);
- CTR4(KTR_PMAP, "pmap_kenter: va=%#lx pa=%#lx stp=%p data=%#lx",
- va, pa, stp, stp->st_tte.tte_data);
- stp->st_tte = tte;
+ tp = tsb_kvtotte(va);
+ CTR4(KTR_PMAP, "pmap_kenter_flags: va=%#lx pa=%#lx tp=%p data=%#lx",
+ va, pa, tp, tp->tte_data);
+ if ((tp->tte_data & TD_V) != 0)
+ tlb_page_demap(TLB_DTLB, TLB_CTX_KERNEL, va);
+ *tp = tte;
}
/*
@@ -480,12 +598,38 @@ pmap_kenter_temporary(vm_offset_t pa, int i)
void
pmap_kremove(vm_offset_t va)
{
- struct stte *stp;
+ struct tte *tp;
+
+ tp = tsb_kvtotte(va);
+ CTR3(KTR_PMAP, "pmap_kremove: va=%#lx tp=%p data=%#lx", va, tp,
+ tp->tte_data);
+ atomic_clear_long(&tp->tte_data, TD_V);
+ tp->tte_tag = 0;
+ tp->tte_data = 0;
+ tlb_page_demap(TLB_DTLB, TLB_CTX_KERNEL, va);
+}
- stp = tsb_kvtostte(va);
- CTR3(KTR_PMAP, "pmap_kremove: va=%#lx stp=%p data=%#lx", va, stp,
- stp->st_tte.tte_data);
- tsb_stte_remove(stp);
+/*
+ * Map a range of physical addresses into kernel virtual address space.
+ *
+ * The value passed in *virt is a suggested virtual address for the mapping.
+ * Architectures which can support a direct-mapped physical to virtual region
+ * can return the appropriate address within that region, leaving '*virt'
+ * unchanged. We cannot and therefore do not; *virt is updated with the
+ * first usable address after the mapped region.
+ */
+vm_offset_t
+pmap_map(vm_offset_t *virt, vm_offset_t pa_start, vm_offset_t pa_end, int prot)
+{
+ vm_offset_t sva;
+ vm_offset_t va;
+
+ sva = *virt;
+ va = sva;
+ for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
+ pmap_kenter(va, pa_start);
+ *virt = va;
+ return (sva);
}
/*
@@ -516,154 +660,8 @@ pmap_qremove(vm_offset_t va, int count)
}
/*
- * Map the given physical page at the specified virtual address in the
- * target pmap with the protection requested. If specified the page
- * will be wired down.
- */
-void
-pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
- boolean_t wired)
-{
- struct stte *stp;
- struct tte tte;
- vm_offset_t pa;
- u_long data;
-
- KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
- ("pmap_enter: non current pmap"));
- pa = VM_PAGE_TO_PHYS(m);
- CTR5(KTR_PMAP, "pmap_enter: ctx=%p va=%#lx pa=%#lx prot=%#x wired=%d",
- pm->pm_context, va, pa, prot, wired);
- tte.tte_tag = TT_CTX(pm->pm_context) | TT_VA(va);
- tte.tte_data = TD_V | TD_8K | TD_VA_LOW(va) | TD_PA(pa) |
- TD_CP | TD_CV;
- if (pm->pm_context == TLB_CTX_KERNEL)
- tte.tte_data |= TD_P;
- if (wired == TRUE) {
- tte.tte_data |= TD_REF;
- if (prot & VM_PROT_WRITE)
- tte.tte_data |= TD_W;
- }
- if (prot & VM_PROT_WRITE)
- tte.tte_data |= TD_SW;
- if (prot & VM_PROT_EXECUTE) {
- tte.tte_data |= TD_EXEC;
- icache_global_flush(pa);
- }
- if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0)
- tte.tte_data |= TD_MNG;
-
- PMAP_LOCK(pm);
- if ((stp = tsb_stte_lookup(pm, va)) != NULL) {
- data = stp->st_tte.tte_data;
- if (TD_PA(data) == pa) {
- if (prot & VM_PROT_WRITE)
- tte.tte_data |= TD_W;
- CTR3(KTR_PMAP,
- "pmap_enter: update pa=%#lx data=%#lx to %#lx",
- pa, data, tte.tte_data);
- }
- if (stp->st_tte.tte_data & TD_MNG)
- pv_remove_virt(stp);
- tsb_stte_remove(stp);
- if (tte.tte_data & TD_MNG)
- pv_insert(pm, pa, va, stp);
- stp->st_tte = tte;
- } else {
- tsb_tte_enter(pm, va, tte);
- }
- PMAP_UNLOCK(pm);
-}
-
-void
-pmap_remove(pmap_t pm, vm_offset_t start, vm_offset_t end)
-{
- struct stte *stp;
-
- CTR3(KTR_PMAP, "pmap_remove: pm=%p start=%#lx end=%#lx",
- pm, start, end);
- KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
- ("pmap_remove: non current pmap"));
- PMAP_LOCK(pm);
- for (; start < end; start += PAGE_SIZE) {
- if ((stp = tsb_stte_lookup(pm, start)) == NULL)
- continue;
- if (stp->st_tte.tte_data & TD_MNG)
- pv_remove_virt(stp);
- tsb_stte_remove(stp);
- }
- PMAP_UNLOCK(pm);
-}
-
-/*
- * Initialize the pmap module.
- */
-void
-pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
-{
-}
-
-void
-pmap_init2(void)
-{
-}
-
-/*
- * Initialize the pmap associated with process 0.
- */
-void
-pmap_pinit0(pmap_t pm)
-{
-
- pm = &kernel_pmap_store;
- pm->pm_context = pmap_context_alloc();
- pm->pm_active = 0;
- pm->pm_count = 1;
- bzero(&pm->pm_stats, sizeof(pm->pm_stats));
-}
-
-/*
- * Initialize a preallocated and zeroed pmap structure.
- */
-void
-pmap_pinit(pmap_t pm)
-{
- struct stte *stp;
-
- pm->pm_object = vm_object_allocate(OBJT_DEFAULT, 16);
- pm->pm_context = pmap_context_alloc();
- pm->pm_active = 0;
- pm->pm_count = 1;
- stp = &pm->pm_stte;
- stp->st_tte = tsb_page_alloc(pm, (vm_offset_t)tsb_base(0));
- bzero(&pm->pm_stats, sizeof(pm->pm_stats));
-}
-
-void
-pmap_pinit2(pmap_t pmap)
-{
-}
-
-/*
- * Grow the number of kernel page table entries. Unneeded.
- */
-void
-pmap_growkernel(vm_offset_t addr)
-{
-}
-
-/*
- * Make the specified page pageable (or not). Unneeded.
- */
-void
-pmap_pageable(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
- boolean_t pageable)
-{
-}
-
-/*
- * Create the user structure for a new process. This
- * routine directly affects the performance of fork().
+ * Create the uarea for a new process.
+ * This routine directly affects the fork perf for a process.
*/
void
pmap_new_proc(struct proc *p)
@@ -673,11 +671,18 @@ pmap_new_proc(struct proc *p)
vm_page_t m;
u_int i;
+ /*
+ * Allocate object for the upages.
+ */
upobj = p->p_upages_obj;
if (upobj == NULL) {
upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
p->p_upages_obj = upobj;
}
+
+ /*
+ * Get a kernel virtual address for the U area for this process.
+ */
up = (vm_offset_t)p->p_uarea;
if (up == 0) {
up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
@@ -685,11 +690,24 @@ pmap_new_proc(struct proc *p)
panic("pmap_new_proc: upage allocation failed");
p->p_uarea = (struct user *)up;
}
+
for (i = 0; i < UAREA_PAGES; i++) {
+ /*
+ * Get a uarea page.
+ */
m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
+
+ /*
+ * Wire the page.
+ */
m->wire_count++;
cnt.v_wire_count++;
+
+ /*
+ * Enter the page into the kernel address space.
+ */
pmap_kenter(up + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m));
+
vm_page_wakeup(m);
vm_page_flag_clear(m, PG_ZERO);
vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
@@ -697,6 +715,10 @@ pmap_new_proc(struct proc *p)
}
}
+/*
+ * Dispose the uarea for a process that has exited.
+ * This routine directly impacts the exit perf of a process.
+ */
void
pmap_dispose_proc(struct proc *p)
{
@@ -719,7 +741,7 @@ pmap_dispose_proc(struct proc *p)
}
/*
- * Allow the UPAGES for a process to be prejudicially paged out.
+ * Allow the uarea for a process to be prejudicially paged out.
*/
void
pmap_swapout_proc(struct proc *p)
@@ -742,7 +764,7 @@ pmap_swapout_proc(struct proc *p)
}
/*
- * Bring the UPAGES for a specified process back in.
+ * Bring the uarea for a specified process back in.
*/
void
pmap_swapin_proc(struct proc *p)
@@ -772,8 +794,9 @@ pmap_swapin_proc(struct proc *p)
}
/*
- * Create the kernel stack for a new thread. This
- * routine directly affects the performance of fork().
+ * Create the kernel stack and pcb for a new thread.
+ * This routine directly affects the fork perf for a process and
+ * create performance for a thread.
*/
void
pmap_new_thread(struct thread *td)
@@ -783,26 +806,46 @@ pmap_new_thread(struct thread *td)
vm_page_t m;
u_int i;
+ /*
+ * Allocate object for the kstack.
+ */
ksobj = td->td_kstack_obj;
if (ksobj == NULL) {
ksobj = vm_object_allocate(OBJT_DEFAULT, KSTACK_PAGES);
td->td_kstack_obj = ksobj;
}
+
+ /*
+ * Get a kernel virtual address for the kstack for this thread.
+ */
ks = td->td_kstack;
if (ks == 0) {
ks = kmem_alloc_nofault(kernel_map,
(KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE);
if (ks == 0)
panic("pmap_new_thread: kstack allocation failed");
- /* XXX remove from tlb */
+ tlb_page_demap(TLB_DTLB, TLB_CTX_KERNEL, ks);
ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
td->td_kstack = ks;
}
+
for (i = 0; i < KSTACK_PAGES; i++) {
+ /*
+ * Get a kernel stack page.
+ */
m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
+
+ /*
+ * Wire the page.
+ */
m->wire_count++;
cnt.v_wire_count++;
+
+ /*
+ * Enter the page into the kernel address space.
+ */
pmap_kenter(ks + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m));
+
vm_page_wakeup(m);
vm_page_flag_clear(m, PG_ZERO);
vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
@@ -810,6 +853,10 @@ pmap_new_thread(struct thread *td)
}
}
+/*
+ * Dispose the kernel stack for a thread that has exited.
+ * This routine directly impacts the exit perf of a process and thread.
+ */
void
pmap_dispose_thread(struct thread *td)
{
@@ -823,7 +870,7 @@ pmap_dispose_thread(struct thread *td)
for (i = 0; i < KSTACK_PAGES; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
- panic("pmap_dispose_proc: upage already missing?");
+ panic("pmap_dispose_thread: kstack already missing?");
vm_page_busy(m);
pmap_kremove(ks + i * PAGE_SIZE);
vm_page_unwire(m, 0);
@@ -884,273 +931,564 @@ pmap_swapin_thread(struct thread *td)
}
}
+/*
+ * Initialize the pmap associated with process 0.
+ */
void
-pmap_page_protect(vm_page_t m, vm_prot_t prot)
+pmap_pinit0(pmap_t pm)
{
- if (m->flags & PG_FICTITIOUS || prot & VM_PROT_WRITE)
- return;
- if (prot & (VM_PROT_READ | VM_PROT_EXECUTE))
- pv_bit_clear(m, TD_W | TD_SW);
- else
- pv_global_remove_all(m);
+ pm->pm_context = pmap_context_alloc();
+ pm->pm_active = 0;
+ pm->pm_count = 1;
+ pm->pm_tsb = NULL;
+ pm->pm_tsb_obj = NULL;
+ pm->pm_tsb_tte = NULL;
+ TAILQ_INIT(&pm->pm_pvlist);
+ bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}
+/*
+ * Initialize a preallocated and zeroed pmap structure, such as one in a
+ * vmspace structure.
+ */
void
-pmap_clear_modify(vm_page_t m)
+pmap_pinit(pmap_t pm)
{
+ vm_page_t m;
- if (m->flags & PG_FICTITIOUS)
- return;
- pv_bit_clear(m, TD_W);
-}
+ /*
+ * Allocate kva space for the tsb.
+ */
+ if (pm->pm_tsb == NULL) {
+ pm->pm_tsb = (struct tte *)kmem_alloc_pageable(kernel_map,
+ PAGE_SIZE_8K);
+ pm->pm_tsb_tte = tsb_kvtotte((vm_offset_t)pm->pm_tsb);
+ }
-boolean_t
-pmap_is_modified(vm_page_t m)
-{
+ /*
+ * Allocate an object for it.
+ */
+ if (pm->pm_tsb_obj == NULL)
+ pm->pm_tsb_obj = vm_object_allocate(OBJT_DEFAULT, 1);
- if (m->flags & PG_FICTITIOUS)
- return FALSE;
- return (pv_bit_test(m, TD_W));
-}
+ /*
+ * Allocate the tsb page.
+ */
+ m = vm_page_grab(pm->pm_tsb_obj, 0, VM_ALLOC_RETRY | VM_ALLOC_ZERO);
+ if ((m->flags & PG_ZERO) == 0)
+ pmap_zero_page(VM_PAGE_TO_PHYS(m));
-void
-pmap_clear_reference(vm_page_t m)
-{
+ m->wire_count++;
+ cnt.v_wire_count++;
- if (m->flags & PG_FICTITIOUS)
- return;
- pv_bit_clear(m, TD_REF);
+ vm_page_flag_clear(m, PG_MAPPED | PG_BUSY);
+ m->valid = VM_PAGE_BITS_ALL;
+
+ pmap_kenter((vm_offset_t)pm->pm_tsb, VM_PAGE_TO_PHYS(m));
+
+ pm->pm_active = 0;
+ pm->pm_context = pmap_context_alloc();
+ pm->pm_count = 1;
+ TAILQ_INIT(&pm->pm_pvlist);
+ bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}
-int
-pmap_ts_referenced(vm_page_t m)
+void
+pmap_pinit2(pmap_t pmap)
{
-
- if (m->flags & PG_FICTITIOUS)
- return (0);
- return (pv_bit_count(m, TD_REF));
+ /* XXX: Remove this stub when no longer called */
}
+/*
+ * Release any resources held by the given physical map.
+ * Called when a pmap initialized by pmap_pinit is being released.
+ * Should only be called if the map contains no valid mappings.
+ */
void
-pmap_activate(struct thread *td)
+pmap_release(pmap_t pm)
{
- struct vmspace *vm;
- struct stte *stp;
- struct proc *p;
- pmap_t pm;
+#ifdef INVARIANTS
+ pv_entry_t pv;
+#endif
+ vm_object_t obj;
+ vm_page_t m;
- p = td->td_proc;
- vm = p->p_vmspace;
- pm = &vm->vm_pmap;
- stp = &pm->pm_stte;
- KASSERT(stp->st_tte.tte_data & TD_V,
- ("pmap_copy: dst_pmap not initialized"));
- tlb_store_slot(TLB_DTLB, (vm_offset_t)tsb_base(0), TLB_CTX_KERNEL,
- stp->st_tte, tsb_tlb_slot(0));
- if ((stp->st_tte.tte_data & TD_INIT) == 0) {
- tsb_page_init(tsb_base(0), 0);
- stp->st_tte.tte_data |= TD_INIT;
+ CTR2(KTR_PMAP, "pmap_release: ctx=%#x tsb=%p", pm->pm_context,
+ pm->pm_tsb);
+ obj = pm->pm_tsb_obj;
+ KASSERT(obj->ref_count == 1, ("pmap_release: tsbobj ref count != 1"));
+#ifdef INVARIANTS
+ if (!TAILQ_EMPTY(&pm->pm_pvlist)) {
+ TAILQ_FOREACH(pv, &pm->pm_pvlist, pv_plist) {
+ CTR3(KTR_PMAP, "pmap_release: m=%p va=%#lx pa=%#lx",
+ pv->pv_m, pv->pv_va, VM_PAGE_TO_PHYS(pv->pv_m));
+ }
+ panic("pmap_release: leaking pv entries");
}
- stxa(AA_DMMU_PCXR, ASI_DMMU, pm->pm_context);
- membar(Sync);
+#endif
+ KASSERT(pmap_resident_count(pm) == 0,
+ ("pmap_release: resident pages %ld != 0",
+ pmap_resident_count(pm)));
+ m = TAILQ_FIRST(&obj->memq);
+ pmap_context_destroy(pm->pm_context);
+ if (vm_page_sleep_busy(m, FALSE, "pmaprl"))
+ return;
+ vm_page_busy(m);
+ KASSERT(m->hold_count == 0, ("pmap_release: freeing held tsb page"));
+ pmap_kremove((vm_offset_t)pm->pm_tsb);
+ m->wire_count--;
+ cnt.v_wire_count--;
+ vm_page_free_zero(m);
}
-vm_offset_t
-pmap_addr_hint(vm_object_t object, vm_offset_t va, vm_size_t size)
+/*
+ * Grow the number of kernel page table entries. Unneeded.
+ */
+void
+pmap_growkernel(vm_offset_t addr)
{
+}
- return (va);
+/*
+ * Retire the given physical map from service. Pmaps are always allocated
+ * as part of a larger structure, so this never happens.
+ */
+void
+pmap_destroy(pmap_t pm)
+{
+ panic("pmap_destroy: unimplemented");
}
+/*
+ * Add a reference to the specified pmap.
+ */
void
-pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
+pmap_reference(pmap_t pm)
{
- /* XXX */
+ if (pm != NULL)
+ pm->pm_count++;
}
+/*
+ * This routine is very drastic, but can save the system
+ * in a pinch.
+ */
void
pmap_collect(void)
{
+ static int warningdone;
+ vm_page_t m;
+ int i;
+
+ if (pmap_pagedaemon_waken == 0)
+ return;
+ if (warningdone++ < 5)
+ printf("pmap_collect: collecting pv entries -- suggest "
+ "increasing PMAP_SHPGPERPROC\n");
+ for (i = 0; i < vm_page_array_size; i++) {
+ m = &vm_page_array[i];
+ if (m->wire_count || m->hold_count || m->busy ||
+ (m->flags & (PG_BUSY | PG_UNMANAGED)))
+ continue;
+ pv_remove_all(m);
+ }
+ pmap_pagedaemon_waken = 0;
}
+/*
+ * Remove the given range of addresses from the specified map.
+ */
void
-pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
- vm_size_t len, vm_offset_t src_addr)
+pmap_remove(pmap_t pm, vm_offset_t start, vm_offset_t end)
{
- /* XXX */
+ struct tte *tp;
+ vm_page_t m;
+
+ CTR3(KTR_PMAP, "pmap_remove: ctx=%#lx start=%#lx end=%#lx",
+ pm->pm_context, start, end);
+ for (; start < end; start += PAGE_SIZE) {
+ if ((tp = tsb_tte_lookup(pm, start)) != NULL) {
+ m = PHYS_TO_VM_PAGE(TD_GET_PA(tp->tte_data));
+ if ((tp->tte_data & TD_PV) != 0) {
+ if ((tp->tte_data & TD_W) != 0 &&
+ pmap_track_modified(pm, start))
+ vm_page_dirty(m);
+ if ((tp->tte_data & TD_REF) != 0)
+ vm_page_flag_set(m, PG_REFERENCED);
+ pv_remove(pm, m, start);
+ pmap_cache_remove(m, start);
+ }
+ atomic_clear_long(&tp->tte_data, TD_V);
+ tp->tte_tag = 0;
+ tp->tte_data = 0;
+ tlb_page_demap(TLB_ITLB | TLB_DTLB,
+ pm->pm_context, start);
+ }
+ }
}
/*
- * Copy a page of physical memory by temporarily mapping it into the tlb.
+ * Set the physical protection on the specified range of this map as requested.
*/
void
-pmap_copy_page(vm_offset_t src, vm_offset_t dst)
+pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{
- struct tte tte;
+ struct tte *tp;
+ vm_page_t m;
+ u_long data;
- CTR2(KTR_PMAP, "pmap_copy_page: src=%#lx dst=%#lx", src, dst);
+ CTR4(KTR_PMAP, "pmap_protect: ctx=%#lx sva=%#lx eva=%#lx prot=%#lx",
+ pm->pm_context, sva, eva, prot);
- tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(CADDR1);
- tte.tte_data = TD_V | TD_8K | TD_PA(src) | TD_L | TD_CP | TD_P | TD_W;
- tlb_store(TLB_DTLB, CADDR1, TLB_CTX_KERNEL, tte);
+ KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
+ ("pmap_protect: non current pmap"));
- tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(CADDR2);
- tte.tte_data = TD_V | TD_8K | TD_PA(dst) | TD_L | TD_CP | TD_P | TD_W;
- tlb_store(TLB_DTLB, CADDR2, TLB_CTX_KERNEL, tte);
+ if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
+ pmap_remove(pm, sva, eva);
+ return;
+ }
- bcopy((void *)CADDR1, (void *)CADDR2, PAGE_SIZE);
+ if (prot & VM_PROT_WRITE)
+ return;
+
+ for (; sva < eva; sva += PAGE_SIZE) {
+ if ((tp = tsb_tte_lookup(pm, sva)) != NULL) {
+ data = tp->tte_data;
+ if ((data & TD_PV) != 0) {
+ m = PHYS_TO_VM_PAGE(TD_GET_PA(data));
+ if ((data & TD_REF) != 0) {
+ vm_page_flag_set(m, PG_REFERENCED);
+ data &= ~TD_REF;
+ }
+ if ((data & TD_W) != 0 &&
+ pmap_track_modified(pm, sva)) {
+ vm_page_dirty(m);
+ }
+ }
+
+ data &= ~(TD_W | TD_SW);
- tlb_page_demap(TLB_DTLB, TLB_CTX_KERNEL, CADDR1);
- tlb_page_demap(TLB_DTLB, TLB_CTX_KERNEL, CADDR2);
+ CTR2(KTR_PMAP, "pmap_protect: new=%#lx old=%#lx",
+ data, tp->tte_data);
+
+ if (data != tp->tte_data) {
+ CTR0(KTR_PMAP, "pmap_protect: demap");
+ tlb_page_demap(TLB_DTLB | TLB_ITLB,
+ pm->pm_context, sva);
+ tp->tte_data = data;
+ }
+ }
+ }
}
/*
- * Zero a page of physical memory by temporarily mapping it into the tlb.
+ * Map the given physical page at the specified virtual address in the
+ * target pmap with the protection requested. If specified the page
+ * will be wired down.
*/
void
-pmap_zero_page(vm_offset_t pa)
+pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+ boolean_t wired)
{
+ struct tte otte;
struct tte tte;
+ struct tte *tp;
+ vm_offset_t pa;
+ vm_page_t om;
- CTR1(KTR_PMAP, "pmap_zero_page: pa=%#lx", pa);
+ pa = VM_PAGE_TO_PHYS(m);
+ CTR6(KTR_PMAP,
+ "pmap_enter: ctx=%p m=%p va=%#lx pa=%#lx prot=%#x wired=%d",
+ pm->pm_context, m, va, pa, prot, wired);
+
+ tte.tte_tag = TT_CTX(pm->pm_context) | TT_VA(va);
+ tte.tte_data = TD_V | TD_8K | TD_VA_LOW(va) | TD_PA(pa) | TD_CP;
+
+ /*
+ * If there is an existing mapping, and the physical address has not
+ * changed, must be protection or wiring change.
+ */
+ if ((tp = tsb_tte_lookup(pm, va)) != NULL) {
+ otte = *tp;
+ om = PHYS_TO_VM_PAGE(TD_GET_PA(otte.tte_data));
+
+ if (TD_GET_PA(otte.tte_data) == pa) {
+ CTR0(KTR_PMAP, "pmap_enter: update");
+
+ /*
+ * Wiring change, just update stats.
+ */
+ if (wired) {
+ if ((otte.tte_data & TD_WIRED) == 0)
+ pm->pm_stats.wired_count++;
+ } else {
+ if ((otte.tte_data & TD_WIRED) != 0)
+ pm->pm_stats.wired_count--;
+ }
+
+ if ((otte.tte_data & TD_CV) != 0)
+ tte.tte_data |= TD_CV;
+ if ((otte.tte_data & TD_REF) != 0)
+ tte.tte_data |= TD_REF;
+ if ((otte.tte_data & TD_PV) != 0) {
+ KASSERT((m->flags &
+ (PG_FICTITIOUS|PG_UNMANAGED)) == 0,
+ ("pmap_enter: unmanaged pv page"));
+ tte.tte_data |= TD_PV;
+ }
+ /*
+ * If we're turning off write protection, sense modify
+ * status and remove the old mapping.
+ */
+ if ((prot & VM_PROT_WRITE) == 0 &&
+ (otte.tte_data & (TD_W | TD_SW)) != 0) {
+ if ((otte.tte_data & TD_PV) != 0) {
+ if (pmap_track_modified(pm, va))
+ vm_page_dirty(m);
+ }
+ tlb_page_demap(TLB_DTLB | TLB_ITLB,
+ TT_GET_CTX(otte.tte_tag), va);
+ }
+ } else {
+ CTR0(KTR_PMAP, "pmap_enter: replace");
+
+ /*
+ * Mapping has changed, invalidate old range.
+ */
+ if (!wired && (otte.tte_data & TD_WIRED) != 0)
+ pm->pm_stats.wired_count--;
+
+ /*
+ * Enter on the pv list if part of our managed memory.
+ */
+ if ((otte.tte_data & TD_PV) != 0) {
+ KASSERT((m->flags &
+ (PG_FICTITIOUS|PG_UNMANAGED)) == 0,
+ ("pmap_enter: unmanaged pv page"));
+ if ((otte.tte_data & TD_REF) != 0)
+ vm_page_flag_set(om, PG_REFERENCED);
+ if ((otte.tte_data & TD_W) != 0 &&
+ pmap_track_modified(pm, va))
+ vm_page_dirty(om);
+ pv_remove(pm, om, va);
+ pv_insert(pm, m, va);
+ tte.tte_data |= TD_PV;
+ pmap_cache_remove(om, va);
+ if (pmap_cache_enter(m, va) != 0)
+ tte.tte_data |= TD_CV;
+ }
+ tlb_page_demap(TLB_DTLB | TLB_ITLB,
+ TT_GET_CTX(otte.tte_tag), va);
+ }
+ } else {
+ CTR0(KTR_PMAP, "pmap_enter: new");
+
+ /*
+ * Enter on the pv list if part of our managed memory.
+ */
+ if (pmap_initialized &&
+ (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
+ pv_insert(pm, m, va);
+ tte.tte_data |= TD_PV;
+ if (pmap_cache_enter(m, va) != 0)
+ tte.tte_data |= TD_CV;
+ }
+
+ /*
+ * Increment counters.
+ */
+ if (wired)
+ pm->pm_stats.wired_count++;
- tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(CADDR2);
- tte.tte_data = TD_V | TD_8K | TD_PA(pa) | TD_L | TD_CP | TD_P | TD_W;
- tlb_store(TLB_DTLB, CADDR2, TLB_CTX_KERNEL, tte);
- bzero((void *)CADDR2, PAGE_SIZE);
- tlb_page_demap(TLB_DTLB, TLB_CTX_KERNEL, CADDR2);
+ }
+
+ /*
+ * Now validate mapping with desired protection/wiring.
+ */
+ if (wired) {
+ tte.tte_data |= TD_REF | TD_WIRED;
+ if ((prot & VM_PROT_WRITE) != 0)
+ tte.tte_data |= TD_W;
+ }
+ if (pm->pm_context == TLB_CTX_KERNEL)
+ tte.tte_data |= TD_P;
+ if (prot & VM_PROT_WRITE)
+ tte.tte_data |= TD_SW;
+ if (prot & VM_PROT_EXECUTE) {
+ tte.tte_data |= TD_EXEC;
+#if 0
+ icache_inval_phys(pa, pa + PAGE_SIZE - 1);
+#endif
+ }
+
+ if (tp != NULL)
+ *tp = tte;
+ else
+ tsb_tte_enter(pm, m, va, tte);
}
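For reference, a sketch of how the tail of pmap_enter() folds the requested protection and wiring into tte bits, using placeholder bit values (the real TD_* and VM_PROT_* constants live in machine/tte.h and vm/vm.h); the point is the TD_SW versus TD_W split: TD_SW grants write permission in software, while TD_W is only set up front for wired writable mappings and is otherwise left for the write fault path to set.

#include <stdio.h>

/* Placeholder bit values; the real definitions are machine-dependent. */
#define TD_REF		0x01
#define TD_WIRED	0x02
#define TD_W		0x04
#define TD_P		0x08
#define TD_SW		0x10
#define TD_EXEC		0x20

#define VM_PROT_WRITE	0x2
#define VM_PROT_EXECUTE	0x4

static unsigned long
enter_bits(int kernel, int prot, int wired)
{
	unsigned long data = 0;

	if (wired) {
		/* Wired mappings are pre-referenced and, if writable,
		 * entered with the hardware write bit already set. */
		data |= TD_REF | TD_WIRED;
		if (prot & VM_PROT_WRITE)
			data |= TD_W;
	}
	if (kernel)
		data |= TD_P;		/* privileged mapping */
	if (prot & VM_PROT_WRITE)
		data |= TD_SW;		/* writable; TD_W filled in on fault */
	if (prot & VM_PROT_EXECUTE)
		data |= TD_EXEC;
	return (data);
}

int
main(void)
{
	printf("%#lx\n", enter_bits(0, VM_PROT_WRITE, 1));
	return (0);
}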
void
-pmap_zero_page_area(vm_offset_t pa, int off, int size)
+pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object,
+ vm_pindex_t pindex, vm_size_t size, int limit)
{
- struct tte tte;
+ KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
+ ("pmap_object_init_pt: non current pmap"));
+ /* XXX */
+}
- CTR3(KTR_PMAP, "pmap_zero_page_area: pa=%#lx off=%#x size=%#x",
- pa, off, size);
+void
+pmap_prefault(pmap_t pm, vm_offset_t va, vm_map_entry_t entry)
+{
+ KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
+ ("pmap_prefault: non current pmap"));
+ /* XXX */
+}
- KASSERT(off + size <= PAGE_SIZE, ("pmap_zero_page_area: bad off/size"));
- tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(CADDR2);
- tte.tte_data = TD_V | TD_8K | TD_PA(pa) | TD_L | TD_CP | TD_P | TD_W;
- tlb_store(TLB_DTLB, CADDR2, TLB_CTX_KERNEL, tte);
- bzero((char *)CADDR2 + off, size);
- tlb_page_demap(TLB_DTLB, TLB_CTX_KERNEL, CADDR2);
+/*
+ * Change the wiring attribute for a map/virtual-address pair.
+ * The mapping must already exist in the pmap.
+ */
+void
+pmap_change_wiring(pmap_t pm, vm_offset_t va, boolean_t wired)
+{
+ struct tte *tp;
+
+ if ((tp = tsb_tte_lookup(pm, va)) != NULL) {
+ if (wired) {
+ if ((tp->tte_data & TD_WIRED) == 0)
+ pm->pm_stats.wired_count++;
+ tp->tte_data |= TD_WIRED;
+ } else {
+ if ((tp->tte_data & TD_WIRED) != 0)
+ pm->pm_stats.wired_count--;
+ tp->tte_data &= ~TD_WIRED;
+ }
+ }
}
-vm_offset_t
-pmap_extract(pmap_t pmap, vm_offset_t va)
+void
+pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
+ vm_size_t len, vm_offset_t src_addr)
{
- struct stte *stp;
+ /* XXX */
+}
- stp = tsb_stte_lookup(pmap, va);
- if (stp == NULL)
- return (0);
- else
- return (TD_PA(stp->st_tte.tte_data) | (va & PAGE_MASK));
+/*
+ * Zero a page of physical memory by temporarily mapping it into the tlb.
+ */
+void
+pmap_zero_page(vm_offset_t pa)
+{
+
+ CTR1(KTR_PMAP, "pmap_zero_page: pa=%#lx", pa);
+ dcache_inval_phys(pa, pa + PAGE_SIZE);
+ aszero(ASI_PHYS_USE_EC, pa, PAGE_SIZE);
}
-vm_offset_t
-pmap_kextract(vm_offset_t va)
+void
+pmap_zero_page_area(vm_offset_t pa, int off, int size)
{
- struct stte *stp;
- stp = tsb_kvtostte(va);
- KASSERT((stp->st_tte.tte_data & TD_V) != 0,
- ("pmap_kextract: invalid virtual address 0x%lx", va));
- return (TD_PA(stp->st_tte.tte_data) | (va & PAGE_MASK));
+ CTR3(KTR_PMAP, "pmap_zero_page_area: pa=%#lx off=%#x size=%#x",
+ pa, off, size);
+ KASSERT(off + size <= PAGE_SIZE, ("pmap_zero_page_area: bad off/size"));
+ dcache_inval_phys(pa + off, pa + off + size);
+ aszero(ASI_PHYS_USE_EC, pa + off, size);
}
-int
-pmap_mincore(pmap_t pmap, vm_offset_t addr)
+/*
+ * Copy a page of physical memory by temporarily mapping it into the tlb.
+ */
+void
+pmap_copy_page(vm_offset_t src, vm_offset_t dst)
{
- TODO;
- return (0);
+
+ CTR2(KTR_PMAP, "pmap_copy_page: src=%#lx dst=%#lx", src, dst);
+ dcache_inval_phys(dst, dst + PAGE_SIZE);
+ ascopy(ASI_PHYS_USE_EC, src, dst, PAGE_SIZE);
}
+/*
+ * Make the specified page pageable (or not). Unneeded.
+ */
void
-pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
- vm_pindex_t pindex, vm_size_t size, int limit)
+pmap_pageable(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
+ boolean_t pageable)
{
- /* XXX */
}
+/*
+ * Return true if the physical page resides in the given pmap.
+ */
boolean_t
pmap_page_exists(pmap_t pm, vm_page_t m)
{
- vm_offset_t pstp;
- vm_offset_t pvh;
- vm_offset_t pa;
- u_long tag;
if (m->flags & PG_FICTITIOUS)
return (FALSE);
- pa = VM_PAGE_TO_PHYS(m);
- pvh = pv_lookup(pa);
- PV_LOCK();
- for (pstp = pvh_get_first(pvh); pstp != 0; pstp = pv_get_next(pstp)) {
- tag = pv_get_tte_tag(pstp);
- if (TT_GET_CTX(tag) == pm->pm_context) {
- PV_UNLOCK();
- return (TRUE);
- }
- }
- PV_UNLOCK();
- return (FALSE);
-}
-
-void
-pmap_prefault(pmap_t pm, vm_offset_t va, vm_map_entry_t entry)
-{
- KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
- ("pmap_prefault: non current pmap"));
- /* XXX */
+ return (pv_page_exists(pm, m));
}
+/*
+ * Remove all pages from the specified address space; this aids process exit
+ * speed. This is much faster than pmap_remove in the case of running down
+ * an entire address space. Only works for the current pmap.
+ */
void
-pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
+pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
- struct stte *stp;
+ struct tte *tp;
+ pv_entry_t npv;
+ pv_entry_t pv;
vm_page_t m;
- u_long data;
KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
- ("pmap_protect: non current pmap"));
+ ("pmap_remove_pages: non current pmap"));
+ npv = NULL;
+ for (pv = TAILQ_FIRST(&pm->pm_pvlist); pv != NULL; pv = npv) {
+ npv = TAILQ_NEXT(pv, pv_plist);
+ if (pv->pv_va >= eva || pv->pv_va < sva)
+ continue;
+ if ((tp = tsb_tte_lookup(pv->pv_pmap, pv->pv_va)) == NULL)
+ continue;
- if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
- pmap_remove(pm, sva, eva);
- return;
+ /*
+ * We cannot remove wired pages at this time.
+ */
+ if ((tp->tte_data & TD_WIRED) != 0)
+ continue;
+
+ atomic_clear_long(&tp->tte_data, TD_V);
+ tp->tte_tag = 0;
+ tp->tte_data = 0;
+
+ m = pv->pv_m;
+
+ pv->pv_pmap->pm_stats.resident_count--;
+ m->md.pv_list_count--;
+ pmap_cache_remove(m, pv->pv_va);
+ TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
+ TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
+ if (TAILQ_EMPTY(&m->md.pv_list))
+ vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
+ pv_free(pv);
}
+ tlb_context_primary_demap(TLB_DTLB | TLB_ITLB);
+}
- if (prot & VM_PROT_WRITE)
- return;
+/*
+ * Lower the permission for all mappings to a given page.
+ */
+void
+pmap_page_protect(vm_page_t m, vm_prot_t prot)
+{
- for (; sva < eva; sva += PAGE_SIZE) {
- if ((stp = tsb_stte_lookup(pm, sva)) != NULL) {
- data = stp->st_tte.tte_data;
- if ((data & TD_MNG) != 0) {
- m = NULL;
- if ((data & TD_REF) != 0) {
- m = PHYS_TO_VM_PAGE(TD_PA(data));
- vm_page_flag_set(m, PG_REFERENCED);
- data &= ~TD_REF;
- }
- if ((data & TD_W) != 0 &&
- pmap_track_modified(sva)) {
- if (m == NULL)
- m = PHYS_TO_VM_PAGE(TD_PA(data));
- vm_page_dirty(m);
- data &= ~TD_W;
- }
- }
-
- data &= ~TD_SW;
-
- if (data != stp->st_tte.tte_data) {
- stp->st_tte.tte_data = data;
- tsb_tte_local_remove(&stp->st_tte);
- }
- }
+ if ((prot & VM_PROT_WRITE) == 0) {
+ if (prot & (VM_PROT_READ | VM_PROT_EXECUTE))
+ pv_bit_clear(m, TD_W | TD_SW);
+ else
+ pv_remove_all(m);
}
}
@@ -1161,23 +1499,122 @@ pmap_phys_address(int ppn)
return (sparc64_ptob(ppn));
}
+int
+pmap_ts_referenced(vm_page_t m)
+{
+
+ if (m->flags & PG_FICTITIOUS)
+ return (0);
+ return (pv_bit_count(m, TD_REF));
+}
+
+boolean_t
+pmap_is_modified(vm_page_t m)
+{
+
+ if (m->flags & PG_FICTITIOUS)
+ return FALSE;
+ return (pv_bit_test(m, TD_W));
+}
+
void
-pmap_reference(pmap_t pm)
+pmap_clear_modify(vm_page_t m)
{
- if (pm != NULL)
- pm->pm_count++;
+
+ if (m->flags & PG_FICTITIOUS)
+ return;
+ pv_bit_clear(m, TD_W);
}
void
-pmap_release(pmap_t pm)
+pmap_clear_reference(vm_page_t m)
{
- /* XXX */
+
+ if (m->flags & PG_FICTITIOUS)
+ return;
+ pv_bit_clear(m, TD_REF);
}
+int
+pmap_mincore(pmap_t pm, vm_offset_t addr)
+{
+ TODO;
+ return (0);
+}
+
+/*
+ * Activate a user pmap. The pmap must be activated before its address space
+ * can be accessed in any way.
+ */
void
-pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
+pmap_activate(struct thread *td)
{
- KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
- ("pmap_remove_pages: non current pmap"));
- /* XXX */
+ vm_offset_t tsb;
+ u_long context;
+ u_long data;
+ pmap_t pm;
+
+ /*
+ * Load all the data we need up front to encourage the compiler to
+ * not issue any loads while we have interrupts disabled below.
+ */
+ pm = &td->td_proc->p_vmspace->vm_pmap;
+ context = pm->pm_context;
+ data = pm->pm_tsb_tte->tte_data;
+ tsb = (vm_offset_t)pm->pm_tsb;
+
+ KASSERT(context != 0, ("pmap_activate: activating nucleus context"));
+ KASSERT(context != -1, ("pmap_activate: steal context"));
+ KASSERT(pm->pm_active == 0, ("pmap_activate: pmap already active?"));
+
+ pm->pm_active |= 1 << PCPU_GET(cpuid);
+
+ wrpr(pstate, 0, PSTATE_MMU);
+ __asm __volatile("mov %0, %%g7" : : "r" (tsb));
+ wrpr(pstate, 0, PSTATE_NORMAL);
+ stxa(TLB_DEMAP_VA(tsb) | TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE,
+ ASI_DMMU_DEMAP, 0);
+ membar(Sync);
+ stxa(AA_DMMU_TAR, ASI_DMMU, tsb);
+ stxa(0, ASI_DTLB_DATA_IN_REG, data | TD_L);
+ membar(Sync);
+ stxa(AA_DMMU_PCXR, ASI_DMMU, context);
+ membar(Sync);
+ wrpr(pstate, 0, PSTATE_KERNEL);
+}
+
+vm_offset_t
+pmap_addr_hint(vm_object_t object, vm_offset_t va, vm_size_t size)
+{
+
+ return (va);
+}
+
+/*
+ * Allocate a hardware context number from the context map.
+ */
+static u_int
+pmap_context_alloc(void)
+{
+ u_int i;
+
+ i = pmap_context_base;
+ do {
+ if (pmap_context_map[i] == 0) {
+ pmap_context_map[i] = 1;
+ pmap_context_base = (i + 1) & (PMAP_CONTEXT_MAX - 1);
+ return (i);
+ }
+ } while ((i = (i + 1) & (PMAP_CONTEXT_MAX - 1)) != pmap_context_base);
+ panic("pmap_context_alloc");
+}
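A user-space sketch of the same round-robin scan (CONTEXT_MAX is an illustrative power of two and -1 stands in for the panic); keeping pmap_context_base just past the last allocation spreads context numbers around instead of always reusing the lowest free one.

#include <stdio.h>

#define CONTEXT_MAX	8	/* illustrative; must be a power of two */

static char context_map[CONTEXT_MAX];
static unsigned int context_base;

/* Returns a free context number, or -1 when the map is exhausted. */
static int
context_alloc(void)
{
	unsigned int i;

	i = context_base;
	do {
		if (context_map[i] == 0) {
			context_map[i] = 1;
			context_base = (i + 1) & (CONTEXT_MAX - 1);
			return (i);
		}
	} while ((i = (i + 1) & (CONTEXT_MAX - 1)) != context_base);
	return (-1);
}

static void
context_destroy(unsigned int i)
{
	context_map[i] = 0;
}

int
main(void)
{
	printf("%d\n", context_alloc());	/* 0 */
	printf("%d\n", context_alloc());	/* 1 */
	context_destroy(0);
	printf("%d\n", context_alloc());	/* 2: search resumes at the base */
	return (0);
}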
+
+/*
+ * Free a hardware context number back to the context map.
+ */
+static void
+pmap_context_destroy(u_int i)
+{
+
+ pmap_context_map[i] = 0;
}