diff options
author | Thomas Moestl <tmm@FreeBSD.org> | 2002-04-02 17:50:13 +0000 |
---|---|---|
committer | Thomas Moestl <tmm@FreeBSD.org> | 2002-04-02 17:50:13 +0000 |
commit | 816663a0b3e0fc242461d58ffde923432824641a (patch) | |
tree | 9405a89120d52be01a8d9821ab90f4b5a21ec834 /sys/sparc64 | |
parent | 72a492cacff2c61c71bf4ea178e0ca8f810d3b47 (diff) | |
download | src-816663a0b3e0fc242461d58ffde923432824641a.tar.gz src-816663a0b3e0fc242461d58ffde923432824641a.zip |
Fix crashes that would happen when more than one 4MB page was used to
hold the kernel text, data and loader metadata by not using a fixed slot
to store the TSB page(s) into. Enter fake 8k page entries into the kernel
TSB that cover the 4M kernel page(s), so that pmap_kenter() will work
without having to treat these pages as a special case.
Problem reported by: mjacob, obrien
Problem spotted and 4M page handling proposed by: jake
Notes
Notes:
svn path=/head/; revision=93687
Diffstat (limited to 'sys/sparc64')
-rw-r--r-- | sys/sparc64/include/tlb.h | 5 | ||||
-rw-r--r-- | sys/sparc64/sparc64/pmap.c | 29 |
2 files changed, 13 insertions, 21 deletions
diff --git a/sys/sparc64/include/tlb.h b/sys/sparc64/include/tlb.h index aa4b18617d63..9a84de0275f3 100644 --- a/sys/sparc64/include/tlb.h +++ b/sys/sparc64/include/tlb.h @@ -29,11 +29,6 @@ #ifndef _MACHINE_TLB_H_ #define _MACHINE_TLB_H_ -#define TLB_SLOT_COUNT 64 /* XXX */ - -#define TLB_SLOT_TSB_KERNEL_MIN 62 /* XXX */ -#define TLB_SLOT_KERNEL 63 /* XXX */ - #define TLB_DAR_SLOT_SHIFT (3) #define TLB_DAR_SLOT(slot) ((slot) << TLB_DAR_SLOT_SHIFT) diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c index ad425b3b5963..c0dd72cd94eb 100644 --- a/sys/sparc64/sparc64/pmap.c +++ b/sys/sparc64/sparc64/pmap.c @@ -147,12 +147,6 @@ vm_offset_t virtual_end; vm_offset_t kernel_vm_end; /* - * The locked kernel page the kernel binary was loaded into. This will need - * to become a list later. - */ -vm_offset_t kernel_page; - -/* * Kernel pmap. */ struct pmap kernel_pmap_store; @@ -281,10 +275,6 @@ pmap_bootstrap(vm_offset_t ekva) virtual_avail = roundup2(ekva, PAGE_SIZE_4M); virtual_end = VM_MAX_KERNEL_ADDRESS; - /* Look up the page the kernel binary was loaded into. */ - kernel_page = TD_GET_PA(ldxa(TLB_DAR_SLOT(TLB_SLOT_KERNEL), - ASI_DTLB_DATA_ACCESS_REG)); - /* * Find out what physical memory is available from the prom and * initialize the phys_avail array. This must be done before @@ -327,6 +317,17 @@ pmap_bootstrap(vm_offset_t ekva) bzero(tsb_kernel, KVA_PAGES * PAGE_SIZE_4M); /* + * Enter fake 8k pages for the 4MB kernel pages, so that + * pmap_kextract() will work for them. + */ + for (i = 0; i < kernel_tlb_slots; i++) { + va = TV_GET_VA(kernel_ttes[i].tte_vpn); + pa = TD_GET_PA(kernel_ttes[i].tte_data); + for (off = 0; off < PAGE_SIZE_4M; off += PAGE_SIZE) + pmap_kenter(va + off, pa + off); + } + + /* * Allocate a kernel stack with guard page for thread0 and map it into * the kernel tsb. 
*/ @@ -414,7 +415,6 @@ pmap_map_tsb(void) vm_offset_t va; vm_offset_t pa; u_long data; - u_int slot; u_long s; int i; @@ -423,15 +423,14 @@ pmap_map_tsb(void) /* * Map the 4mb tsb pages. */ - slot = TLB_SLOT_TSB_KERNEL_MIN; - for (i = 0; i < KVA_PAGES; i++, slot++) { + for (i = 0; i < KVA_PAGES; i++) { va = (vm_offset_t)tsb_kernel + i * PAGE_SIZE_4M; pa = tsb_kernel_phys + i * PAGE_SIZE_4M; data = TD_V | TD_4M | TD_PA(pa) | TD_L | TD_CP | TD_CV | TD_P | TD_W; stxa(AA_DMMU_TAR, ASI_DMMU, TLB_TAR_VA(va) | TLB_TAR_CTX(TLB_CTX_KERNEL)); - stxa(TLB_DAR_SLOT(slot), ASI_DTLB_DATA_ACCESS_REG, data); + stxa(0, ASI_DTLB_DATA_IN_REG, data); membar(Sync); } @@ -598,8 +597,6 @@ pmap_kextract(vm_offset_t va) struct tte *tp; u_long d; - if (va >= KERNBASE && va < KERNBASE + PAGE_SIZE_4M) - return (kernel_page + (va & PAGE_MASK_4M)); tp = tsb_kvtotte(va); d = tp->tte_data; if ((d & TD_V) == 0) |