diff options
author | Peter Wemm <peter@FreeBSD.org> | 2002-08-03 01:02:37 +0000 |
---|---|---|
committer | Peter Wemm <peter@FreeBSD.org> | 2002-08-03 01:02:37 +0000 |
commit | 8f1586dd655db6b5dd75efdbb0da07a59e70577a (patch) | |
tree | f7b6181bd37c9f08bc0942a27e7c2acbc3a6620b /sys | |
parent | 1af04fadd157e873398b1c01497c441decd84434 (diff) | |
download | src-8f1586dd655db6b5dd75efdbb0da07a59e70577a.tar.gz src-8f1586dd655db6b5dd75efdbb0da07a59e70577a.zip |
Take advantage of the fact that there is a small 1MB direct mapped region
on x86 in between KERNBASE and the kernel load address. pmap_mapdev()
can return pointers to this for devices operating in the isa "hole".
Notes:
svn path=/head/; revision=101249
Diffstat (limited to 'sys')
-rw-r--r-- | sys/amd64/amd64/pmap.c | 19 | ||||
-rw-r--r-- | sys/i386/i386/pmap.c | 19 |
2 files changed, 22 insertions(+), 16 deletions(-)
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c index ffd23d49aafa..409e4ec4a2d9 100644 --- a/sys/amd64/amd64/pmap.c +++ b/sys/amd64/amd64/pmap.c @@ -3282,27 +3282,28 @@ pmap_mapdev(pa, size) vm_size_t size; { vm_offset_t va, tmpva, offset; - pt_entry_t *pte; offset = pa & PAGE_MASK; - size = roundup(offset + size, PAGE_SIZE); + size = round_page(offset + size); + pa = trunc_page(pa); - GIANT_REQUIRED; + /* We have a 1MB direct mapped region at KERNBASE */ + if (pa < 0x00100000 && pa + size <= 0x00100000) + return (void *)(pa + KERNBASE); + GIANT_REQUIRED; va = kmem_alloc_pageable(kernel_map, size); if (!va) panic("pmap_mapdev: Couldn't alloc kernel virtual memory"); - pa = pa & PG_FRAME; for (tmpva = va; size > 0; ) { - pte = vtopte(tmpva); - *pte = pa | PG_RW | PG_V | pgeflag; + pmap_kenter(tmpva, pa); size -= PAGE_SIZE; tmpva += PAGE_SIZE; pa += PAGE_SIZE; } pmap_invalidate_range(kernel_pmap, va, tmpva); - return ((void *)(va + offset)); + return (void *)(va + offset); } void @@ -3315,7 +3316,9 @@ pmap_unmapdev(va, size) base = va & PG_FRAME; offset = va & PAGE_MASK; - size = roundup(offset + size, PAGE_SIZE); + size = round_page(offset + size); + if (base >= KERNBASE && va + size <= KERNBASE + 0x00100000) + return; /* direct mapped */ for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) { pte = vtopte(tmpva); *pte = 0; diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c index ffd23d49aafa..409e4ec4a2d9 100644 --- a/sys/i386/i386/pmap.c +++ b/sys/i386/i386/pmap.c @@ -3282,27 +3282,28 @@ pmap_mapdev(pa, size) vm_size_t size; { vm_offset_t va, tmpva, offset; - pt_entry_t *pte; offset = pa & PAGE_MASK; - size = roundup(offset + size, PAGE_SIZE); + size = round_page(offset + size); + pa = trunc_page(pa); - GIANT_REQUIRED; + /* We have a 1MB direct mapped region at KERNBASE */ + if (pa < 0x00100000 && pa + size <= 0x00100000) + return (void *)(pa + KERNBASE); + GIANT_REQUIRED; va = kmem_alloc_pageable(kernel_map, size); if (!va) 
panic("pmap_mapdev: Couldn't alloc kernel virtual memory"); - pa = pa & PG_FRAME; for (tmpva = va; size > 0; ) { - pte = vtopte(tmpva); - *pte = pa | PG_RW | PG_V | pgeflag; + pmap_kenter(tmpva, pa); size -= PAGE_SIZE; tmpva += PAGE_SIZE; pa += PAGE_SIZE; } pmap_invalidate_range(kernel_pmap, va, tmpva); - return ((void *)(va + offset)); + return (void *)(va + offset); } void @@ -3315,7 +3316,9 @@ pmap_unmapdev(va, size) base = va & PG_FRAME; offset = va & PAGE_MASK; - size = roundup(offset + size, PAGE_SIZE); + size = round_page(offset + size); + if (base >= KERNBASE && va + size <= KERNBASE + 0x00100000) + return; /* direct mapped */ for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) { pte = vtopte(tmpva); *pte = 0; |