author	Alan Cox <alc@FreeBSD.org>	2014-11-15 23:40:44 +0000
committer	Alan Cox <alc@FreeBSD.org>	2014-11-15 23:40:44 +0000
commit	271f0f1219b6fa658a127c47a6d7cae321cc9be1 (patch)
tree	0f1c627f32faa09851b8a412dd9586dabb0109ad /sys/vm/vm_phys.c
parent	eb81bf559cb718c7e2430ef24ab6187f6bb86362 (diff)
Enable the use of VM_PHYSSEG_SPARSE on amd64 and i386, making it the default
on i386 PAE.  Previously, VM_PHYSSEG_SPARSE could not be used on amd64 and
i386 because vm_page_startup() would not create vm_page structures for the
kernel page table pages allocated during pmap_bootstrap(), but those vm_page
structures are needed when the kernel attempts to promote the corresponding
kernel virtual addresses to superpage mappings.  To address this problem, a
new public function, vm_phys_add_seg(), is introduced and vm_phys_init() is
updated to reflect the creation of vm_phys_seg structures by calls to
vm_phys_add_seg().

Discussed with:	Svatopluk Kraus
MFC after:	3 weeks
Sponsored by:	EMC / Isilon Storage Division
Notes:
    svn path=/head/; revision=274556
Diffstat (limited to 'sys/vm/vm_phys.c')
-rw-r--r--	sys/vm/vm_phys.c	101
1 file changed, 56 insertions, 45 deletions
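
To make the new interface concrete before the diff, here is a minimal, hypothetical sketch (not part of this commit) of how machine-dependent startup code could hand its phys_avail[] ranges to the allocator through the new vm_phys_add_seg(). The helper name example_register_phys_ranges() is invented for illustration, and phys_avail[] is declared inline rather than pulled from the machine-dependent header that normally provides it.

/* Hypothetical caller of the new interface; illustration only. */
#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_phys.h>

/* Normally supplied by machine-dependent bootstrap code. */
extern vm_paddr_t phys_avail[];

static void
example_register_phys_ranges(void)
{
	int i;

	/*
	 * phys_avail[] holds page-aligned [start, end) pairs and is
	 * terminated by a zero entry; register each pair as a segment.
	 */
	for (i = 0; phys_avail[i + 1] != 0; i += 2)
		vm_phys_add_seg(phys_avail[i], phys_avail[i + 1]);
}

As the second hunk below shows, vm_phys_init() later walks the recorded segments and assigns each one's first_page, cumulatively from vm_page_array under VM_PHYSSEG_SPARSE or via PHYS_TO_VM_PAGE() otherwise.
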
diff --git a/sys/vm/vm_phys.c b/sys/vm/vm_phys.c
index be3d5be78079..95369a8dcb55 100644
--- a/sys/vm/vm_phys.c
+++ b/sys/vm/vm_phys.c
@@ -301,29 +301,19 @@ static void
 _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind, int domain)
{
	struct vm_phys_seg *seg;
-#ifdef VM_PHYSSEG_SPARSE
-	long pages;
-	int segind;

-	pages = 0;
-	for (segind = 0; segind < vm_phys_nsegs; segind++) {
-		seg = &vm_phys_segs[segind];
-		pages += atop(seg->end - seg->start);
-	}
-#endif
	KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
	    ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
	KASSERT(domain < vm_ndomains,
	    ("vm_phys_create_seg: invalid domain provided"));
	seg = &vm_phys_segs[vm_phys_nsegs++];
+	while (seg > vm_phys_segs && (seg - 1)->start >= end) {
+		*seg = *(seg - 1);
+		seg--;
+	}
	seg->start = start;
	seg->end = end;
	seg->domain = domain;
-#ifdef VM_PHYSSEG_SPARSE
-	seg->first_page = &vm_page_array[pages];
-#else
-	seg->first_page = PHYS_TO_VM_PAGE(start);
-#endif
	seg->free_queues = &vm_phys_free_queues[domain][flind];
}

@@ -357,47 +347,68 @@ vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind)
}

/*
- * Initialize the physical memory allocator.
+ * Add a physical memory segment.
 */
void
-vm_phys_init(void)
+vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end)
{
-	struct vm_freelist *fl;
-	int dom, flind, i, oind, pind;

-	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
+	KASSERT((start & PAGE_MASK) == 0,
+	    ("vm_phys_define_seg: start is not page aligned"));
+	KASSERT((end & PAGE_MASK) == 0,
+	    ("vm_phys_define_seg: end is not page aligned"));
#ifdef VM_FREELIST_ISADMA
-		if (phys_avail[i] < 16777216) {
-			if (phys_avail[i + 1] > 16777216) {
-				vm_phys_create_seg(phys_avail[i], 16777216,
-				    VM_FREELIST_ISADMA);
-				vm_phys_create_seg(16777216, phys_avail[i + 1],
-				    VM_FREELIST_DEFAULT);
-			} else {
-				vm_phys_create_seg(phys_avail[i],
-				    phys_avail[i + 1], VM_FREELIST_ISADMA);
-			}
-			if (VM_FREELIST_ISADMA >= vm_nfreelists)
-				vm_nfreelists = VM_FREELIST_ISADMA + 1;
+	if (start < 16777216) {
+		if (end > 16777216) {
+			vm_phys_create_seg(start, 16777216,
+			    VM_FREELIST_ISADMA);
+			vm_phys_create_seg(16777216, end, VM_FREELIST_DEFAULT);
		} else
+			vm_phys_create_seg(start, end, VM_FREELIST_ISADMA);
+		if (VM_FREELIST_ISADMA >= vm_nfreelists)
+			vm_nfreelists = VM_FREELIST_ISADMA + 1;
+	} else
#endif
#ifdef VM_FREELIST_HIGHMEM
-		if (phys_avail[i + 1] > VM_HIGHMEM_ADDRESS) {
-			if (phys_avail[i] < VM_HIGHMEM_ADDRESS) {
-				vm_phys_create_seg(phys_avail[i],
-				    VM_HIGHMEM_ADDRESS, VM_FREELIST_DEFAULT);
-				vm_phys_create_seg(VM_HIGHMEM_ADDRESS,
-				    phys_avail[i + 1], VM_FREELIST_HIGHMEM);
-			} else {
-				vm_phys_create_seg(phys_avail[i],
-				    phys_avail[i + 1], VM_FREELIST_HIGHMEM);
-			}
-			if (VM_FREELIST_HIGHMEM >= vm_nfreelists)
-				vm_nfreelists = VM_FREELIST_HIGHMEM + 1;
+	if (end > VM_HIGHMEM_ADDRESS) {
+		if (start < VM_HIGHMEM_ADDRESS) {
+			vm_phys_create_seg(start, VM_HIGHMEM_ADDRESS,
+			    VM_FREELIST_DEFAULT);
+			vm_phys_create_seg(VM_HIGHMEM_ADDRESS, end,
+			    VM_FREELIST_HIGHMEM);
		} else
+			vm_phys_create_seg(start, end, VM_FREELIST_HIGHMEM);
+		if (VM_FREELIST_HIGHMEM >= vm_nfreelists)
+			vm_nfreelists = VM_FREELIST_HIGHMEM + 1;
+	} else
+#endif
+		vm_phys_create_seg(start, end, VM_FREELIST_DEFAULT);
+}
+
+/*
+ * Initialize the physical memory allocator.
+ */
+void
+vm_phys_init(void)
+{
+	struct vm_freelist *fl;
+	struct vm_phys_seg *seg;
+#ifdef VM_PHYSSEG_SPARSE
+	long pages;
+#endif
+	int dom, flind, oind, pind, segind;
+
+#ifdef VM_PHYSSEG_SPARSE
+	pages = 0;
+#endif
+	for (segind = 0; segind < vm_phys_nsegs; segind++) {
+		seg = &vm_phys_segs[segind];
+#ifdef VM_PHYSSEG_SPARSE
+		seg->first_page = &vm_page_array[pages];
+		pages += atop(seg->end - seg->start);
+#else
+		seg->first_page = PHYS_TO_VM_PAGE(seg->start);
#endif
-		vm_phys_create_seg(phys_avail[i], phys_avail[i + 1],
-		    VM_FREELIST_DEFAULT);
	}
	for (dom = 0; dom < vm_ndomains; dom++) {
		for (flind = 0; flind < vm_nfreelists; flind++) {
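
A closing note on the first hunk: the while loop added to _vm_phys_create_seg() keeps vm_phys_segs[] ordered by start address. The new segment is appended after the last entry and then slid backward past every existing segment whose start is not below the new segment's end. The following standalone userland sketch (simplified types, hypothetical names, not kernel code) demonstrates the same insertion technique:

#include <stdio.h>

/* Simplified stand-ins for vm_paddr_t and struct vm_phys_seg. */
typedef unsigned long long paddr_t;
struct seg {
	paddr_t start;
	paddr_t end;
};

#define	MAX_SEGS	8	/* the kernel uses VM_PHYSSEG_MAX here */

static struct seg segs[MAX_SEGS];
static int nsegs;

/*
 * Append [start, end) and slide it backward so segs[] stays sorted by
 * start address.  No overflow check here; the kernel KASSERTs that
 * vm_phys_nsegs < VM_PHYSSEG_MAX before appending.
 */
static void
add_seg(paddr_t start, paddr_t end)
{
	struct seg *s;

	s = &segs[nsegs++];
	while (s > segs && (s - 1)->start >= end) {
		*s = *(s - 1);
		s--;
	}
	s->start = start;
	s->end = end;
}

int
main(void)
{
	int i;

	/* Registered out of order, as bootstrap code might do. */
	add_seg(0x400000, 0x800000);
	add_seg(0x000000, 0x200000);
	add_seg(0x200000, 0x400000);
	for (i = 0; i < nsegs; i++)
		printf("[%#llx, %#llx)\n", segs[i].start, segs[i].end);
	return (0);
}

Run, it prints the three ranges in ascending order even though they were added out of order, mirroring the sorted segment array that vm_phys_init() iterates over above.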