author:    Alan Cox <alc@FreeBSD.org>  2011-10-30 05:06:14 +0000
committer: Alan Cox <alc@FreeBSD.org>  2011-10-30 05:06:14 +0000
commit:    5c1f2cc4c2288defcf734f982c9dcbe94938eae4 (patch)
tree:      c68c62addf3e0c34caba615d9beb3884e1aa3b90 /sys
parent:    2bb64536627ea2882950cd3fbbc077f8c6bf6bb5 (diff)
Eliminate vm_phys_bootstrap_alloc(). It was a failed attempt at
eliminating duplicated code in the various pmap implementations.

Micro-optimize vm_phys_free_pages().

Introduce vm_phys_free_contig(). It is a fast routine for freeing an
arbitrary number of physically contiguous pages. In particular, it doesn't
require the number of pages to be a power of two.

Use "u_long" instead of "unsigned long".

Bruce Evans (bde@) has convinced me that the "boundary" parameters to
kmem_alloc_contig(), vm_phys_alloc_contig(), and vm_reserv_reclaim_contig()
should be of type "vm_paddr_t" and not "u_long". Make this change.
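For illustration, here is a minimal userland sketch of the splitting strategy
that vm_phys_free_contig() uses: free the run in the largest naturally aligned
power-of-two chunks, first limited by the starting page's alignment, then by
the remaining count. demo_free(), MAX_ORDER, and the bit helpers are
hypothetical stand-ins for vm_phys_free_pages(), VM_NFREEORDER - 1, and
ffsl()/flsl(); this is not the kernel code itself.

#include <stdio.h>

#define MAX_ORDER 12	/* hypothetical stand-in for VM_NFREEORDER - 1 */

/* Index of the lowest set bit, capped at MAX_ORDER; MAX_ORDER when x is 0. */
static int
lowbit_order(unsigned long x)
{
	int b;

	if (x == 0)
		return (MAX_ORDER);
	for (b = 0; (x & 1) == 0 && b < MAX_ORDER; x >>= 1)
		b++;
	return (b);
}

/* Index of the highest set bit; x must be nonzero. */
static int
highbit_order(unsigned long x)
{
	int b;

	for (b = -1; x != 0; x >>= 1)
		b++;
	return (b);
}

/* Stands in for vm_phys_free_pages(m, order). */
static void
demo_free(unsigned long pfn, int order)
{
	printf("free %lu page(s) at pfn %lu (order %d)\n",
	    1UL << order, pfn, order);
}

int
main(void)
{
	unsigned long pfn = 5, npages = 300, n;
	int order;

	/* Chunks limited by the starting address's alignment. */
	for (;; npages -= n, pfn += n) {
		order = lowbit_order(pfn);
		n = 1UL << order;
		if (npages < n)
			break;
		demo_free(pfn, order);
	}
	/* Residual chunks limited by the remaining page count. */
	for (; npages > 0; npages -= n, pfn += n) {
		order = highbit_order(npages);
		n = 1UL << order;
		demo_free(pfn, order);
	}
	return (0);
}

With pfn = 5 and npages = 300 this prints runs of 1, 2, 8, 16, 32, 64, and
128 pages, then 32, 16, and 1 for the residue: ten calls instead of 300
order-0 frees.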
Notes:
    svn path=/head/; revision=226928
Diffstat (limited to 'sys')
-rw-r--r--  sys/vm/vm_contig.c |   3
-rw-r--r--  sys/vm/vm_extern.h |   2
-rw-r--r--  sys/vm/vm_phys.c   | 116
-rw-r--r--  sys/vm/vm_phys.h   |   7
-rw-r--r--  sys/vm/vm_reserv.c |   2
-rw-r--r--  sys/vm/vm_reserv.h |   3
6 files changed, 76 insertions, 57 deletions
diff --git a/sys/vm/vm_contig.c b/sys/vm/vm_contig.c
index 31aac3b1a9e2..81fe6a5c8f9c 100644
--- a/sys/vm/vm_contig.c
+++ b/sys/vm/vm_contig.c
@@ -335,7 +335,8 @@ contigmapping(vm_map_t map, vm_size_t size, vm_page_t m, vm_memattr_t memattr,
vm_offset_t
kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
- vm_paddr_t high, u_long alignment, u_long boundary, vm_memattr_t memattr)
+ vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
+ vm_memattr_t memattr)
{
vm_offset_t ret;
vm_page_t pages;
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index 373d57ecf47c..74a9b394d89c 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -44,7 +44,7 @@ vm_offset_t kmem_alloc(vm_map_t, vm_size_t);
vm_offset_t kmem_alloc_attr(vm_map_t map, vm_size_t size, int flags,
vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr);
vm_offset_t kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags,
- vm_paddr_t low, vm_paddr_t high, u_long alignment, u_long boundary,
+ vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
vm_memattr_t memattr);
vm_offset_t kmem_alloc_nofault(vm_map_t, vm_size_t);
vm_offset_t kmem_alloc_nofault_space(vm_map_t, vm_size_t, int);
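As an aside, a minimal sketch of the truncation hazard that motivates the
"u_long" to "vm_paddr_t" change in the prototype above: on an ILP32 target
such as i386 with PAE, physical addresses are 64 bits wide while unsigned
long is only 32. Here uint64_t stands in for vm_paddr_t and uint32_t for a
32-bit u_long; the example is illustrative, not kernel code.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* A caller asks that the allocation not cross a 4 GiB line. */
	uint64_t boundary = 1ULL << 32;		/* stands in for vm_paddr_t */
	uint32_t as_u_long = (uint32_t)boundary; /* what a 32-bit u_long keeps */

	printf("requested boundary: 0x%llx\n", (unsigned long long)boundary);
	printf("after truncation:   0x%x\n", as_u_long);	/* prints 0 */
	return (0);
}

A 4 GiB boundary silently becomes 0, which these interfaces treat as "no
boundary", so the constraint would be dropped rather than enforced.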
diff --git a/sys/vm/vm_phys.c b/sys/vm/vm_phys.c
index b47455d71b61..1793ed83259c 100644
--- a/sys/vm/vm_phys.c
+++ b/sys/vm/vm_phys.c
@@ -490,26 +490,6 @@ vm_phys_alloc_freelist_pages(int flind, int pool, int order)
}
/*
- * Allocate physical memory from phys_avail[].
- */
-vm_paddr_t
-vm_phys_bootstrap_alloc(vm_size_t size, unsigned long alignment)
-{
- vm_paddr_t pa;
- int i;
-
- size = round_page(size);
- for (i = 0; phys_avail[i + 1] != 0; i += 2) {
- if (phys_avail[i + 1] - phys_avail[i] < size)
- continue;
- pa = phys_avail[i];
- phys_avail[i] += size;
- return (pa);
- }
- panic("vm_phys_bootstrap_alloc");
-}
-
-/*
* Find the vm_page corresponding to the given physical address.
*/
vm_page_t
@@ -554,7 +534,7 @@ vm_phys_free_pages(vm_page_t m, int order)
{
struct vm_freelist *fl;
struct vm_phys_seg *seg;
- vm_paddr_t pa, pa_buddy;
+ vm_paddr_t pa;
vm_page_t m_buddy;
KASSERT(m->order == VM_NFREEORDER,
@@ -566,25 +546,26 @@ vm_phys_free_pages(vm_page_t m, int order)
KASSERT(order < VM_NFREEORDER,
("vm_phys_free_pages: order %d is out of range", order));
mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
- pa = VM_PAGE_TO_PHYS(m);
seg = &vm_phys_segs[m->segind];
- while (order < VM_NFREEORDER - 1) {
- pa_buddy = pa ^ (1 << (PAGE_SHIFT + order));
- if (pa_buddy < seg->start ||
- pa_buddy >= seg->end)
- break;
- m_buddy = &seg->first_page[atop(pa_buddy - seg->start)];
- if (m_buddy->order != order)
- break;
- fl = (*seg->free_queues)[m_buddy->pool];
- TAILQ_REMOVE(&fl[m_buddy->order].pl, m_buddy, pageq);
- fl[m_buddy->order].lcnt--;
- m_buddy->order = VM_NFREEORDER;
- if (m_buddy->pool != m->pool)
- vm_phys_set_pool(m->pool, m_buddy, order);
- order++;
- pa &= ~((1 << (PAGE_SHIFT + order)) - 1);
- m = &seg->first_page[atop(pa - seg->start)];
+ if (order < VM_NFREEORDER - 1) {
+ pa = VM_PAGE_TO_PHYS(m);
+ do {
+ pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
+ if (pa < seg->start || pa >= seg->end)
+ break;
+ m_buddy = &seg->first_page[atop(pa - seg->start)];
+ if (m_buddy->order != order)
+ break;
+ fl = (*seg->free_queues)[m_buddy->pool];
+ TAILQ_REMOVE(&fl[order].pl, m_buddy, pageq);
+ fl[order].lcnt--;
+ m_buddy->order = VM_NFREEORDER;
+ if (m_buddy->pool != m->pool)
+ vm_phys_set_pool(m->pool, m_buddy, order);
+ order++;
+ pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
+ m = &seg->first_page[atop(pa - seg->start)];
+ } while (order < VM_NFREEORDER - 1);
}
m->order = order;
fl = (*seg->free_queues)[m->pool];
@@ -593,6 +574,47 @@ vm_phys_free_pages(vm_page_t m, int order)
}
/*
+ * Free a contiguous, arbitrarily sized set of physical pages.
+ *
+ * The free page queues must be locked.
+ */
+void
+vm_phys_free_contig(vm_page_t m, u_long npages)
+{
+ u_int n;
+ int order;
+
+ /*
+ * Avoid unnecessary coalescing by freeing the pages in the largest
+ * possible power-of-two-sized subsets.
+ */
+ mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
+ for (;; npages -= n) {
+ /*
+ * Unsigned "min" is used here so that "order" is assigned
+ * "VM_NFREEORDER - 1" when "m"'s physical address is zero
+ * or the low-order bits of its physical address are zero
+ * because the size of a physical address exceeds the size of
+ * a long.
+ */
+ order = min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
+ VM_NFREEORDER - 1);
+ n = 1 << order;
+ if (npages < n)
+ break;
+ vm_phys_free_pages(m, order);
+ m += n;
+ }
+ /* The residual "npages" is less than "1 << (VM_NFREEORDER - 1)". */
+ for (; npages > 0; npages -= n) {
+ order = flsl(npages) - 1;
+ n = 1 << order;
+ vm_phys_free_pages(m, order);
+ m += n;
+ }
+}
+
+/*
* Set the pool for a contiguous, power of two-sized set of physical pages.
*/
void
@@ -728,14 +750,15 @@ vm_phys_zero_pages_idle(void)
* "alignment" and "boundary" must be a power of two.
*/
vm_page_t
-vm_phys_alloc_contig(unsigned long npages, vm_paddr_t low, vm_paddr_t high,
- unsigned long alignment, unsigned long boundary)
+vm_phys_alloc_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
+ u_long alignment, vm_paddr_t boundary)
{
struct vm_freelist *fl;
struct vm_phys_seg *seg;
struct vnode *vp;
vm_paddr_t pa, pa_last, size;
vm_page_t deferred_vdrop_list, m, m_ret;
+ u_long npages_end;
int domain, flind, i, oind, order, pind;
#if VM_NDOMAIN > 1
@@ -848,13 +871,10 @@ done:
deferred_vdrop_list = m;
}
}
- for (; i < roundup2(npages, 1 << imin(oind, order)); i++) {
- m = &m_ret[i];
- KASSERT(m->order == VM_NFREEORDER,
- ("vm_phys_alloc_contig: page %p has unexpected order %d",
- m, m->order));
- vm_phys_free_pages(m, 0);
- }
+ /* Return excess pages to the free lists. */
+ npages_end = roundup2(npages, 1 << imin(oind, order));
+ if (npages < npages_end)
+ vm_phys_free_contig(&m_ret[npages], npages_end - npages);
mtx_unlock(&vm_page_queue_free_mtx);
while (deferred_vdrop_list != NULL) {
vdrop((struct vnode *)deferred_vdrop_list->pageq.tqe_prev);
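For illustration, a minimal sketch of the buddy arithmetic that the reworked
vm_phys_free_pages() loop above leans on: XOR toggles the one bit that
distinguishes a block from its buddy, so a single variable replaces the old
pa/pa_buddy pair, and the explicit (vm_paddr_t)1 cast keeps the shift from
overflowing a plain int. uint64_t stands in for vm_paddr_t here, and
PAGE_SHIFT is assumed to be 12 (4 KiB pages).

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumes 4 KiB pages */

int
main(void)
{
	uint64_t pa = 0x5000;	/* stands in for a vm_paddr_t */
	int order = 0;

	/*
	 * Toggling bit (PAGE_SHIFT + order) flips between a block and its
	 * buddy, in either direction.
	 */
	pa ^= (uint64_t)1 << (PAGE_SHIFT + order);
	printf("buddy of 0x5000 at order 0: 0x%llx\n",
	    (unsigned long long)pa);	/* prints 0x4000 */

	/*
	 * After coalescing, mask down to the start of the merged block.
	 * Without the 64-bit cast, PAGE_SHIFT + order >= 31 would overflow
	 * a 32-bit int before the value ever widened.
	 */
	order++;
	pa &= ~(((uint64_t)1 << (PAGE_SHIFT + order)) - 1);
	printf("merged block starts at:    0x%llx\n",
	    (unsigned long long)pa);	/* prints 0x4000 */
	return (0);
}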
diff --git a/sys/vm/vm_phys.h b/sys/vm/vm_phys.h
index a5b9e93c0e4e..847a63328184 100644
--- a/sys/vm/vm_phys.h
+++ b/sys/vm/vm_phys.h
@@ -50,12 +50,11 @@ struct mem_affinity {
extern struct mem_affinity *mem_affinity;
void vm_phys_add_page(vm_paddr_t pa);
-vm_page_t vm_phys_alloc_contig(unsigned long npages,
- vm_paddr_t low, vm_paddr_t high,
- unsigned long alignment, unsigned long boundary);
+vm_page_t vm_phys_alloc_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
+ u_long alignment, vm_paddr_t boundary);
vm_page_t vm_phys_alloc_freelist_pages(int flind, int pool, int order);
vm_page_t vm_phys_alloc_pages(int pool, int order);
-vm_paddr_t vm_phys_bootstrap_alloc(vm_size_t size, unsigned long alignment);
+void vm_phys_free_contig(vm_page_t m, u_long npages);
void vm_phys_free_pages(vm_page_t m, int order);
void vm_phys_init(void);
void vm_phys_set_pool(int pool, vm_page_t m, int order);
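The vm_phys_free_contig() prototype added above is what lets
vm_phys_alloc_contig() hand back its over-allocation in a single call rather
than a page-at-a-time loop. A minimal sketch of that arithmetic, with
roundup2() from sys/param.h written out inline; the values of npages and
oind are illustrative only.

#include <stdio.h>

int
main(void)
{
	unsigned long npages = 13;	/* pages the caller asked for */
	int oind = 4;			/* order of the block actually taken */
	unsigned long chunk = 1UL << oind;	/* 16 pages */

	/* roundup2(x, y): round x up to the next multiple of power-of-two y. */
	unsigned long npages_end = (npages + chunk - 1) & ~(chunk - 1);

	if (npages < npages_end)	/* 3 excess pages, freed in one call */
		printf("free %lu excess page(s) starting at m_ret[%lu]\n",
		    npages_end - npages, npages);
	return (0);
}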
diff --git a/sys/vm/vm_reserv.c b/sys/vm/vm_reserv.c
index fec0539a97e5..12340ff6c79d 100644
--- a/sys/vm/vm_reserv.c
+++ b/sys/vm/vm_reserv.c
@@ -628,7 +628,7 @@ vm_reserv_reclaim_inactive(void)
*/
boolean_t
vm_reserv_reclaim_contig(vm_paddr_t size, vm_paddr_t low, vm_paddr_t high,
- unsigned long alignment, unsigned long boundary)
+ u_long alignment, vm_paddr_t boundary)
{
vm_paddr_t pa, pa_length;
vm_reserv_t rv;
diff --git a/sys/vm/vm_reserv.h b/sys/vm/vm_reserv.h
index 9eb1d0668a65..9ab95682558d 100644
--- a/sys/vm/vm_reserv.h
+++ b/sys/vm/vm_reserv.h
@@ -49,8 +49,7 @@ void vm_reserv_init(void);
int vm_reserv_level_iffullpop(vm_page_t m);
boolean_t vm_reserv_reactivate_page(vm_page_t m);
boolean_t vm_reserv_reclaim_contig(vm_paddr_t size, vm_paddr_t low,
- vm_paddr_t high, unsigned long alignment,
- unsigned long boundary);
+ vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
boolean_t vm_reserv_reclaim_inactive(void);
void vm_reserv_rename(vm_page_t m, vm_object_t new_object,
vm_object_t old_object, vm_pindex_t old_object_offset);