author		Michael Zhilin <mizhka@FreeBSD.org>	2017-12-04 08:08:55 +0000
committer	Michael Zhilin <mizhka@FreeBSD.org>	2017-12-04 08:08:55 +0000
commit		0db2102aaad0f26fb8681ce0486a87a9f053fec8 (patch)
tree		ea861c0bc1d2f46678389a85b0de4f90eaa3b82b /sys
parent		6f3d4ec84d392c351926b7a02bfcd2001e78d4a2 (diff)
[mips] [vm] restore translation of freelist to flind for page allocation
Commit r326346 moved the domain iterators from the physical layer up to the
vm_page layer, but it also dropped the translation of freelist to flind for
the vm_page_alloc_freelist() call: before the change the function expected a
VM_FREELIST_* constant, afterwards it expects a freelist index (flind). On
small WiFi boxes with only a few megabytes of RAM there is just one freelist,
VM_FREELIST_LOWMEM (1), and no VM_FREELIST_DEFAULT (0) (see
sys/mips/include/vmparam.h), so freelist 1 corresponds to flind 0.

First, this commit renames flind to freelist in vm_page_alloc_freelist() to
avoid confusion about the input parameter. Then, in the physical layer, it
restores the translation so that the freelist parameter is handled correctly.

Reported by:	landonf
Reviewed by:	jeff
Differential Revision:	https://reviews.freebsd.org/D13351
Notes:
    svn path=/head/; revision=326508
Diffstat (limited to 'sys')
-rw-r--r--	sys/vm/vm_page.c	 8
-rw-r--r--	sys/vm/vm_phys.c	17
2 files changed, 15 insertions, 10 deletions
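
To make the mismatch concrete, here is a small standalone C sketch (an
illustration only, not FreeBSD source). The constant values mirror the ones
the commit message cites for sys/mips/include/vmparam.h, and
vm_freelist_to_flind is the table the patch consults; build_flind_table() and
the populated[] array are hypothetical stand-ins for the boot-time setup that
hands out dense flind values to the freelists that actually received memory.

#include <stdio.h>

/*
 * Symbolic freelist IDs as cited in the commit message for
 * sys/mips/include/vmparam.h: VM_FREELIST_DEFAULT is 0 and
 * VM_FREELIST_LOWMEM is 1.  Values are repeated here only for
 * illustration.
 */
#define	VM_NFREELIST		2
#define	VM_FREELIST_DEFAULT	0
#define	VM_FREELIST_LOWMEM	1

/* Dense index into the free queues; -1 means the freelist is absent. */
static int vm_freelist_to_flind[VM_NFREELIST] = { -1, -1 };

/*
 * Hypothetical stand-in for the boot-time table setup: walk the
 * freelists that actually received memory and assign dense flind
 * values in order.
 */
static void
build_flind_table(const int *populated, int n)
{
	int freelist, flind;

	flind = 0;
	for (freelist = 0; freelist < n; freelist++)
		if (populated[freelist])
			vm_freelist_to_flind[freelist] = flind++;
}

int
main(void)
{
	/* Small WiFi box: no DEFAULT memory, only LOWMEM is populated. */
	int populated[VM_NFREELIST] = { 0, 1 };
	int freelist;

	build_flind_table(populated, VM_NFREELIST);

	for (freelist = 0; freelist < VM_NFREELIST; freelist++)
		printf("freelist %d -> flind %d\n", freelist,
		    vm_freelist_to_flind[freelist]);

	/*
	 * Passing VM_FREELIST_LOWMEM (1) straight through as a flind
	 * would index a queue that does not exist on this box; the
	 * table lookup yields the correct dense index 0, which is the
	 * translation the patch restores in vm_phys_alloc_freelist_pages().
	 */
	return (0);
}

The sketch prints "freelist 0 -> flind -1" and "freelist 1 -> flind 0", which
is exactly the lookup that the restored flind = vm_freelist_to_flind[freelist]
line performs before vm_phys_free_queues[domain][flind][pool] is dereferenced.
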
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 4711af9d16de..0397dfef4575 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -2043,7 +2043,7 @@ vm_page_alloc_check(vm_page_t m)
  * VM_ALLOC_ZERO prefer a zeroed page
  */
 vm_page_t
-vm_page_alloc_freelist(int flind, int req)
+vm_page_alloc_freelist(int freelist, int req)
 {
 	struct vm_domain_iterator vi;
 	vm_page_t m;
@@ -2056,7 +2056,7 @@ vm_page_alloc_freelist(int flind, int req)
 	while (vm_domain_iterator_run(&vi, &domain) == 0) {
 		if (vm_domain_iterator_isdone(&vi))
 			req |= wait;
-		m = vm_page_alloc_freelist_domain(domain, flind, req);
+		m = vm_page_alloc_freelist_domain(domain, freelist, req);
 		if (m != NULL)
 			break;
 	}
@@ -2066,7 +2066,7 @@ vm_page_alloc_freelist(int flind, int req)
 }
 
 vm_page_t
-vm_page_alloc_freelist_domain(int domain, int flind, int req)
+vm_page_alloc_freelist_domain(int domain, int freelist, int req)
 {
 	vm_page_t m;
 	u_int flags, free_count;
@@ -2090,7 +2090,7 @@ again:
 	    vm_cnt.v_free_count > vm_cnt.v_interrupt_free_min) ||
 	    (req_class == VM_ALLOC_INTERRUPT &&
 	    vm_cnt.v_free_count > 0))
-		m = vm_phys_alloc_freelist_pages(domain, flind,
+		m = vm_phys_alloc_freelist_pages(domain, freelist,
 		    VM_FREEPOOL_DIRECT, 0);
 	if (m == NULL) {
 		if (vm_page_alloc_fail(NULL, req))
diff --git a/sys/vm/vm_phys.c b/sys/vm/vm_phys.c
index 2bfaf92f0234..4f115153d5eb 100644
--- a/sys/vm/vm_phys.c
+++ b/sys/vm/vm_phys.c
@@ -603,10 +603,10 @@ vm_page_t
 vm_phys_alloc_pages(int domain, int pool, int order)
 {
 	vm_page_t m;
-	int flind;
+	int freelist;
 
-	for (flind = 0; flind < vm_nfreelists; flind++) {
-		m = vm_phys_alloc_freelist_pages(domain, flind, pool, order);
+	for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
+		m = vm_phys_alloc_freelist_pages(domain, freelist, pool, order);
 		if (m != NULL)
 			return (m);
 	}
@@ -621,16 +621,16 @@ vm_phys_alloc_pages(int domain, int pool, int order)
  * The free page queues must be locked.
  */
 vm_page_t
-vm_phys_alloc_freelist_pages(int domain, int flind, int pool, int order)
+vm_phys_alloc_freelist_pages(int domain, int freelist, int pool, int order)
 {
 	struct vm_freelist *alt, *fl;
 	vm_page_t m;
-	int oind, pind;
+	int oind, pind, flind;
 
 	KASSERT(domain >= 0 && domain < vm_ndomains,
 	    ("vm_phys_alloc_freelist_pages: domain %d is out of range",
 	    domain));
-	KASSERT(flind < VM_NFREELIST,
+	KASSERT(freelist < VM_NFREELIST,
 	    ("vm_phys_alloc_freelist_pages: freelist %d is out of range",
 	    flind));
 	KASSERT(pool < VM_NFREEPOOL,
@@ -638,6 +638,11 @@ vm_phys_alloc_freelist_pages(int domain, int flind, int pool, int order)
 	KASSERT(order < VM_NFREEORDER,
 	    ("vm_phys_alloc_freelist_pages: order %d is out of range", order));
 
+	flind = vm_freelist_to_flind[freelist];
+	/* Check if freelist is present */
+	if (flind < 0)
+		return (NULL);
+
 	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
 	fl = &vm_phys_free_queues[domain][flind][pool][0];
 	for (oind = order; oind < VM_NFREEORDER; oind++) {