author     Jeff Roberson <jeff@FreeBSD.org>  2007-05-18 07:10:50 +0000
committer  Jeff Roberson <jeff@FreeBSD.org>  2007-05-18 07:10:50 +0000
commit     222d01951f8677015e3e96c6950e809c0d983c09 (patch)
tree       c94b660d4b9246fed8cbeadf7851932258d8b72a /sys/vm
parent     2b7e2ee7a505adbd7f7355bcaadd8573174464e6 (diff)
- define and use VMCNT_{GET,SET,ADD,SUB,PTR} macros for manipulating
  vmcnts.  These abstract away the pcpu details and switch every counter
  to atomic updates, so the sched lock is no longer responsible for
  protecting the counts in the switch routines.

Contributed by:	Attilio Rao <attilio@FreeBSD.org>
Notes: svn path=/head/; revision=169667
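The macro definitions themselves live in sys/sys/vmmeter.h, which is outside
the sys/vm diffstat below and therefore not shown here.  A minimal sketch of
what the family plausibly looks like, assuming the global "volatile struct
vmmeter cnt" declared in vm_meter.c below and the kernel's atomic(9)
primitives (the committed definitions may differ, e.g. by mapping onto the
per-CPU counters this commit introduces):

/*
 * Hypothetical sketch only -- not the committed definitions.
 */
#define	VMCNT			(&cnt)		/* base pointer, used by vcnt() */
#define	VMCNT_GET(member)	(cnt.v_ ## member)
#define	VMCNT_PTR(member)	(&cnt.v_ ## member)
#define	VMCNT_SET(member, val)	atomic_store_rel_int(&cnt.v_ ## member, (val))
#define	VMCNT_ADD(member, val)	atomic_add_int(&cnt.v_ ## member, (val))
#define	VMCNT_SUB(member, val)	atomic_subtract_int(&cnt.v_ ## member, (val))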
Diffstat (limited to 'sys/vm')
-rw-r--r--  sys/vm/swap_pager.c    16
-rw-r--r--  sys/vm/uma_core.c       2
-rw-r--r--  sys/vm/vm_contig.c      4
-rw-r--r--  sys/vm/vm_fault.c       3
-rw-r--r--  sys/vm/vm_glue.c        4
-rw-r--r--  sys/vm/vm_map.c         4
-rw-r--r--  sys/vm/vm_meter.c     122
-rw-r--r--  sys/vm/vm_mmap.c        2
-rw-r--r--  sys/vm/vm_object.c      2
-rw-r--r--  sys/vm/vm_page.c       70
-rw-r--r--  sys/vm/vm_pageout.c    94
-rw-r--r--  sys/vm/vm_pageq.c      12
-rw-r--r--  sys/vm/vm_zeroidle.c    7
-rw-r--r--  sys/vm/vnode_pager.c   19
14 files changed, 184 insertions, 177 deletions
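Nearly every hunk that follows applies the same mechanical substitution; for
example, in the swap_pager_getpages() path below:

	/* Before: plain increments on the global vmmeter. */
	cnt.v_swapin++;
	cnt.v_swappgsin += bp->b_npages;

	/* After: atomic updates through the new macros. */
	VMCNT_ADD(swapin, 1);
	VMCNT_ADD(swappgsin, bp->b_npages);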
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 906d3f79fd9a..5ef6a7000b02 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -385,7 +385,7 @@ swap_pager_swap_init(void)
* can hold 16 pages, so this is probably overkill. This reservation
* is typically limited to around 32MB by default.
*/
- n = cnt.v_page_count / 2;
+ n = VMCNT_GET(page_count) / 2;
if (maxswzone && n > maxswzone / sizeof(struct swblock))
n = maxswzone / sizeof(struct swblock);
n2 = n;
@@ -1037,8 +1037,8 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
}
bp->b_npages = j - i;
- cnt.v_swapin++;
- cnt.v_swappgsin += bp->b_npages;
+ VMCNT_ADD(swapin, 1);
+ VMCNT_ADD(swappgsin, bp->b_npages);
/*
* We still hold the lock on mreq, and our automatic completion routine
@@ -1072,7 +1072,7 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
vm_page_lock_queues();
vm_page_flag_set(mreq, PG_REFERENCED);
vm_page_unlock_queues();
- cnt.v_intrans++;
+ VMCNT_ADD(intrans, 1);
if (msleep(mreq, VM_OBJECT_MTX(object), PSWP, "swread", hz*20)) {
printf(
"swap_pager: indefinite wait buffer: bufobj: %p, blkno: %jd, size: %ld\n",
@@ -1263,8 +1263,8 @@ swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
bp->b_dirtyoff = 0;
bp->b_dirtyend = bp->b_bcount;
- cnt.v_swapout++;
- cnt.v_swappgsout += bp->b_npages;
+ VMCNT_ADD(swapout, 1);
+ VMCNT_ADD(swappgsout, bp->b_npages);
/*
* asynchronous
@@ -2135,8 +2135,8 @@ swapoff_one(struct swdevt *sp, struct thread *td)
* of data we will have to page back in, plus an epsilon so
* the system doesn't become critically low on swap space.
*/
- if (cnt.v_free_count + cnt.v_cache_count + swap_pager_avail <
- nblks + nswap_lowat) {
+ if (VMCNT_GET(free_count) + VMCNT_GET(cache_count) +
+ swap_pager_avail < nblks + nswap_lowat) {
return (ENOMEM);
}
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index eb00bfe3c3a1..dc87672520bb 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -271,7 +271,7 @@ SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
static void
bucket_enable(void)
{
- if (cnt.v_free_count < cnt.v_free_min)
+ if (VMCNT_GET(free_count) < VMCNT_GET(free_min))
bucketdisable = 1;
else
bucketdisable = 0;
diff --git a/sys/vm/vm_contig.c b/sys/vm/vm_contig.c
index b26c46f6c9ea..8278c141df5c 100644
--- a/sys/vm/vm_contig.c
+++ b/sys/vm/vm_contig.c
@@ -204,7 +204,7 @@ again:
* Find first page in array that is free, within range,
* aligned, and such that the boundary won't be crossed.
*/
- for (i = start; i < cnt.v_page_count; i++) {
+ for (i = start; i < VMCNT_GET(page_count); i++) {
phys = VM_PAGE_TO_PHYS(&pga[i]);
pqtype = pga[i].queue - pga[i].pc;
if (((pqtype == PQ_FREE) || (pqtype == PQ_CACHE)) &&
@@ -217,7 +217,7 @@ again:
/*
* If the above failed or we will exceed the upper bound, fail.
*/
- if ((i == cnt.v_page_count) ||
+ if ((i == VMCNT_GET(page_count)) ||
((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
mtx_unlock(&vm_page_queue_free_mtx);
/*
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 90ffa6fbbe5c..63d804aba9b8 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -1271,7 +1271,8 @@ vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
* try to do any readahead that we might have free pages for.
*/
if ((rahead + rbehind) >
- ((cnt.v_free_count + cnt.v_cache_count) - cnt.v_free_reserved)) {
+ ((VMCNT_GET(free_count) + VMCNT_GET(cache_count)) -
+ VMCNT_GET(free_reserved))) {
pagedaemon_wakeup();
marray[0] = m;
*reqpage = 0;
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index a3749bf4ee21..4981efcc0f02 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -219,7 +219,7 @@ vslock(void *addr, size_t len)
* Also, the sysctl code, which is the only present user
* of vslock(), does a hard loop on EAGAIN.
*/
- if (npages + cnt.v_wire_count > vm_page_max_wired)
+ if (npages + VMCNT_GET(wire_count) > vm_page_max_wired)
return (EAGAIN);
#endif
error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
@@ -589,7 +589,7 @@ vm_init_limits(udata)
limp->pl_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
limp->pl_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
/* limit the limit to no less than 2MB */
- rss_limit = max(cnt.v_free_count, 512);
+ rss_limit = max(VMCNT_GET(free_count), 512);
limp->pl_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
limp->pl_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index d22bcb061f72..9ffa896a9b9d 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -274,7 +274,7 @@ vmspace_alloc(min, max)
void
vm_init2(void)
{
- uma_zone_set_obj(kmapentzone, &kmapentobj, lmin(cnt.v_page_count,
+ uma_zone_set_obj(kmapentzone, &kmapentobj, lmin(VMCNT_GET(page_count),
(VM_MAX_KERNEL_ADDRESS - KERNBASE) / PAGE_SIZE) / 8 +
maxproc * 2 + maxfiles);
vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
@@ -1488,7 +1488,7 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
* free pages allocating pv entries.
*/
if ((flags & MAP_PREFAULT_MADVISE) &&
- cnt.v_free_count < cnt.v_free_reserved) {
+ VMCNT_GET(free_count) < VMCNT_GET(free_reserved)) {
psize = tmpidx;
break;
}
diff --git a/sys/vm/vm_meter.c b/sys/vm/vm_meter.c
index d4b51e727793..14c9f5d2262b 100644
--- a/sys/vm/vm_meter.c
+++ b/sys/vm/vm_meter.c
@@ -52,26 +52,26 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_object.h>
#include <sys/sysctl.h>
-struct vmmeter cnt;
+volatile struct vmmeter cnt;
int maxslp = MAXSLP;
SYSCTL_UINT(_vm, VM_V_FREE_MIN, v_free_min,
- CTLFLAG_RW, &cnt.v_free_min, 0, "");
+ CTLFLAG_RW, VMCNT_PTR(free_min), 0, "");
SYSCTL_UINT(_vm, VM_V_FREE_TARGET, v_free_target,
- CTLFLAG_RW, &cnt.v_free_target, 0, "");
+ CTLFLAG_RW, VMCNT_PTR(free_target), 0, "");
SYSCTL_UINT(_vm, VM_V_FREE_RESERVED, v_free_reserved,
- CTLFLAG_RW, &cnt.v_free_reserved, 0, "");
+ CTLFLAG_RW, VMCNT_PTR(free_reserved), 0, "");
SYSCTL_UINT(_vm, VM_V_INACTIVE_TARGET, v_inactive_target,
- CTLFLAG_RW, &cnt.v_inactive_target, 0, "");
+ CTLFLAG_RW, VMCNT_PTR(inactive_target), 0, "");
SYSCTL_UINT(_vm, VM_V_CACHE_MIN, v_cache_min,
- CTLFLAG_RW, &cnt.v_cache_min, 0, "");
+ CTLFLAG_RW, VMCNT_PTR(cache_min), 0, "");
SYSCTL_UINT(_vm, VM_V_CACHE_MAX, v_cache_max,
- CTLFLAG_RW, &cnt.v_cache_max, 0, "");
+ CTLFLAG_RW, VMCNT_PTR(cache_max), 0, "");
SYSCTL_UINT(_vm, VM_V_PAGEOUT_FREE_MIN, v_pageout_free_min,
- CTLFLAG_RW, &cnt.v_pageout_free_min, 0, "");
+ CTLFLAG_RW, VMCNT_PTR(pageout_free_min), 0, "");
SYSCTL_UINT(_vm, OID_AUTO, v_free_severe,
- CTLFLAG_RW, &cnt.v_free_severe, 0, "");
+ CTLFLAG_RW, VMCNT_PTR(free_severe), 0, "");
static int
sysctl_vm_loadavg(SYSCTL_HANDLER_ARGS)
@@ -235,7 +235,7 @@ vmtotal(SYSCTL_HANDLER_ARGS)
}
}
mtx_unlock(&vm_object_list_mtx);
- total.t_free = cnt.v_free_count + cnt.v_cache_count;
+ total.t_free = VMCNT_GET(free_count) + VMCNT_GET(cache_count);
return (sysctl_handle_opaque(oidp, &total, sizeof(total), req));
}
@@ -255,7 +255,7 @@ static int
vcnt(SYSCTL_HANDLER_ARGS)
{
int count = *(int *)arg1;
- int offset = (char *)arg1 - (char *)&cnt;
+ int offset = (char *)arg1 - (char *)VMCNT;
#ifdef SMP
int i;
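The vcnt() hunk above is truncated by the renderer.  Under SMP the handler
presumably continues by folding in each CPU's private copy of the counter,
found at the same byte offset inside the per-CPU vmmeter this commit
introduces; a hedged reconstruction of the elided body follows (pc_cnt and
the use of pcpu_find() are assumptions, not shown in this diff):

	for (i = 0; i < mp_ncpus; ++i) {
		struct pcpu *pcpu = pcpu_find(i);

		/* pc_cnt: assumed per-CPU struct vmmeter member. */
		count += *(int *)((char *)&pcpu->pc_cnt + offset);
	}
#endif
	return (SYSCTL_OUT(req, &count, sizeof(int)));
}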
@@ -280,101 +280,103 @@ static SYSCTL_NODE(_vm_stats, OID_AUTO, vm, CTLFLAG_RW, 0,
SYSCTL_NODE(_vm_stats, OID_AUTO, misc, CTLFLAG_RW, 0, "VM meter misc stats");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_swtch, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_swtch, 0, vcnt, "IU", "Context switches");
+ VMCNT_PTR(swtch), 0, vcnt, "IU", "Context switches");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_trap, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_trap, 0, vcnt, "IU", "Traps");
+ VMCNT_PTR(trap), 0, vcnt, "IU", "Traps");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_syscall, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_syscall, 0, vcnt, "IU", "Syscalls");
+ VMCNT_PTR(syscall), 0, vcnt, "IU", "Syscalls");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_intr, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_intr, 0, vcnt, "IU", "Hardware interrupts");
+ VMCNT_PTR(intr), 0, vcnt, "IU", "Hardware interrupts");
SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_soft, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_soft, 0, vcnt, "IU", "Software interrupts");
+ VMCNT_PTR(soft), 0, vcnt, "IU", "Software interrupts");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vm_faults, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_vm_faults, 0, vcnt, "IU", "VM faults");
+ VMCNT_PTR(vm_faults), 0, vcnt, "IU", "VM faults");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_cow_faults, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_cow_faults, 0, vcnt, "IU", "COW faults");
+ VMCNT_PTR(cow_faults), 0, vcnt, "IU", "COW faults");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_cow_optim, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_cow_optim, 0, vcnt, "IU", "Optimized COW faults");
+ VMCNT_PTR(cow_optim), 0, vcnt, "IU", "Optimized COW faults");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_zfod, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_zfod, 0, vcnt, "IU", "Zero fill");
+ VMCNT_PTR(zfod), 0, vcnt, "IU", "Zero fill");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_ozfod, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_ozfod, 0, vcnt, "IU", "Optimized zero fill");
+ VMCNT_PTR(ozfod), 0, vcnt, "IU", "Optimized zero fill");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swapin, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_swapin, 0, vcnt, "IU", "Swapin operations");
+ VMCNT_PTR(swapin), 0, vcnt, "IU", "Swapin operations");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swapout, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_swapout, 0, vcnt, "IU", "Swapout operations");
+ VMCNT_PTR(swapout), 0, vcnt, "IU", "Swapout operations");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swappgsin, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_swappgsin, 0, vcnt, "IU", "Swapin pages");
+ VMCNT_PTR(swappgsin), 0, vcnt, "IU", "Swapin pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swappgsout, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_swappgsout, 0, vcnt, "IU", "Swapout pages");
+ VMCNT_PTR(swappgsout), 0, vcnt, "IU", "Swapout pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodein, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_vnodein, 0, vcnt, "IU", "Vnodein operations");
+ VMCNT_PTR(vnodein), 0, vcnt, "IU", "Vnodein operations");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodeout, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_vnodeout, 0, vcnt, "IU", "Vnodeout operations");
+ VMCNT_PTR(vnodeout), 0, vcnt, "IU", "Vnodeout operations");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodepgsin, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_vnodepgsin, 0, vcnt, "IU", "Vnodein pages");
+ VMCNT_PTR(vnodepgsin), 0, vcnt, "IU", "Vnodein pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodepgsout, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_vnodepgsout, 0, vcnt, "IU", "Vnodeout pages");
+ VMCNT_PTR(vnodepgsout), 0, vcnt, "IU", "Vnodeout pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_intrans, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_intrans, 0, vcnt, "IU", "In transit page blocking");
+ VMCNT_PTR(intrans), 0, vcnt, "IU", "In transit page blocking");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_reactivated, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_reactivated, 0, vcnt, "IU", "Reactivated pages");
+ VMCNT_PTR(reactivated), 0, vcnt, "IU", "Reactivated pages");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_pdwakeups, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_pdwakeups, 0, vcnt, "IU", "Pagedaemon wakeups");
+ VMCNT_PTR(pdwakeups), 0, vcnt, "IU", "Pagedaemon wakeups");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_pdpages, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_pdpages, 0, vcnt, "IU", "Pagedaemon page scans");
+ VMCNT_PTR(pdpages), 0, vcnt, "IU", "Pagedaemon page scans");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_dfree, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_dfree, 0, vcnt, "IU", "");
+ VMCNT_PTR(dfree), 0, vcnt, "IU", "");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_pfree, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_pfree, 0, vcnt, "IU", "");
+ VMCNT_PTR(pfree), 0, vcnt, "IU", "");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_tfree, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_tfree, 0, vcnt, "IU", "");
+ VMCNT_PTR(tfree), 0, vcnt, "IU", "");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_page_size, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_page_size, 0, vcnt, "IU", "");
+ VMCNT_PTR(page_size), 0, vcnt, "IU", "");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_page_count, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_page_count, 0, vcnt, "IU", "");
+ VMCNT_PTR(page_count), 0, vcnt, "IU", "");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_free_reserved, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_free_reserved, 0, vcnt, "IU", "");
+ VMCNT_PTR(free_reserved), 0, vcnt, "IU", "");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_free_target, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_free_target, 0, vcnt, "IU", "");
+ VMCNT_PTR(free_target), 0, vcnt, "IU", "");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_free_min, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_free_min, 0, vcnt, "IU", "");
+ VMCNT_PTR(free_min), 0, vcnt, "IU", "");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_free_count, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_free_count, 0, vcnt, "IU", "");
+ VMCNT_PTR(free_count), 0, vcnt, "IU", "");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_wire_count, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_wire_count, 0, vcnt, "IU", "");
+ VMCNT_PTR(wire_count), 0, vcnt, "IU", "");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_active_count, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_active_count, 0, vcnt, "IU", "");
+ VMCNT_PTR(active_count), 0, vcnt, "IU", "");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_inactive_target, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_inactive_target, 0, vcnt, "IU", "");
+ VMCNT_PTR(inactive_target), 0, vcnt, "IU", "");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_inactive_count, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_inactive_count, 0, vcnt, "IU", "");
+ VMCNT_PTR(inactive_count), 0, vcnt, "IU", "");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_cache_count, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_cache_count, 0, vcnt, "IU", "");
+ VMCNT_PTR(cache_count), 0, vcnt, "IU", "");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_cache_min, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_cache_min, 0, vcnt, "IU", "");
+ VMCNT_PTR(cache_min), 0, vcnt, "IU", "");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_cache_max, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_cache_max, 0, vcnt, "IU", "");
+ VMCNT_PTR(cache_max), 0, vcnt, "IU", "");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_pageout_free_min, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_pageout_free_min, 0, vcnt, "IU", "");
-SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_interrupt_free_min, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_interrupt_free_min, 0, vcnt, "IU", "");
+ VMCNT_PTR(pageout_free_min), 0, vcnt, "IU", "");
+SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_interrupt_free_min, CTLTYPE_UINT |
+ CTLFLAG_RD, VMCNT_PTR(interrupt_free_min), 0, vcnt, "IU", "");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_forks, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_forks, 0, vcnt, "IU", "Number of fork() calls");
+ VMCNT_PTR(forks), 0, vcnt, "IU", "Number of fork() calls");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vforks, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_vforks, 0, vcnt, "IU", "Number of vfork() calls");
+ VMCNT_PTR(vforks), 0, vcnt, "IU", "Number of vfork() calls");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_rforks, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_rforks, 0, vcnt, "IU", "Number of rfork() calls");
+ VMCNT_PTR(rforks), 0, vcnt, "IU", "Number of rfork() calls");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_kthreads, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_kthreads, 0, vcnt, "IU", "Number of fork() calls by kernel");
+ VMCNT_PTR(kthreads), 0, vcnt, "IU",
+ "Number of fork() calls by kernel");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_forkpages, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_forkpages, 0, vcnt, "IU", "VM pages affected by fork()");
+ VMCNT_PTR(forkpages), 0, vcnt, "IU", "VM pages affected by fork()");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vforkpages, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_vforkpages, 0, vcnt, "IU", "VM pages affected by vfork()");
+ VMCNT_PTR(vforkpages), 0, vcnt, "IU", "VM pages affected by vfork()");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_rforkpages, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_rforkpages, 0, vcnt, "IU", "VM pages affected by rfork()");
+ VMCNT_PTR(rforkpages), 0, vcnt, "IU", "VM pages affected by rfork()");
SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_kthreadpages, CTLTYPE_UINT|CTLFLAG_RD,
- &cnt.v_kthreadpages, 0, vcnt, "IU", "VM pages affected by fork() by kernel");
+ VMCNT_PTR(kthreadpages), 0, vcnt, "IU",
+ "VM pages affected by fork() by kernel");
SYSCTL_INT(_vm_stats_misc, OID_AUTO,
zero_page_count, CTLFLAG_RD, &vm_page_zero_count, 0, "");
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index ff1ba18bea8f..901ff13d0ecd 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -974,7 +974,7 @@ mlock(td, uap)
return (ENOMEM);
}
PROC_UNLOCK(proc);
- if (npages + cnt.v_wire_count > vm_page_max_wired)
+ if (npages + VMCNT_GET(wire_count) > vm_page_max_wired)
return (EAGAIN);
error = vm_map_wire(&proc->p_vmspace->vm_map, start, end,
VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index dfcade1d8c6a..c12095adc366 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -655,7 +655,7 @@ vm_object_terminate(vm_object_t object)
"p->busy = %d, p->flags %x\n", p, p->busy, p->flags));
if (p->wire_count == 0) {
vm_page_free(p);
- cnt.v_pfree++;
+ VMCNT_ADD(pfree, 1);
} else {
vm_page_remove(p);
}
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index d4f8148812d6..f4f0a20e7f55 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -151,9 +151,9 @@ SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RD, &boot_pages, 0,
void
vm_set_page_size(void)
{
- if (cnt.v_page_size == 0)
- cnt.v_page_size = PAGE_SIZE;
- if (((cnt.v_page_size - 1) & cnt.v_page_size) != 0)
+ if (VMCNT_GET(page_size) == 0)
+ VMCNT_SET(page_size, PAGE_SIZE);
+ if (((VMCNT_GET(page_size) - 1) & VMCNT_GET(page_size)) != 0)
panic("vm_set_page_size: page size not a power of two");
}
@@ -357,8 +357,8 @@ vm_page_startup(vm_offset_t vaddr)
* last rather than first. On large-memory machines, this avoids
* the exhaustion of low physical memory before isa_dma_init has run.
*/
- cnt.v_page_count = 0;
- cnt.v_free_count = 0;
+ VMCNT_SET(page_count, 0);
+ VMCNT_SET(free_count, 0);
list = getenv("vm.blacklist");
for (i = 0; phys_avail[i + 1] != 0; i += 2) {
pa = phys_avail[i];
@@ -874,11 +874,11 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
loop:
mtx_lock(&vm_page_queue_free_mtx);
- if (cnt.v_free_count > cnt.v_free_reserved ||
+ if (VMCNT_GET(free_count) > VMCNT_GET(free_reserved) ||
(page_req == VM_ALLOC_SYSTEM &&
- cnt.v_cache_count == 0 &&
- cnt.v_free_count > cnt.v_interrupt_free_min) ||
- (page_req == VM_ALLOC_INTERRUPT && cnt.v_free_count > 0)) {
+ VMCNT_GET(cache_count) == 0 &&
+ VMCNT_GET(free_count) > VMCNT_GET(interrupt_free_min)) ||
+ (page_req == VM_ALLOC_INTERRUPT && VMCNT_GET(free_count) > 0)) {
/*
* Allocate from the free queue if the number of free pages
* exceeds the minimum for the request class.
@@ -893,9 +893,9 @@ loop:
*/
vm_page_lock_queues();
if ((m = vm_page_select_cache(color)) == NULL) {
- KASSERT(cnt.v_cache_count == 0,
+ KASSERT(VMCNT_GET(cache_count) == 0,
("vm_page_alloc: cache queue is missing %d pages",
- cnt.v_cache_count));
+ VMCNT_GET(cache_count)));
vm_page_unlock_queues();
atomic_add_int(&vm_pageout_deficit, 1);
pagedaemon_wakeup();
@@ -904,7 +904,8 @@ loop:
return (NULL);
mtx_lock(&vm_page_queue_free_mtx);
- if (cnt.v_free_count <= cnt.v_interrupt_free_min) {
+ if (VMCNT_GET(free_count) <=
+ VMCNT_GET(interrupt_free_min)) {
mtx_unlock(&vm_page_queue_free_mtx);
return (NULL);
}
@@ -954,7 +955,7 @@ loop:
else
m->oflags = VPO_BUSY;
if (req & VM_ALLOC_WIRED) {
- atomic_add_int(&cnt.v_wire_count, 1);
+ VMCNT_ADD(wire_count, 1);
m->wire_count = 1;
} else
m->wire_count = 0;
@@ -1000,8 +1001,8 @@ vm_wait(void)
vm_pages_needed = 1;
wakeup(&vm_pages_needed);
}
- msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PVM,
- "vmwait", 0);
+ msleep(VMCNT_PTR(free_count), &vm_page_queue_free_mtx, PDROP |
+ PVM, "vmwait", 0);
}
}
@@ -1024,7 +1025,7 @@ vm_waitpfault(void)
vm_pages_needed = 1;
wakeup(&vm_pages_needed);
}
- msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PUSER,
+ msleep(VMCNT_PTR(free_count), &vm_page_queue_free_mtx, PDROP | PUSER,
"pfault", 0);
}
@@ -1045,7 +1046,7 @@ vm_page_activate(vm_page_t m)
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if (VM_PAGE_GETKNOWNQUEUE2(m) != PQ_ACTIVE) {
if (VM_PAGE_INQUEUE1(m, PQ_CACHE))
- cnt.v_reactivated++;
+ VMCNT_ADD(reactivated, 1);
vm_pageq_remove(m);
if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
if (m->act_count < ACT_INIT)
@@ -1078,7 +1079,8 @@ vm_page_free_wakeup(void)
* some free.
*/
if (vm_pageout_pages_needed &&
- cnt.v_cache_count + cnt.v_free_count >= cnt.v_pageout_free_min) {
+ VMCNT_GET(cache_count) + VMCNT_GET(free_count) >=
+ VMCNT_GET(pageout_free_min)) {
wakeup(&vm_pageout_pages_needed);
vm_pageout_pages_needed = 0;
}
@@ -1089,7 +1091,7 @@ vm_page_free_wakeup(void)
*/
if (vm_pages_needed && !vm_page_count_min()) {
vm_pages_needed = 0;
- wakeup(&cnt.v_free_count);
+ wakeup(VMCNT_PTR(free_count));
}
}
@@ -1112,7 +1114,7 @@ vm_page_free_toq(vm_page_t m)
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
KASSERT(!pmap_page_is_mapped(m),
("vm_page_free_toq: freeing mapped page %p", m));
- cnt.v_tfree++;
+ VMCNT_ADD(tfree, 1);
if (m->busy || VM_PAGE_INQUEUE1(m, PQ_FREE)) {
printf(
@@ -1203,7 +1205,7 @@ vm_page_wire(vm_page_t m)
if (m->wire_count == 0) {
if ((m->flags & PG_UNMANAGED) == 0)
vm_pageq_remove(m);
- atomic_add_int(&cnt.v_wire_count, 1);
+ VMCNT_ADD(wire_count, 1);
}
m->wire_count++;
KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m));
@@ -1247,7 +1249,7 @@ vm_page_unwire(vm_page_t m, int activate)
if (m->wire_count > 0) {
m->wire_count--;
if (m->wire_count == 0) {
- atomic_subtract_int(&cnt.v_wire_count, 1);
+ VMCNT_SUB(wire_count, 1);
if (m->flags & PG_UNMANAGED) {
;
} else if (activate)
@@ -1286,7 +1288,7 @@ _vm_page_deactivate(vm_page_t m, int athead)
return;
if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
if (VM_PAGE_INQUEUE1(m, PQ_CACHE))
- cnt.v_reactivated++;
+ VMCNT_ADD(reactivated, 1);
vm_page_flag_clear(m, PG_WINATCFLS);
vm_pageq_remove(m);
if (athead)
@@ -1295,7 +1297,7 @@ _vm_page_deactivate(vm_page_t m, int athead)
TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
VM_PAGE_SETQUEUE2(m, PQ_INACTIVE);
vm_page_queues[PQ_INACTIVE].lcnt++;
- cnt.v_inactive_count++;
+ VMCNT_ADD(inactive_count, 1);
}
}
@@ -1780,16 +1782,16 @@ vm_page_cowsetup(vm_page_t m)
DB_SHOW_COMMAND(page, vm_page_print_page_info)
{
- db_printf("cnt.v_free_count: %d\n", cnt.v_free_count);
- db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
- db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
- db_printf("cnt.v_active_count: %d\n", cnt.v_active_count);
- db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
- db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
- db_printf("cnt.v_free_min: %d\n", cnt.v_free_min);
- db_printf("cnt.v_free_target: %d\n", cnt.v_free_target);
- db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
- db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
+ db_printf("cnt.v_free_count: %d\n", VMCNT_GET(free_count));
+ db_printf("cnt.v_cache_count: %d\n", VMCNT_GET(cache_count));
+ db_printf("cnt.v_inactive_count: %d\n", VMCNT_GET(inactive_count));
+ db_printf("cnt.v_active_count: %d\n", VMCNT_GET(active_count));
+ db_printf("cnt.v_wire_count: %d\n", VMCNT_GET(wire_count));
+ db_printf("cnt.v_free_reserved: %d\n", VMCNT_GET(free_reserved));
+ db_printf("cnt.v_free_min: %d\n", VMCNT_GET(free_min));
+ db_printf("cnt.v_free_target: %d\n", VMCNT_GET(free_target));
+ db_printf("cnt.v_cache_min: %d\n", VMCNT_GET(cache_min));
+ db_printf("cnt.v_inactive_target: %d\n", VMCNT_GET(inactive_target));
}
DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index c0611ba0570d..d3c14ba885c7 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -538,7 +538,7 @@ vm_pageout_object_deactivate_pages(pmap, first_object, desired)
goto unlock_return;
}
next = TAILQ_NEXT(p, listq);
- cnt.v_pdpages++;
+ VMCNT_ADD(pdpages, 1);
if (p->wire_count != 0 ||
p->hold_count != 0 ||
p->busy != 0 ||
@@ -739,13 +739,13 @@ vm_pageout_scan(int pass)
vm_page_lock_queues();
rescan0:
addl_page_shortage = addl_page_shortage_init;
- maxscan = cnt.v_inactive_count;
+ maxscan = VMCNT_GET(inactive_count);
for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
m != NULL && maxscan-- > 0 && page_shortage > 0;
m = next) {
- cnt.v_pdpages++;
+ VMCNT_ADD(pdpages, 1);
if (VM_PAGE_GETQUEUE(m) != PQ_INACTIVE) {
goto rescan0;
@@ -856,7 +856,7 @@ rescan0:
* Invalid pages can be easily freed
*/
vm_page_free(m);
- cnt.v_dfree++;
+ VMCNT_ADD(dfree, 1);
--page_shortage;
} else if (m->dirty == 0) {
/*
@@ -1043,8 +1043,8 @@ unlock_and_continue:
* Compute the number of pages we want to try to move from the
* active queue to the inactive queue.
*/
- page_shortage = vm_paging_target() +
- cnt.v_inactive_target - cnt.v_inactive_count;
+ page_shortage = vm_paging_target() + VMCNT_GET(inactive_target) -
+ VMCNT_GET(inactive_count);
page_shortage += addl_page_shortage;
/*
@@ -1052,7 +1052,7 @@ unlock_and_continue:
* track the per-page activity counter and use it to locate
* deactivation candidates.
*/
- pcount = cnt.v_active_count;
+ pcount = VMCNT_GET(active_count);
m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {
@@ -1089,7 +1089,7 @@ unlock_and_continue:
* The count for pagedaemon pages is done after checking the
* page for eligibility...
*/
- cnt.v_pdpages++;
+ VMCNT_ADD(pdpages, 1);
/*
* Check to see "how much" the page has been used.
@@ -1149,8 +1149,9 @@ unlock_and_continue:
*/
cache_cur = cache_last_free;
cache_first_failure = -1;
- while (cnt.v_free_count < cnt.v_free_reserved && (cache_cur =
- (cache_cur + PQ_PRIME2) & PQ_COLORMASK) != cache_first_failure) {
+ while (VMCNT_GET(free_count) < VMCNT_GET(free_reserved) &&
+ (cache_cur = (cache_cur + PQ_PRIME2) & PQ_COLORMASK) !=
+ cache_first_failure) {
TAILQ_FOREACH(m, &vm_page_queues[PQ_CACHE + cache_cur].pl,
pageq) {
KASSERT(m->dirty == 0,
@@ -1168,7 +1169,7 @@ unlock_and_continue:
m));
vm_page_free(m);
VM_OBJECT_UNLOCK(object);
- cnt.v_dfree++;
+ VMCNT_ADD(dfree, 1);
cache_last_free = cache_cur;
cache_first_failure = -1;
break;
@@ -1291,7 +1292,7 @@ unlock_and_continue:
sched_nice(bigproc, PRIO_MIN);
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(bigproc);
- wakeup(&cnt.v_free_count);
+ wakeup(VMCNT_PTR(free_count));
}
}
mtx_unlock(&Giant);
@@ -1314,16 +1315,18 @@ vm_pageout_page_stats()
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
page_shortage =
- (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
- (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
+ (VMCNT_GET(inactive_target) + VMCNT_GET(cache_max) +
+ VMCNT_GET(free_min)) - (VMCNT_GET(free_count) +
+ VMCNT_GET(inactive_count) + VMCNT_GET(cache_count));
if (page_shortage <= 0)
return;
- pcount = cnt.v_active_count;
+ pcount = VMCNT_GET(active_count);
fullintervalcount += vm_pageout_stats_interval;
if (fullintervalcount < vm_pageout_full_stats_interval) {
- tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
+ tpcount = (vm_pageout_stats_max * VMCNT_GET(active_count)) /
+ VMCNT_GET(page_count);
if (pcount > tpcount)
pcount = tpcount;
} else {
@@ -1409,8 +1412,8 @@ vm_pageout()
/*
* Initialize some paging parameters.
*/
- cnt.v_interrupt_free_min = 2;
- if (cnt.v_page_count < 2000)
+ VMCNT_SET(interrupt_free_min, 2);
+ if (VMCNT_GET(page_count) < 2000)
vm_pageout_page_count = 8;
/*
@@ -1418,17 +1421,16 @@ vm_pageout()
* swap pager structures plus enough for any pv_entry structs
* when paging.
*/
- if (cnt.v_page_count > 1024)
- cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
- else
- cnt.v_free_min = 4;
- cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
- cnt.v_interrupt_free_min;
- cnt.v_free_reserved = vm_pageout_page_count +
- cnt.v_pageout_free_min + (cnt.v_page_count / 768) + PQ_NUMCOLORS;
- cnt.v_free_severe = cnt.v_free_min / 2;
- cnt.v_free_min += cnt.v_free_reserved;
- cnt.v_free_severe += cnt.v_free_reserved;
+ VMCNT_SET(free_min, (VMCNT_GET(page_count) > 1024) ? (4 +
+ (VMCNT_GET(page_count) - 1024) / 200) : 4);
+ VMCNT_SET(pageout_free_min, (2 * MAXBSIZE) / PAGE_SIZE +
+ VMCNT_GET(interrupt_free_min));
+ VMCNT_SET(free_reserved, vm_pageout_page_count +
+ VMCNT_GET(pageout_free_min) + (VMCNT_GET(page_count) / 768) +
+ PQ_NUMCOLORS);
+ VMCNT_SET(free_severe, VMCNT_GET(free_min) / 2);
+ VMCNT_ADD(free_min, VMCNT_GET(free_reserved));
+ VMCNT_ADD(free_severe, VMCNT_GET(free_reserved));
/*
* v_free_target and v_cache_min control pageout hysteresis. Note
@@ -1441,29 +1443,27 @@ vm_pageout()
* be big enough to handle memory needs while the pageout daemon
* is signalled and run to free more pages.
*/
- if (cnt.v_free_count > 6144)
- cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved;
- else
- cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;
-
- if (cnt.v_free_count > 2048) {
- cnt.v_cache_min = cnt.v_free_target;
- cnt.v_cache_max = 2 * cnt.v_cache_min;
- cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
+ VMCNT_SET(free_target, ((VMCNT_GET(free_count) > 6144) ? 4 : 2) *
+ VMCNT_GET(free_min) + VMCNT_GET(free_reserved));
+
+ if (VMCNT_GET(free_count) > 2048) {
+ VMCNT_SET(cache_min, VMCNT_GET(free_target));
+ VMCNT_SET(cache_max, 2 * VMCNT_GET(cache_min));
+ VMCNT_SET(inactive_target, (3 * VMCNT_GET(free_target) / 2));
} else {
- cnt.v_cache_min = 0;
- cnt.v_cache_max = 0;
- cnt.v_inactive_target = cnt.v_free_count / 4;
+ VMCNT_SET(cache_min, 0);
+ VMCNT_SET(cache_max, 0);
+ VMCNT_SET(inactive_target, VMCNT_GET(free_count) / 4);
}
- if (cnt.v_inactive_target > cnt.v_free_count / 3)
- cnt.v_inactive_target = cnt.v_free_count / 3;
+ if (VMCNT_GET(inactive_target) > VMCNT_GET(free_count) / 3)
+ VMCNT_SET(inactive_target, VMCNT_GET(free_count) / 3);
/* XXX does not really belong here */
if (vm_page_max_wired == 0)
- vm_page_max_wired = cnt.v_free_count / 3;
+ vm_page_max_wired = VMCNT_GET(free_count) / 3;
if (vm_pageout_stats_max == 0)
- vm_pageout_stats_max = cnt.v_free_target;
+ vm_pageout_stats_max = VMCNT_GET(free_target);
/*
* Set interval in seconds for stats scan.
@@ -1489,7 +1489,7 @@ vm_pageout()
if (vm_pages_needed && !vm_page_count_min()) {
if (!vm_paging_needed())
vm_pages_needed = 0;
- wakeup(&cnt.v_free_count);
+ wakeup(VMCNT_PTR(free_count));
}
if (vm_pages_needed) {
/*
@@ -1524,7 +1524,7 @@ vm_pageout()
}
}
if (vm_pages_needed)
- cnt.v_pdwakeups++;
+ VMCNT_ADD(pdwakeups, 1);
mtx_unlock(&vm_page_queue_free_mtx);
vm_pageout_scan(pass);
}
diff --git a/sys/vm/vm_pageq.c b/sys/vm/vm_pageq.c
index 1b3e9a420ff4..6f6aadf46b03 100644
--- a/sys/vm/vm_pageq.c
+++ b/sys/vm/vm_pageq.c
@@ -140,14 +140,14 @@ vm_pageq_init(void)
vm_coloring_init();
for (i = 0; i < PQ_NUMCOLORS; ++i) {
- vm_page_queues[PQ_FREE+i].cnt = &cnt.v_free_count;
+ vm_page_queues[PQ_FREE+i].cnt = VMCNT_PTR(free_count);
}
for (i = 0; i < PQ_NUMCOLORS; ++i) {
- vm_page_queues[PQ_CACHE + i].cnt = &cnt.v_cache_count;
+ vm_page_queues[PQ_CACHE + i].cnt = VMCNT_PTR(cache_count);
}
- vm_page_queues[PQ_INACTIVE].cnt = &cnt.v_inactive_count;
- vm_page_queues[PQ_ACTIVE].cnt = &cnt.v_active_count;
- vm_page_queues[PQ_HOLD].cnt = &cnt.v_active_count;
+ vm_page_queues[PQ_INACTIVE].cnt = VMCNT_PTR(inactive_count);
+ vm_page_queues[PQ_ACTIVE].cnt = VMCNT_PTR(active_count);
+ vm_page_queues[PQ_HOLD].cnt = VMCNT_PTR(active_count);
for (i = 0; i < PQ_COUNT; i++) {
TAILQ_INIT(&vm_page_queues[i].pl);
@@ -192,7 +192,7 @@ vm_pageq_add_new_page(vm_paddr_t pa)
{
vm_page_t m;
- atomic_add_int(&cnt.v_page_count, 1);
+ VMCNT_ADD(page_count, 1);
m = PHYS_TO_VM_PAGE(pa);
m->phys_addr = pa;
m->flags = 0;
diff --git a/sys/vm/vm_zeroidle.c b/sys/vm/vm_zeroidle.c
index 5af84e01c188..29144ed99756 100644
--- a/sys/vm/vm_zeroidle.c
+++ b/sys/vm/vm_zeroidle.c
@@ -90,9 +90,10 @@ vm_page_zero_check(void)
* fast sleeps. We also do not want to be continuously zeroing
* pages because doing so may flush our L1 and L2 caches too much.
*/
- if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
+ if (zero_state && vm_page_zero_count >=
+ ZIDLE_LO(VMCNT_GET(free_count)))
return (0);
- if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
+ if (vm_page_zero_count >= ZIDLE_HI(VMCNT_GET(free_count)))
return (0);
return (1);
}
@@ -115,7 +116,7 @@ vm_page_zero_idle(void)
vm_pageq_enqueue(PQ_FREE + m->pc, m);
++vm_page_zero_count;
++cnt_prezero;
- if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
+ if (vm_page_zero_count >= ZIDLE_HI(VMCNT_GET(free_count)))
zero_state = 1;
}
free_rover = (free_rover + PQ_PRIME2) & PQ_COLORMASK;
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index cb4cc39098cb..837aa46cbb36 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -728,8 +728,8 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
if (i != reqpage)
vm_page_free(m[i]);
vm_page_unlock_queues();
- cnt.v_vnodein++;
- cnt.v_vnodepgsin++;
+ VMCNT_ADD(vnodein, 1);
+ VMCNT_ADD(vnodepgsin, 1);
error = vnode_pager_input_old(object, m[reqpage]);
VM_OBJECT_UNLOCK(object);
return (error);
@@ -757,8 +757,8 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
vm_page_free(m[i]);
vm_page_unlock_queues();
VM_OBJECT_UNLOCK(object);
- cnt.v_vnodein++;
- cnt.v_vnodepgsin++;
+ VMCNT_ADD(vnodein, 1);
+ VMCNT_ADD(vnodepgsin, 1);
return vnode_pager_input_smlfs(object, m[reqpage]);
}
@@ -909,8 +909,8 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
bp->b_runningbufspace = bp->b_bufsize;
atomic_add_int(&runningbufspace, bp->b_runningbufspace);
- cnt.v_vnodein++;
- cnt.v_vnodepgsin += count;
+ VMCNT_ADD(vnodein, 1);
+ VMCNT_ADD(vnodepgsin, count);
/* do the input */
bp->b_iooffset = dbtob(bp->b_blkno);
@@ -1031,7 +1031,8 @@ vnode_pager_putpages(object, m, count, sync, rtvals)
* daemon up. This should probably be addressed XXX.
*/
- if ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min)
+ if ((VMCNT_GET(free_count) + VMCNT_GET(cache_count)) <
+ VMCNT_GET(pageout_free_min))
sync |= OBJPC_SYNC;
/*
@@ -1157,8 +1158,8 @@ vnode_pager_generic_putpages(vp, m, bytecount, flags, rtvals)
auio.uio_resid = maxsize;
auio.uio_td = (struct thread *) 0;
error = VOP_WRITE(vp, &auio, ioflags, curthread->td_ucred);
- cnt.v_vnodeout++;
- cnt.v_vnodepgsout += ncount;
+ VMCNT_ADD(vnodeout, 1);
+ VMCNT_ADD(vnodepgsout, ncount);
if (error) {
if ((ppscheck = ppsratecheck(&lastfail, &curfail, 1)))