 sys/fs/smbfs/smbfs_io.c |  8 ++++----
 sys/kern/kern_synch.c   |  2 +-
 sys/kern/kern_thread.c  |  2 +-
 sys/kern/subr_trap.c    |  2 +-
 sys/nfsclient/nfs_bio.c |  8 ++++----
 sys/vm/swap_pager.c     | 10 +++++-----
 sys/vm/vm_object.c      |  2 +-
 sys/vm/vm_page.c        | 11 ++++++++---
 sys/vm/vm_pageout.c     | 17 +++++++++++------
 sys/vm/vnode_pager.c    | 16 ++++++++--------
 10 files changed, 44 insertions(+), 34 deletions(-)
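
This commit converts direct increments of the global vmmeter statistics (the cnt.v_* fields) to the PCPU_INC() and PCPU_ADD() macros, so each update touches only the current CPU's private copy of the counter. That removes the races inherent in an unlocked read-modify-write on a shared global and avoids cache-line contention on SMP, without paying for locked instructions. As a rough sketch of the semantics only (the real definitions live in <sys/pcpu.h> and are machine-dependent, e.g. a single %gs-relative add on i386/amd64; the machine-independent fallback below is illustrative, not the committed code):

#define	PCPU_ADD(member, value)	do {				\
	critical_enter();	/* keep us on this CPU */	\
	*PCPU_PTR(member) += (value);				\
	critical_exit();					\
} while (0)

#define	PCPU_INC(member)	PCPU_ADD(member, 1)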
diff --git a/sys/fs/smbfs/smbfs_io.c b/sys/fs/smbfs/smbfs_io.c
index 082f545ef2c2..98008be2b206 100644
--- a/sys/fs/smbfs/smbfs_io.c
+++ b/sys/fs/smbfs/smbfs_io.c
@@ -475,8 +475,8 @@ smbfs_getpages(ap)
kva = (vm_offset_t) bp->b_data;
pmap_qenter(kva, pages, npages);
- cnt.v_vnodein++;
- cnt.v_vnodepgsin += npages;
+ PCPU_INC(cnt.v_vnodein);
+ PCPU_ADD(cnt.v_vnodepgsin, npages);
iov.iov_base = (caddr_t) kva;
iov.iov_len = count;
@@ -626,8 +626,8 @@ smbfs_putpages(ap)
kva = (vm_offset_t) bp->b_data;
pmap_qenter(kva, pages, npages);
- cnt.v_vnodeout++;
- cnt.v_vnodepgsout += count;
+ PCPU_INC(cnt.v_vnodeout);
+ PCPU_ADD(cnt.v_vnodepgsout, count);
iov.iov_base = (caddr_t) kva;
iov.iov_len = count;
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index b75dcf29362d..c45e84692aba 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -412,7 +412,7 @@ mi_switch(int flags, struct thread *newtd)
td->td_runtime += new_switchtime - PCPU_GET(switchtime);
PCPU_SET(switchtime, new_switchtime);
td->td_generation++; /* bump preempt-detect counter */
- cnt.v_swtch++;
+ PCPU_INC(cnt.v_swtch);
PCPU_SET(switchticks, ticks);
CTR4(KTR_PROC, "mi_switch: old thread %ld (kse %p, pid %ld, %s)",
td->td_tid, td->td_sched, p->p_pid, p->p_comm);
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index dcb00b770954..12a077c4315e 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -405,7 +405,7 @@ thread_exit(void)
p->p_rux.rux_runtime += (new_switchtime - PCPU_GET(switchtime));
PCPU_SET(switchtime, new_switchtime);
PCPU_SET(switchticks, ticks);
- cnt.v_swtch++;
+ PCPU_INC(cnt.v_swtch);
/*
* Aggregate this thread's tick stats in the parent so they are not
* lost. Also add the child usage to our own when the final thread
diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
index e9d9c3552b08..f839ace03637 100644
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -190,8 +190,8 @@ ast(struct trapframe *framep)
#endif
td->td_flags &= ~(TDF_ASTPENDING | TDF_NEEDSIGCHK |
TDF_NEEDRESCHED | TDF_INTERRUPT);
- cnt.v_trap++;
mtx_unlock_spin(&sched_lock);
+ PCPU_INC(cnt.v_trap);
/*
* XXXKSE While the fact that we owe a user profiling
diff --git a/sys/nfsclient/nfs_bio.c b/sys/nfsclient/nfs_bio.c
index e42a35653116..147f1905e222 100644
--- a/sys/nfsclient/nfs_bio.c
+++ b/sys/nfsclient/nfs_bio.c
@@ -159,8 +159,8 @@ nfs_getpages(struct vop_getpages_args *ap)
kva = (vm_offset_t) bp->b_data;
pmap_qenter(kva, pages, npages);
- cnt.v_vnodein++;
- cnt.v_vnodepgsin += npages;
+ PCPU_INC(cnt.v_vnodein);
+ PCPU_ADD(cnt.v_vnodepgsin, npages);
iov.iov_base = (caddr_t) kva;
iov.iov_len = count;
@@ -323,8 +323,8 @@ nfs_putpages(struct vop_putpages_args *ap)
kva = (vm_offset_t) bp->b_data;
pmap_qenter(kva, pages, npages);
- cnt.v_vnodeout++;
- cnt.v_vnodepgsout += count;
+ PCPU_INC(cnt.v_vnodeout);
+ PCPU_ADD(cnt.v_vnodepgsout, count);
iov.iov_base = (caddr_t) kva;
iov.iov_len = count;
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 121c4a5e22e6..7d51bc376209 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -1037,8 +1037,8 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
}
bp->b_npages = j - i;
- cnt.v_swapin++;
- cnt.v_swappgsin += bp->b_npages;
+ PCPU_INC(cnt.v_swapin);
+ PCPU_ADD(cnt.v_swappgsin, bp->b_npages);
/*
* We still hold the lock on mreq, and our automatic completion routine
@@ -1072,7 +1072,7 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
vm_page_lock_queues();
vm_page_flag_set(mreq, PG_REFERENCED);
vm_page_unlock_queues();
- cnt.v_intrans++;
+ PCPU_INC(cnt.v_intrans);
if (msleep(mreq, VM_OBJECT_MTX(object), PSWP, "swread", hz*20)) {
printf(
"swap_pager: indefinite wait buffer: bufobj: %p, blkno: %jd, size: %ld\n",
@@ -1263,8 +1263,8 @@ swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
bp->b_dirtyoff = 0;
bp->b_dirtyend = bp->b_bcount;
- cnt.v_swapout++;
- cnt.v_swappgsout += bp->b_npages;
+ PCPU_INC(cnt.v_swapout);
+ PCPU_ADD(cnt.v_swappgsout, bp->b_npages);
/*
* asynchronous
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index dfcade1d8c6a..a6de9185217c 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -655,7 +655,7 @@ vm_object_terminate(vm_object_t object)
"p->busy = %d, p->flags %x\n", p, p->busy, p->flags));
if (p->wire_count == 0) {
vm_page_free(p);
- cnt.v_pfree++;
+ PCPU_INC(cnt.v_pfree);
} else {
vm_page_remove(p);
}
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index d4f8148812d6..263ee05dab37 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -1045,7 +1045,7 @@ vm_page_activate(vm_page_t m)
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if (VM_PAGE_GETKNOWNQUEUE2(m) != PQ_ACTIVE) {
if (VM_PAGE_INQUEUE1(m, PQ_CACHE))
- cnt.v_reactivated++;
+ PCPU_INC(cnt.v_reactivated);
vm_pageq_remove(m);
if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
if (m->act_count < ACT_INIT)
@@ -1112,7 +1112,7 @@ vm_page_free_toq(vm_page_t m)
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
KASSERT(!pmap_page_is_mapped(m),
("vm_page_free_toq: freeing mapped page %p", m));
- cnt.v_tfree++;
+ PCPU_INC(cnt.v_tfree);
if (m->busy || VM_PAGE_INQUEUE1(m, PQ_FREE)) {
printf(
@@ -1286,7 +1286,7 @@ _vm_page_deactivate(vm_page_t m, int athead)
return;
if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
if (VM_PAGE_INQUEUE1(m, PQ_CACHE))
- cnt.v_reactivated++;
+ PCPU_INC(cnt.v_reactivated);
vm_page_flag_clear(m, PG_WINATCFLS);
vm_pageq_remove(m);
if (athead)
@@ -1295,6 +1295,11 @@ _vm_page_deactivate(vm_page_t m, int athead)
TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
VM_PAGE_SETQUEUE2(m, PQ_INACTIVE);
vm_page_queues[PQ_INACTIVE].lcnt++;
+
+ /*
+ * No need for an atomic here since vm_page_queue_mtx
+ * already protects this field.
+ */
cnt.v_inactive_count++;
}
}
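
Note the distinction the new comment above draws: v_inactive_count keeps a plain increment because it is only ever modified with the page queue mutex held, so the mutex already serializes the read-modify-write and neither an atomic op nor a per-CPU counter is needed. A minimal sketch of that discipline (illustrative, not part of the diff):

static void
example_bump_inactive_count(void)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	cnt.v_inactive_count++;	/* safe: caller holds vm_page_queue_mtx */
}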
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index c0611ba0570d..dcf69ef621f4 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -538,7 +538,7 @@ vm_pageout_object_deactivate_pages(pmap, first_object, desired)
goto unlock_return;
}
next = TAILQ_NEXT(p, listq);
- cnt.v_pdpages++;
+ PCPU_INC(cnt.v_pdpages);
if (p->wire_count != 0 ||
p->hold_count != 0 ||
p->busy != 0 ||
@@ -745,7 +745,7 @@ rescan0:
m != NULL && maxscan-- > 0 && page_shortage > 0;
m = next) {
- cnt.v_pdpages++;
+ PCPU_INC(cnt.v_pdpages);
if (VM_PAGE_GETQUEUE(m) != PQ_INACTIVE) {
goto rescan0;
@@ -856,7 +856,7 @@ rescan0:
* Invalid pages can be easily freed
*/
vm_page_free(m);
- cnt.v_dfree++;
+ PCPU_INC(cnt.v_dfree);
--page_shortage;
} else if (m->dirty == 0) {
/*
@@ -1089,7 +1089,7 @@ unlock_and_continue:
* The count for pagedaemon pages is done after checking the
* page for eligibility...
*/
- cnt.v_pdpages++;
+ PCPU_INC(cnt.v_pdpages);
/*
* Check to see "how much" the page has been used.
@@ -1168,7 +1168,7 @@ unlock_and_continue:
m));
vm_page_free(m);
VM_OBJECT_UNLOCK(object);
- cnt.v_dfree++;
+ PCPU_INC(cnt.v_dfree);
cache_last_free = cache_cur;
cache_first_failure = -1;
break;
@@ -1427,6 +1427,11 @@ vm_pageout()
cnt.v_free_reserved = vm_pageout_page_count +
cnt.v_pageout_free_min + (cnt.v_page_count / 768) + PQ_NUMCOLORS;
cnt.v_free_severe = cnt.v_free_min / 2;
+
+ /*
+ * These adds do not need to be atomic since we are only
+ * initializing v_free_min and v_free_severe.
+ */
cnt.v_free_min += cnt.v_free_reserved;
cnt.v_free_severe += cnt.v_free_reserved;
@@ -1524,7 +1529,7 @@ vm_pageout()
}
}
if (vm_pages_needed)
- cnt.v_pdwakeups++;
+ PCPU_INC(cnt.v_pdwakeups);
mtx_unlock(&vm_page_queue_free_mtx);
vm_pageout_scan(pass);
}
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index cb4cc39098cb..a4cadf74123c 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -728,8 +728,8 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
if (i != reqpage)
vm_page_free(m[i]);
vm_page_unlock_queues();
- cnt.v_vnodein++;
- cnt.v_vnodepgsin++;
+ PCPU_INC(cnt.v_vnodein);
+ PCPU_INC(cnt.v_vnodepgsin);
error = vnode_pager_input_old(object, m[reqpage]);
VM_OBJECT_UNLOCK(object);
return (error);
@@ -757,8 +757,8 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
vm_page_free(m[i]);
vm_page_unlock_queues();
VM_OBJECT_UNLOCK(object);
- cnt.v_vnodein++;
- cnt.v_vnodepgsin++;
+ PCPU_INC(cnt.v_vnodein);
+ PCPU_INC(cnt.v_vnodepgsin);
return vnode_pager_input_smlfs(object, m[reqpage]);
}
@@ -909,8 +909,8 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
bp->b_runningbufspace = bp->b_bufsize;
atomic_add_int(&runningbufspace, bp->b_runningbufspace);
- cnt.v_vnodein++;
- cnt.v_vnodepgsin += count;
+ PCPU_INC(cnt.v_vnodein);
+ PCPU_ADD(cnt.v_vnodepgsin, count);
/* do the input */
bp->b_iooffset = dbtob(bp->b_blkno);
@@ -1157,8 +1157,8 @@ vnode_pager_generic_putpages(vp, m, bytecount, flags, rtvals)
auio.uio_resid = maxsize;
auio.uio_td = (struct thread *) 0;
error = VOP_WRITE(vp, &auio, ioflags, curthread->td_ucred);
- cnt.v_vnodeout++;
- cnt.v_vnodepgsout += ncount;
+ PCPU_INC(cnt.v_vnodeout);
+ PCPU_ADD(cnt.v_vnodepgsout, ncount);
if (error) {
if ((ppscheck = ppsratecheck(&lastfail, &curfail, 1)))
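
One consequence of the per-CPU scheme is that a system-wide total no longer lives in a single global: any reader has to sum every CPU's copy of the field. A sketch of such a reader, assuming the standard pcpu_find()/mp_maxid iteration and a struct vmmeter pc_cnt member in struct pcpu; vmcnt_fetch() and the offset-based field access are illustrative assumptions, not the committed interface:

static u_int
vmcnt_fetch(size_t offset)
{
	struct pcpu *pc;
	u_int total;
	u_int i;

	total = 0;
	for (i = 0; i <= mp_maxid; i++) {
		if ((pc = pcpu_find(i)) == NULL)
			continue;
		/* Add this CPU's private copy of the counter. */
		total += *(u_int *)((char *)&pc->pc_cnt + offset);
	}
	return (total);
}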