aboutsummaryrefslogtreecommitdiff
path: root/sys/vm/vm_page.c
diff options
context:
space:
mode:
authorMark Johnston <markj@FreeBSD.org>2020-07-29 19:38:49 +0000
committerMark Johnston <markj@FreeBSD.org>2020-07-29 19:38:49 +0000
commit958d8f527c29658bd55a67e5e970de4447cc47e4 (patch)
treeaa8f3aeae7917203ee20a54f4aaaf953ac5737e3 /sys/vm/vm_page.c
parent12b2f3daaa597f346a4b0065bf7f75378524ef88 (diff)
downloadsrc-958d8f527c29658bd55a67e5e970de4447cc47e4.tar.gz
src-958d8f527c29658bd55a67e5e970de4447cc47e4.zip
Remove the volatile qualifier from busy_lock.
Use atomic(9) to load the lock state. Some places were doing this already, so it was inconsistent. In initialization code, the lock state is still initialized with plain stores.

Reviewed by: alc, kib
Sponsored by: The FreeBSD Foundation
Differential Revision: https://reviews.freebsd.org/D25861
Notes
Notes: svn path=/head/; revision=363671
Diffstat (limited to 'sys/vm/vm_page.c')
-rw-r--r-- sys/vm/vm_page.c | 18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 8c2df3e78e17..ce107645d6a6 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -908,7 +908,7 @@ vm_page_busy_downgrade(vm_page_t m)
vm_page_assert_xbusied(m);
- x = m->busy_lock;
+ x = vm_page_busy_fetch(m);
for (;;) {
if (atomic_fcmpset_rel_int(&m->busy_lock,
&x, VPB_SHARERS_WORD(1)))
@@ -931,7 +931,7 @@ vm_page_busy_tryupgrade(vm_page_t m)
vm_page_assert_sbusied(m);
- x = m->busy_lock;
+ x = vm_page_busy_fetch(m);
ce = VPB_CURTHREAD_EXCLUSIVE;
for (;;) {
if (VPB_SHARERS(x) > 1)
@@ -955,7 +955,7 @@ vm_page_sbusied(vm_page_t m)
{
u_int x;
- x = m->busy_lock;
+ x = vm_page_busy_fetch(m);
return ((x & VPB_BIT_SHARED) != 0 && x != VPB_UNBUSIED);
}
@@ -971,7 +971,7 @@ vm_page_sunbusy(vm_page_t m)
vm_page_assert_sbusied(m);
- x = m->busy_lock;
+ x = vm_page_busy_fetch(m);
for (;;) {
KASSERT(x != VPB_FREED,
("vm_page_sunbusy: Unlocking freed page."));
@@ -1072,7 +1072,7 @@ _vm_page_busy_sleep(vm_object_t obj, vm_page_t m, vm_pindex_t pindex,
xsleep = (allocflags & (VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY)) != 0;
sleepq_lock(m);
- x = atomic_load_int(&m->busy_lock);
+ x = vm_page_busy_fetch(m);
do {
/*
* If the page changes objects or becomes unlocked we can
@@ -1110,7 +1110,7 @@ vm_page_trysbusy(vm_page_t m)
u_int x;
obj = m->object;
- x = m->busy_lock;
+ x = vm_page_busy_fetch(m);
for (;;) {
if ((x & VPB_BIT_SHARED) == 0)
return (0);
@@ -1146,7 +1146,7 @@ vm_page_tryxbusy(vm_page_t m)
{
vm_object_t obj;
- if (atomic_cmpset_acq_int(&(m)->busy_lock, VPB_UNBUSIED,
+ if (atomic_cmpset_acq_int(&m->busy_lock, VPB_UNBUSIED,
VPB_CURTHREAD_EXCLUSIVE) == 0)
return (0);
@@ -1354,7 +1354,7 @@ vm_page_readahead_finish(vm_page_t m)
* have shown that deactivating the page is usually the best choice,
* unless the page is wanted by another thread.
*/
- if ((m->busy_lock & VPB_BIT_WAITERS) != 0)
+ if ((vm_page_busy_fetch(m) & VPB_BIT_WAITERS) != 0)
vm_page_activate(m);
else
vm_page_deactivate(m);
@@ -1719,7 +1719,7 @@ vm_page_busy_release(vm_page_t m)
{
u_int x;
- x = atomic_load_int(&m->busy_lock);
+ x = vm_page_busy_fetch(m);
for (;;) {
if (x == VPB_FREED)
break;