diff options
author | Alan Cox <alc@FreeBSD.org> | 2004-01-08 20:48:26 +0000 |
---|---|---|
committer | Alan Cox <alc@FreeBSD.org> | 2004-01-08 20:48:26 +0000 |
commit | 65bae14d771082002e13fd64b6a1a793bfa8e259 (patch) | |
tree | 93969e6896f2afbaefcb25fb23dc6cfbc7fa03cb /sys/vm/vm_contig.c | |
parent | 986a96490048b1ac87b891350db9baa377b488e3 (diff) | |
download | src-65bae14d771082002e13fd64b6a1a793bfa8e259.tar.gz src-65bae14d771082002e13fd64b6a1a793bfa8e259.zip |
- Enable recursive acquisition of the mutex synchronizing access to the
free pages queue. This is presently needed by contigmalloc1().
- Move a sanity check against attempted double allocation of two pages
to the same vm object offset from vm_page_alloc() to vm_page_insert().
This provides better protection because double allocation could occur
through a direct call to vm_page_insert(), such as that by
vm_page_rename().
- Modify contigmalloc1() to hold the mutex synchronizing access to the
free pages queue while it scans vm_page_array in search of free pages.
- Correct a potential leak of pages by contigmalloc1() that I introduced
in revision 1.20: We must convert all cache queue pages to free pages
before we begin removing free pages from the free queue. Otherwise,
if we have to restart the scan because we are unable to acquire the
vm object lock that is necessary to convert a cache queue page to a
free page, we leak those free pages already removed from the free queue.
Notes:
svn path=/head/; revision=124261
Diffstat (limited to 'sys/vm/vm_contig.c')
-rw-r--r-- | sys/vm/vm_contig.c | 8 |
1 file changed, 6 insertions, 2 deletions
diff --git a/sys/vm/vm_contig.c b/sys/vm/vm_contig.c index 3628b3716422..68adc460a47c 100644 --- a/sys/vm/vm_contig.c +++ b/sys/vm/vm_contig.c @@ -168,6 +168,7 @@ contigmalloc1( for (pass = 0; pass <= 1; pass++) { s = splvm(); vm_page_lock_queues(); + mtx_lock_spin(&vm_page_queue_free_mtx); again: /* * Find first page in array that is free, within range, @@ -188,6 +189,7 @@ again: */ if ((i == cnt.v_page_count) || ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) { + mtx_unlock_spin(&vm_page_queue_free_mtx); again1: if (vm_contig_launder(PQ_INACTIVE)) goto again1; @@ -224,7 +226,9 @@ again1: vm_page_free(m); VM_OBJECT_UNLOCK(object); } - mtx_lock_spin(&vm_page_queue_free_mtx); + } + for (i = start; i < (start + size / PAGE_SIZE); i++) { + vm_page_t m = &pga[i]; vm_pageq_remove_nowakeup(m); m->valid = VM_PAGE_BITS_ALL; if (m->flags & PG_ZERO) @@ -236,8 +240,8 @@ again1: m->wire_count = 0; m->busy = 0; m->object = NULL; - mtx_unlock_spin(&vm_page_queue_free_mtx); } + mtx_unlock_spin(&vm_page_queue_free_mtx); vm_page_unlock_queues(); /* * We've found a contiguous chunk that meets are requirements. |