aboutsummaryrefslogtreecommitdiff
path: root/sys/vm/vm_map.c
diff options
context:
space:
mode:
authorDavid Greenman <dg@FreeBSD.org>1995-02-02 09:09:15 +0000
committerDavid Greenman <dg@FreeBSD.org>1995-02-02 09:09:15 +0000
commita1f6d91cc2800e2993d2c36e1fd25bfe028515d0 (patch)
tree4f3055bbc0349dde0f762762aab8d6b63f1dbf2f /sys/vm/vm_map.c
parent8e95996cd77155a06e1e7fd8124d8e1d45f9edca (diff)
downloadsrc-a1f6d91cc2800e2993d2c36e1fd25bfe028515d0.tar.gz
src-a1f6d91cc2800e2993d2c36e1fd25bfe028515d0.zip
swap_pager.c:
Fixed long-standing bug in freeing swap space during object collapses. Fixed 'out of space' messages from printing out too often. Modified to use new kmem_malloc() calling convention. Implemented an additional stat in the swap pager struct to count the amount of space allocated to that pager. This may be removed at some point in the future. Minimized unnecessary wakeups. vm_fault.c: Don't try to collect fault stats on 'swapped' processes - there aren't any upages to store the stats in. Changed read-ahead policy (again!). vm_glue.c: Be sure to gain a reference to the process's map before swapping. Be sure to lose it when done. kern_malloc.c: Added the ability to specify if allocations are at interrupt time or are 'safe'; this affects what types of pages can be allocated. vm_map.c: Fixed a variety of map lock problems; there's still a lurking bug that will eventually bite. vm_object.c: Explicitly initialize the object fields rather than bzeroing the struct. Eliminated the 'rcollapse' code and folded its functionality into the "real" collapse routine. Moved an object_unlock() so that the backing_object is protected in the qcollapse routine. Make sure nobody fools with the backing_object when we're destroying it. Added some diagnostic code which can be called from the debugger that looks through all the internal objects and makes certain that they all belong to someone. vm_page.c: Fixed a rather serious logic bug that would result in random system crashes. Changed pagedaemon wakeup policy (again!). vm_pageout.c: Removed unnecessary page rotations on the inactive queue. Changed the number of pages to explicitly free to just free_reserved level. Submitted by: John Dyson
Notes
Notes: svn path=/head/; revision=6129
Diffstat (limited to 'sys/vm/vm_map.c')
-rw-r--r--sys/vm/vm_map.c29
1 files changed, 23 insertions, 6 deletions
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 5ba589570c07..941b0dcddf55 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_map.c,v 1.11 1995/01/10 07:32:46 davidg Exp $
+ * $Id: vm_map.c,v 1.12 1995/01/24 10:13:02 davidg Exp $
*/
/*
@@ -216,6 +216,9 @@ vmspace_free(vm)
register struct vmspace *vm;
{
+ if (vm->vm_refcnt == 0)
+ panic("vmspace_free: attempt to free already freed vmspace");
+
if (--vm->vm_refcnt == 0) {
/*
* Lock the map, to wait out all other references to it.
@@ -225,6 +228,10 @@ vmspace_free(vm)
vm_map_lock(&vm->vm_map);
(void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
vm->vm_map.max_offset);
+ vm_map_unlock(&vm->vm_map);
+ while( vm->vm_map.ref_count != 1)
+ tsleep(&vm->vm_map.ref_count, PVM, "vmsfre", 0);
+ --vm->vm_map.ref_count;
pmap_release(&vm->vm_pmap);
FREE(vm, M_VMMAP);
}
@@ -448,10 +455,15 @@ vm_map_deallocate(map)
return;
simple_lock(&map->ref_lock);
- c = --map->ref_count;
+ c = map->ref_count;
simple_unlock(&map->ref_lock);
- if (c > 0) {
+ if (c == 0)
+ panic("vm_map_deallocate: deallocating already freed map");
+
+ if (c != 1) {
+ --map->ref_count;
+ wakeup((caddr_t) &map->ref_count);
return;
}
/*
@@ -459,11 +471,14 @@ vm_map_deallocate(map)
*/
vm_map_lock(map);
-
(void) vm_map_delete(map, map->min_offset, map->max_offset);
+ --map->ref_count;
+ if( map->ref_count != 0) {
+ vm_map_unlock(map);
+ return;
+ }
pmap_destroy(map->pmap);
-
FREE(map, M_VMMAP);
}
@@ -1039,8 +1054,10 @@ vm_map_protect(map, start, end, new_prot, set_max)
current = entry;
while ((current != &map->header) && (current->start < end)) {
- if (current->is_sub_map)
+ if (current->is_sub_map) {
+ vm_map_unlock(map);
return (KERN_INVALID_ARGUMENT);
+ }
if ((new_prot & current->max_protection) != new_prot) {
vm_map_unlock(map);
return (KERN_PROTECTION_FAILURE);