diff options
author | Matthew Dillon <dillon@FreeBSD.org> | 1999-02-12 09:51:43 +0000 |
---|---|---|
committer | Matthew Dillon <dillon@FreeBSD.org> | 1999-02-12 09:51:43 +0000 |
commit | 2aaeadf8d936ff58936f07369a0a12cd333621c7 (patch) | |
tree | d992dbe2f2c9fc61ae4b66cb1c4175ea12d36611 /sys/vm/vm_map.c | |
parent | 28791bce4464424b2d8b641d340ec58ed77666be (diff) | |
download | src-2aaeadf8d936ff58936f07369a0a12cd333621c7.tar.gz src-2aaeadf8d936ff58936f07369a0a12cd333621c7.zip |
Fix non-fatal bug in vm_map_insert() which improperly cleared
OBJ_ONEMAPPING in the case where an object is extended and an
additional vm_map_entry must be allocated.
In vm_object_madvise(), remove call to vm_page_cache() in MADV_FREE
case in order to avoid a page fault on page reuse. However, we still
mark the page as clean and destroy any swap backing store.
Submitted by: Alan Cox <alc@cs.rice.edu>
Notes
Notes:
svn path=/head/; revision=43923
Diffstat (limited to 'sys/vm/vm_map.c')
-rw-r--r-- | sys/vm/vm_map.c | 79 |
1 files changed, 37 insertions, 42 deletions
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c index a44124d3510a..f43c38d18836 100644 --- a/sys/vm/vm_map.c +++ b/sys/vm/vm_map.c @@ -61,7 +61,7 @@ * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. * - * $Id: vm_map.c,v 1.147 1999/02/03 01:57:16 dillon Exp $ + * $Id: vm_map.c,v 1.148 1999/02/07 21:48:22 dillon Exp $ */ /* @@ -429,6 +429,9 @@ vm_map_lookup_entry(map, address, entry) * size should match that of the address range. * * Requires that the map be locked, and leaves it so. + * + * If object is non-NULL, ref count must be bumped by caller + * prior to making call to account for the new entry. */ int vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset, @@ -438,9 +441,6 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset, vm_map_entry_t new_entry; vm_map_entry_t prev_entry; vm_map_entry_t temp_entry; -#if 0 - vm_object_t prev_object; -#endif u_char protoeflags; if ((object != NULL) && (cow & MAP_NOFAULT)) { @@ -483,13 +483,18 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset, if (cow & MAP_NOFAULT) protoeflags |= MAP_ENTRY_NOFAULT; - /* - * See if we can avoid creating a new entry by extending one of our - * neighbors. Or at least extend the object. - */ - - if ( - (object == NULL) && + if (object) { + /* + * When object is non-NULL, it could be shared with another + * process. We have to set or clear OBJ_ONEMAPPING + * appropriately. + */ + if ((object->ref_count > 1) || (object->shadow_count != 0)) { + vm_object_clear_flag(object, OBJ_ONEMAPPING); + } else { + vm_object_set_flag(object, OBJ_ONEMAPPING); + } + } else if ( (prev_entry != &map->header) && ((prev_entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) && ((prev_entry->object.vm_object == NULL) || @@ -506,8 +511,9 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset, (vm_size_t) (end - prev_entry->end)))) { /* - * Coalesced the two objects. 
Can we extend the - * previous map entry to include the new range? + * We were able to extend the object. Determine if we + * can extend the previous map entry to include the + * new range as well. */ if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) && (prev_entry->protection == prot) && @@ -515,28 +521,29 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset, map->size += (end - prev_entry->end); prev_entry->end = end; -#if 0 - /* - * (no longer applies) - */ - if ((cow & MAP_NOFAULT) == 0) { - prev_object = prev_entry->object.vm_object; - default_pager_convert_to_swapq(prev_object); - } -#endif return (KERN_SUCCESS); } - else { - object = prev_entry->object.vm_object; - offset = prev_entry->offset + (prev_entry->end - - prev_entry->start); - vm_object_reference(object); - } + /* + * If we can extend the object but cannot extend the + * map entry, we have to create a new map entry. We + * must bump the ref count on the extended object to + * account for it. + */ + object = prev_entry->object.vm_object; + offset = prev_entry->offset + + (prev_entry->end - prev_entry->start); + vm_object_reference(object); } } /* + * NOTE: if conditionals fail, object can be NULL here. This occurs + * in things like the buffer map where we manage kva but do not manage + * backing objects. 
+ */ + + /* * Create a new entry */ @@ -549,14 +556,6 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset, new_entry->offset = offset; new_entry->avail_ssize = 0; - if (object) { - if ((object->ref_count > 1) || (object->shadow_count != 0)) { - vm_object_clear_flag(object, OBJ_ONEMAPPING); - } else { - vm_object_set_flag(object, OBJ_ONEMAPPING); - } - } - if (map->is_main_map) { new_entry->inheritance = VM_INHERIT_DEFAULT; new_entry->protection = prot; @@ -577,12 +576,6 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset, (prev_entry->end >= new_entry->start)) map->first_free = new_entry; -#if 0 - /* - * (no longer applies) - */ - default_pager_convert_to_swapq(object); -#endif return (KERN_SUCCESS); } @@ -853,6 +846,8 @@ vm_map_findspace(map, start, length, addr) * first-fit from the specified address; the region found is * returned in the same parameter. * + * If object is non-NULL, ref count must be bumped by caller + * prior to making call to account for the new entry. */ int vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset, |