path: root/sys/vm/uma_core.c
author	Alan Cox <alc@FreeBSD.org>	2010-05-03 17:35:31 +0000
committer	Alan Cox <alc@FreeBSD.org>	2010-05-03 17:35:31 +0000
commit	451033a48ac5930e486f46db2235bd01f9eca9c3 (patch)
tree	9d191e34674db9a2754ac117636f950b9334e903 /sys/vm/uma_core.c
parent	5fdd0a335f567c95f479d2739e1df0f664626f21 (diff)
It makes more sense for the object-based backend allocator to use OBJT_PHYS objects instead of OBJT_DEFAULT objects because we never reclaim or page out the allocated pages. Moreover, they are mapped with pmap_qenter(), which creates unmanaged mappings.

Reviewed by:	kib
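For readers unfamiliar with the allocation path being changed, here is a minimal, illustrative sketch of an object-backed page allocator in the style of obj_alloc(), written against the VM KPI of this era. It is not the committed uma_core.c code; the function name phys_backed_alloc, its parameters, and the exact allocation flags are assumptions made for the example. It illustrates the point the commit message makes: the backing object is OBJT_PHYS because its pages are wired, never paged out, and mapped with pmap_qenter(), which installs unmanaged mappings.

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

/*
 * Illustrative sketch only (not the committed code): allocate nrpages
 * wired pages into an OBJT_PHYS object and map them at backing_kva.
 * Returns backing_kva on success, 0 on failure.
 */
static vm_offset_t
phys_backed_alloc(vm_offset_t backing_kva, int nrpages)
{
	vm_object_t obj;
	vm_page_t p;
	int i;

	/* Unswappable backing store: OBJT_PHYS rather than OBJT_DEFAULT. */
	obj = vm_object_allocate(OBJT_PHYS, nrpages);

	VM_OBJECT_LOCK(obj);
	for (i = 0; i < nrpages; i++) {
		/* The pages are wired and never become pageout candidates. */
		p = vm_page_alloc(obj, i, VM_ALLOC_NOBUSY | VM_ALLOC_WIRED);
		if (p == NULL) {
			/*
			 * Back out what was already allocated: unmap, then
			 * unwire and free each page (compare the error path
			 * in the first hunk below).
			 */
			pmap_qremove(backing_kva, i);
			while (i-- > 0) {
				p = TAILQ_LAST(&obj->memq, pglist);
				vm_page_unwire(p, 0);
				vm_page_free(p);
			}
			VM_OBJECT_UNLOCK(obj);
			vm_object_deallocate(obj);
			return (0);
		}
		/* pmap_qenter() installs an unmanaged kernel mapping. */
		pmap_qenter(backing_kva + i * PAGE_SIZE, &p, 1);
	}
	VM_OBJECT_UNLOCK(obj);
	return (backing_kva);
}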
Notes:
	svn path=/head/; revision=207576
Diffstat (limited to 'sys/vm/uma_core.c')
-rw-r--r--	sys/vm/uma_core.c | 14 ++++----------
1 file changed, 4 insertions(+), 10 deletions(-)
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index 00a8276d36fd..e1b9a08109d4 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -1022,12 +1022,8 @@ obj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
 			while (pages != startpages) {
 				pages--;
 				p = TAILQ_LAST(&object->memq, pglist);
-				vm_page_lock(p);
-				vm_page_lock_queues();
 				vm_page_unwire(p, 0);
 				vm_page_free(p);
-				vm_page_unlock_queues();
-				vm_page_unlock(p);
 			}
 			retkva = 0;
 			goto done;
@@ -2893,13 +2889,11 @@ uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count)
 
 	if (kva == 0)
 		return (0);
-	if (obj == NULL) {
-		obj = vm_object_allocate(OBJT_DEFAULT,
-		    pages);
-	} else {
+	if (obj == NULL)
+		obj = vm_object_allocate(OBJT_PHYS, pages);
+	else {
 		VM_OBJECT_LOCK_INIT(obj, "uma object");
-		_vm_object_allocate(OBJT_DEFAULT,
-		    pages, obj);
+		_vm_object_allocate(OBJT_PHYS, pages, obj);
 	}
 	ZONE_LOCK(zone);
 	keg->uk_kva = kva;
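As a usage note (illustrative only, not part of this commit): a caller that passes obj == NULL to uma_zone_set_obj() now gets an internally allocated OBJT_PHYS object, while a caller-supplied object is initialized in place via _vm_object_allocate(). The zone pointer and item count below are hypothetical; per the hunk above, the function returns 0 on failure.

#include <sys/param.h>
#include <sys/systm.h>
#include <vm/uma.h>

/*
 * Hypothetical caller (zone and count are illustrative): reserve
 * object-backed storage for up to 1024 items and let
 * uma_zone_set_obj() allocate the OBJT_PHYS object itself by
 * passing obj == NULL.
 */
static void
reserve_zone_backing(uma_zone_t my_zone)
{

	if (uma_zone_set_obj(my_zone, NULL, 1024) == 0)
		printf("could not reserve backing store for my_zone\n");
}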