author     Bojan Novković <bnovkov@FreeBSD.org>   2024-05-03 16:48:18 +0000
committer  Bojan Novković <bnovkov@FreeBSD.org>   2024-05-25 17:24:46 +0000
commit     da76d349b6b104f4e70562304c800a0793dea18d (patch)
tree       bdf017611be2509b68bf0aff30aaeb6e362f3737
parent     9b1de7e4844d951a7d7335cbde75a86a2380e220 (diff)
uma: Deduplicate uma_small_alloc
This commit refactors the UMA small alloc code and removes most UMA
machine-dependent code. The existing machine-dependent uma_small_alloc code
is almost identical across all architectures, except for powerpc where
using the direct map addresses involved extra steps in some cases. The
MI/MD split was replaced by a default uma_small_alloc implementation that
can be overridden by architecture-specific code by defining the
UMA_MD_SMALL_ALLOC symbol. Furthermore, UMA_USE_DMAP was introduced to
replace most UMA_MD_SMALL_ALLOC uses.

Reviewed by:	markj, kib
Approved by:	markj (mentor)
Differential Revision:	https://reviews.freebsd.org/D45084
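For orientation before reading the diff: the change boils down to two symbols
in each architecture's <machine/vmparam.h>. The fragment below is only a
sketch for a hypothetical architecture, not part of the commit; the symbol
names, the preprocessor guard, and the function prototypes are taken from the
hunks further down, while the file layout and comments are illustrative.

/*
 * Hypothetical <machine/vmparam.h> fragment (sketch only).
 *
 * UMA_USE_DMAP: small UMA allocations may come straight from the direct
 * map.  With only this defined, sys/vm/uma_core.c compiles the shared
 * default uma_small_alloc()/uma_small_free() shown in the uma_core.c
 * hunks below, guarded by:
 *
 *	#if defined(UMA_USE_DMAP) && !defined(UMA_MD_SMALL_ALLOC)
 */
#define	UMA_USE_DMAP

/*
 * UMA_MD_SMALL_ALLOC: the architecture opts out of the shared default and
 * keeps supplying its own allocator pair in machine-dependent code
 * (powerpc in this commit):
 */
#define	UMA_MD_SMALL_ALLOC

/*
 * Prototypes an MD override must match (types come from <sys/types.h> and
 * <vm/uma.h>); the bodies live in the architecture's own source file.
 */
void	*uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
	    uint8_t *flags, int wait);
void	 uma_small_free(void *mem, vm_size_t size, uint8_t flags);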
-rw-r--r--  sys/amd64/amd64/uma_machdep.c   71
-rw-r--r--  sys/amd64/include/vmparam.h      6
-rw-r--r--  sys/arm64/arm64/uma_machdep.c   69
-rw-r--r--  sys/arm64/include/vmparam.h      2
-rw-r--r--  sys/conf/files.amd64             1
-rw-r--r--  sys/conf/files.arm64             1
-rw-r--r--  sys/conf/files.riscv             1
-rw-r--r--  sys/kern/subr_vmem.c             6
-rw-r--r--  sys/powerpc/include/vmparam.h    6
-rw-r--r--  sys/riscv/include/vmparam.h      2
-rw-r--r--  sys/riscv/riscv/uma_machdep.c   68
-rw-r--r--  sys/vm/uma_core.c               43
-rw-r--r--  sys/vm/vm_map.c                  8
-rw-r--r--  sys/vm/vm_radix.c                2
14 files changed, 57 insertions, 229 deletions
diff --git a/sys/amd64/amd64/uma_machdep.c b/sys/amd64/amd64/uma_machdep.c
deleted file mode 100644
index f83f0674cc4e..000000000000
--- a/sys/amd64/amd64/uma_machdep.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/*-
- * SPDX-License-Identifier: BSD-2-Clause
- *
- * Copyright (c) 2003 Alan L. Cox <alc@cs.rice.edu>
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include <sys/param.h>
-#include <sys/malloc.h>
-#include <vm/vm.h>
-#include <vm/vm_param.h>
-#include <vm/vm_page.h>
-#include <vm/vm_phys.h>
-#include <vm/vm_dumpset.h>
-#include <vm/uma.h>
-#include <vm/uma_int.h>
-#include <machine/md_var.h>
-
-void *
-uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, u_int8_t *flags,
- int wait)
-{
- vm_page_t m;
- vm_paddr_t pa;
- void *va;
-
- *flags = UMA_SLAB_PRIV;
- m = vm_page_alloc_noobj_domain(domain, malloc2vm_flags(wait) |
- VM_ALLOC_WIRED);
- if (m == NULL)
- return (NULL);
- pa = m->phys_addr;
- if ((wait & M_NODUMP) == 0)
- dump_add_page(pa);
- va = (void *)PHYS_TO_DMAP(pa);
- return (va);
-}
-
-void
-uma_small_free(void *mem, vm_size_t size, u_int8_t flags)
-{
- vm_page_t m;
- vm_paddr_t pa;
-
- pa = DMAP_TO_PHYS((vm_offset_t)mem);
- dump_drop_page(pa);
- m = PHYS_TO_VM_PAGE(pa);
- vm_page_unwire_noq(m);
- vm_page_free(m);
-}
diff --git a/sys/amd64/include/vmparam.h b/sys/amd64/include/vmparam.h
index bff9bf840036..e5155a7c7d47 100644
--- a/sys/amd64/include/vmparam.h
+++ b/sys/amd64/include/vmparam.h
@@ -72,12 +72,12 @@
#endif
/*
- * We provide a machine specific single page allocator through the use
- * of the direct mapped segment. This uses 2MB pages for reduced
+ * We provide a single page allocator through the use of the
+ * direct mapped segment. This uses 2MB pages for reduced
* TLB pressure.
*/
#if !defined(KASAN) && !defined(KMSAN)
-#define UMA_MD_SMALL_ALLOC
+#define UMA_USE_DMAP
#endif
/*
diff --git a/sys/arm64/arm64/uma_machdep.c b/sys/arm64/arm64/uma_machdep.c
deleted file mode 100644
index f942248d4dcd..000000000000
--- a/sys/arm64/arm64/uma_machdep.c
+++ /dev/null
@@ -1,69 +0,0 @@
-/*-
- * Copyright (c) 2003 Alan L. Cox <alc@cs.rice.edu>
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include <sys/param.h>
-#include <sys/malloc.h>
-#include <vm/vm.h>
-#include <vm/vm_param.h>
-#include <vm/vm_page.h>
-#include <vm/vm_phys.h>
-#include <vm/vm_dumpset.h>
-#include <vm/uma.h>
-#include <vm/uma_int.h>
-#include <machine/machdep.h>
-
-void *
-uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, u_int8_t *flags,
- int wait)
-{
- vm_page_t m;
- vm_paddr_t pa;
- void *va;
-
- *flags = UMA_SLAB_PRIV;
- m = vm_page_alloc_noobj_domain(domain, malloc2vm_flags(wait) |
- VM_ALLOC_WIRED);
- if (m == NULL)
- return (NULL);
- pa = m->phys_addr;
- if ((wait & M_NODUMP) == 0)
- dump_add_page(pa);
- va = (void *)PHYS_TO_DMAP(pa);
- return (va);
-}
-
-void
-uma_small_free(void *mem, vm_size_t size, u_int8_t flags)
-{
- vm_page_t m;
- vm_paddr_t pa;
-
- pa = DMAP_TO_PHYS((vm_offset_t)mem);
- dump_drop_page(pa);
- m = PHYS_TO_VM_PAGE(pa);
- vm_page_unwire_noq(m);
- vm_page_free(m);
-}
diff --git a/sys/arm64/include/vmparam.h b/sys/arm64/include/vmparam.h
index ffa5a538504a..0dcd02d63938 100644
--- a/sys/arm64/include/vmparam.h
+++ b/sys/arm64/include/vmparam.h
@@ -293,7 +293,7 @@
#endif
#if !defined(KASAN) && !defined(KMSAN)
-#define UMA_MD_SMALL_ALLOC
+#define UMA_USE_DMAP
#endif
#ifndef LOCORE
diff --git a/sys/conf/files.amd64 b/sys/conf/files.amd64
index 18dec5ed47b0..add27418ce08 100644
--- a/sys/conf/files.amd64
+++ b/sys/conf/files.amd64
@@ -92,7 +92,6 @@ amd64/amd64/support.S standard
amd64/amd64/sys_machdep.c standard
amd64/amd64/trap.c standard
amd64/amd64/uio_machdep.c standard
-amd64/amd64/uma_machdep.c standard
amd64/amd64/vm_machdep.c standard
amd64/pci/pci_cfgreg.c optional pci
cddl/dev/dtrace/amd64/dtrace_asm.S optional dtrace compile-with "${DTRACE_S}"
diff --git a/sys/conf/files.arm64 b/sys/conf/files.arm64
index a3d4fc09da89..8139a7af8ed3 100644
--- a/sys/conf/files.arm64
+++ b/sys/conf/files.arm64
@@ -78,7 +78,6 @@ arm64/arm64/swtch.S standard
arm64/arm64/sys_machdep.c standard
arm64/arm64/trap.c standard
arm64/arm64/uio_machdep.c standard
-arm64/arm64/uma_machdep.c standard
arm64/arm64/undefined.c standard
arm64/arm64/unwind.c optional ddb | kdtrace_hooks | stack \
compile-with "${NORMAL_C:N-fsanitize*:N-fno-sanitize*}"
diff --git a/sys/conf/files.riscv b/sys/conf/files.riscv
index be7ae2b40a08..49c8ddd0c516 100644
--- a/sys/conf/files.riscv
+++ b/sys/conf/files.riscv
@@ -67,7 +67,6 @@ riscv/riscv/sys_machdep.c standard
riscv/riscv/trap.c standard
riscv/riscv/timer.c standard
riscv/riscv/uio_machdep.c standard
-riscv/riscv/uma_machdep.c standard
riscv/riscv/unwind.c optional ddb | kdtrace_hooks | stack
riscv/riscv/vm_machdep.c standard
diff --git a/sys/kern/subr_vmem.c b/sys/kern/subr_vmem.c
index 1c9a8a5be979..a706d944dc3f 100644
--- a/sys/kern/subr_vmem.c
+++ b/sys/kern/subr_vmem.c
@@ -624,14 +624,14 @@ qc_drain(vmem_t *vm)
uma_zone_reclaim(vm->vm_qcache[i].qc_cache, UMA_RECLAIM_DRAIN);
}
-#ifndef UMA_MD_SMALL_ALLOC
+#ifndef UMA_USE_DMAP
static struct mtx_padalign __exclusive_cache_line vmem_bt_lock;
/*
* vmem_bt_alloc: Allocate a new page of boundary tags.
*
- * On architectures with uma_small_alloc there is no recursion; no address
+ * On architectures with UMA_USE_DMAP there is no recursion; no address
* space need be allocated to allocate boundary tags. For the others, we
* must handle recursion. Boundary tags are necessary to allocate new
* boundary tags.
@@ -707,7 +707,7 @@ vmem_startup(void)
vmem_bt_zone = uma_zcreate("vmem btag",
sizeof(struct vmem_btag), NULL, NULL, NULL, NULL,
UMA_ALIGN_PTR, UMA_ZONE_VM);
-#ifndef UMA_MD_SMALL_ALLOC
+#ifndef UMA_USE_DMAP
mtx_init(&vmem_bt_lock, "btag lock", NULL, MTX_DEF);
uma_prealloc(vmem_bt_zone, BT_MAXALLOC);
/*
diff --git a/sys/powerpc/include/vmparam.h b/sys/powerpc/include/vmparam.h
index 89982a618bc7..250da8298610 100644
--- a/sys/powerpc/include/vmparam.h
+++ b/sys/powerpc/include/vmparam.h
@@ -111,6 +111,8 @@
#define KERNBASE 0x00100100 /* start of kernel virtual */
+#define UMA_MD_SMALL_ALLOC
+
#ifdef AIM
#ifndef __powerpc64__
#define VM_MIN_KERNEL_ADDRESS ((vm_offset_t)KERNEL_SR << ADDR_SR_SHFT)
@@ -122,13 +124,13 @@
* Use the direct-mapped BAT registers for UMA small allocs. This
* takes pressure off the small amount of available KVA.
*/
-#define UMA_MD_SMALL_ALLOC
+#define UMA_USE_DMAP
#else /* Book-E */
/* Use the direct map for UMA small allocs on powerpc64. */
#ifdef __powerpc64__
-#define UMA_MD_SMALL_ALLOC
+#define UMA_USE_DMAP
#else
#define VM_MIN_KERNEL_ADDRESS 0xc0000000
#define VM_MAX_KERNEL_ADDRESS 0xffffefff
diff --git a/sys/riscv/include/vmparam.h b/sys/riscv/include/vmparam.h
index d2014654b691..5711bc8c347e 100644
--- a/sys/riscv/include/vmparam.h
+++ b/sys/riscv/include/vmparam.h
@@ -234,7 +234,7 @@
#define VM_INITIAL_PAGEIN 16
#endif
-#define UMA_MD_SMALL_ALLOC
+#define UMA_USE_DMAP
#ifndef LOCORE
extern vm_paddr_t dmap_phys_base;
diff --git a/sys/riscv/riscv/uma_machdep.c b/sys/riscv/riscv/uma_machdep.c
deleted file mode 100644
index 54e0d25800f6..000000000000
--- a/sys/riscv/riscv/uma_machdep.c
+++ /dev/null
@@ -1,68 +0,0 @@
-/*-
- * Copyright (c) 2003 Alan L. Cox <alc@cs.rice.edu>
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include <sys/param.h>
-#include <sys/malloc.h>
-#include <vm/vm.h>
-#include <vm/vm_param.h>
-#include <vm/vm_page.h>
-#include <vm/vm_phys.h>
-#include <vm/vm_dumpset.h>
-#include <vm/uma.h>
-#include <vm/uma_int.h>
-
-void *
-uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, u_int8_t *flags,
- int wait)
-{
- vm_page_t m;
- vm_paddr_t pa;
- void *va;
-
- *flags = UMA_SLAB_PRIV;
- m = vm_page_alloc_noobj_domain(domain, malloc2vm_flags(wait) |
- VM_ALLOC_WIRED);
- if (m == NULL)
- return (NULL);
- pa = m->phys_addr;
- if ((wait & M_NODUMP) == 0)
- dump_add_page(pa);
- va = (void *)PHYS_TO_DMAP(pa);
- return (va);
-}
-
-void
-uma_small_free(void *mem, vm_size_t size, u_int8_t flags)
-{
- vm_page_t m;
- vm_paddr_t pa;
-
- pa = DMAP_TO_PHYS((vm_offset_t)mem);
- dump_drop_page(pa);
- m = PHYS_TO_VM_PAGE(pa);
- vm_page_unwire_noq(m);
- vm_page_free(m);
-}
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index d185f12448ee..f9b6e18899c6 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -2079,6 +2079,28 @@ contig_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
bytes, wait, 0, ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_DEFAULT));
}
+#if defined(UMA_USE_DMAP) && !defined(UMA_MD_SMALL_ALLOC)
+void *
+uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
+ int wait)
+{
+ vm_page_t m;
+ vm_paddr_t pa;
+ void *va;
+
+ *flags = UMA_SLAB_PRIV;
+ m = vm_page_alloc_noobj_domain(domain,
+ malloc2vm_flags(wait) | VM_ALLOC_WIRED);
+ if (m == NULL)
+ return (NULL);
+ pa = m->phys_addr;
+ if ((wait & M_NODUMP) == 0)
+ dump_add_page(pa);
+ va = (void *)PHYS_TO_DMAP(pa);
+ return (va);
+}
+#endif
+
/*
* Frees a number of pages to the system
*
@@ -2141,6 +2163,21 @@ pcpu_page_free(void *mem, vm_size_t size, uint8_t flags)
kva_free(sva, size);
}
+#if defined(UMA_USE_DMAP) && !defined(UMA_MD_SMALL_ALLOC)
+void
+uma_small_free(void *mem, vm_size_t size, uint8_t flags)
+{
+ vm_page_t m;
+ vm_paddr_t pa;
+
+ pa = DMAP_TO_PHYS((vm_offset_t)mem);
+ dump_drop_page(pa);
+ m = PHYS_TO_VM_PAGE(pa);
+ vm_page_unwire_noq(m);
+ vm_page_free(m);
+}
+#endif
+
/*
* Zero fill initializer
*
@@ -3154,7 +3191,7 @@ uma_startup1(vm_offset_t virtual_avail)
smr_init();
}
-#ifndef UMA_MD_SMALL_ALLOC
+#ifndef UMA_USE_DMAP
extern void vm_radix_reserve_kva(void);
#endif
@@ -3174,7 +3211,7 @@ uma_startup2(void)
vm_map_unlock(kernel_map);
}
-#ifndef UMA_MD_SMALL_ALLOC
+#ifndef UMA_USE_DMAP
/* Set up radix zone to use noobj_alloc. */
vm_radix_reserve_kva();
#endif
@@ -5171,7 +5208,7 @@ uma_zone_reserve_kva(uma_zone_t zone, int count)
pages = howmany(count, keg->uk_ipers) * keg->uk_ppera;
-#ifdef UMA_MD_SMALL_ALLOC
+#ifdef UMA_USE_DMAP
if (keg->uk_ppera > 1) {
#else
if (1) {
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 3111dda6e99d..3c7afcb6642f 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -172,7 +172,7 @@ static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
start = end; \
}
-#ifndef UMA_MD_SMALL_ALLOC
+#ifndef UMA_USE_DMAP
/*
* Allocate a new slab for kernel map entries. The kernel map may be locked or
@@ -264,7 +264,7 @@ vm_map_startup(void)
kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
UMA_ZONE_VM | UMA_ZONE_NOBUCKET);
-#ifndef UMA_MD_SMALL_ALLOC
+#ifndef UMA_USE_DMAP
/* Reserve an extra map entry for use when replenishing the reserve. */
uma_zone_reserve(kmapentzone, KMAPENT_RESERVE + 1);
uma_prealloc(kmapentzone, KMAPENT_RESERVE + 1);
@@ -660,7 +660,7 @@ _vm_map_unlock(vm_map_t map, const char *file, int line)
VM_MAP_UNLOCK_CONSISTENT(map);
if (map->system_map) {
-#ifndef UMA_MD_SMALL_ALLOC
+#ifndef UMA_USE_DMAP
if (map == kernel_map && (map->flags & MAP_REPLENISH) != 0) {
uma_prealloc(kmapentzone, 1);
map->flags &= ~MAP_REPLENISH;
@@ -937,7 +937,7 @@ vm_map_entry_create(vm_map_t map)
{
vm_map_entry_t new_entry;
-#ifndef UMA_MD_SMALL_ALLOC
+#ifndef UMA_USE_DMAP
if (map == kernel_map) {
VM_MAP_ASSERT_LOCKED(map);
diff --git a/sys/vm/vm_radix.c b/sys/vm/vm_radix.c
index cfc5a82eacc8..13f9d36194ab 100644
--- a/sys/vm/vm_radix.c
+++ b/sys/vm/vm_radix.c
@@ -82,7 +82,7 @@ vm_radix_node_free(struct pctrie *ptree, void *node)
uma_zfree_smr(vm_radix_node_zone, node);
}
-#ifndef UMA_MD_SMALL_ALLOC
+#ifndef UMA_USE_DMAP
void vm_radix_reserve_kva(void);
/*
* Reserve the KVA necessary to satisfy the node allocation.