author     John Baldwin <jhb@FreeBSD.org>  2010-11-04 15:33:50 +0000
committer  John Baldwin <jhb@FreeBSD.org>  2010-11-04 15:33:50 +0000
commit  e9a069d8afeaf1233857445cf0794911c66b9725 (patch)
tree    2ed6222da3a25ebbe2efd8d56dcaadefe82e4be6 /sys/vm/uma_core.c
parent  9e5c40b3859210c66d7c143856f8c7073dfe59c3 (diff)
Update startup_alloc() to support multi-page allocations and allow internal
zones whose objects are larger than a page to use startup_alloc(). This
allows allocation of zone objects during early boot on machines with a
large number of CPUs, since the resulting zone objects are larger than a
page.

Submitted by:  trema
Reviewed by:   attilio
MFC after:     1 week
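The first hunk below implements the multi-page support described above: instead of unconditionally popping one page off the boot cache, startup_alloc() now walks uma_boot_pages to confirm that "pages" entries remain, and only then dequeues them. What follows is a minimal stand-alone sketch of that two-phase pattern, not code from the commit; it assumes the list is seeded by head insertion from one contiguous region (as uma_startup() does, which is what the commit's in-code comment about "losing tmps references" relies on), and the names bootpage, boot_pages, seed_boot_pages, and take_boot_pages are invented here for illustration:

#include <sys/queue.h>
#include <stddef.h>

/* Hypothetical stand-in for the uma_slab entries on uma_boot_pages. */
struct bootpage {
        LIST_ENTRY(bootpage) link;
        void *data;                     /* start address of this page */
};
static LIST_HEAD(, bootpage) boot_pages = LIST_HEAD_INITIALIZER(boot_pages);

/*
 * Seed the cache from one contiguous region, embedding the header at
 * the start of each page.  Head insertion leaves the list in
 * descending address order: the head is the highest-addressed page.
 */
static void
seed_boot_pages(char *base, size_t pagesz, int n)
{
        struct bootpage *bp;
        int i;

        for (i = 0; i < n; i++) {
                bp = (struct bootpage *)(base + i * pagesz);
                bp->data = bp;
                LIST_INSERT_HEAD(&boot_pages, bp, link);
        }
}

/*
 * Return the start of "pages" contiguous boot pages, or NULL if the
 * cache cannot satisfy the request.
 */
static void *
take_boot_pages(int pages)
{
        struct bootpage *bp;
        int check = pages - 1;

        /* Phase 1: confirm at least "pages" entries remain. */
        bp = LIST_FIRST(&boot_pages);
        while (bp != NULL && check-- > 0)
                bp = LIST_NEXT(bp, link);
        if (bp == NULL)
                return (NULL);

        /* Phase 2: dequeue from the head. */
        while (pages-- > 0) {
                bp = LIST_FIRST(&boot_pages);
                LIST_REMOVE(bp, link);
        }
        /* Descending order: the last entry removed is the lowest page. */
        return (bp->data);
}

Because the list runs from high addresses to low, the last entry dequeued is the lowest-addressed page, so returning bp->data after the loop yields the start of a contiguous "pages"-long run, which is the same property the commit's comment asserts for tmps->us_data.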
Notes:
    svn path=/head/; revision=214782
Diffstat (limited to 'sys/vm/uma_core.c')
-rw-r--r--  sys/vm/uma_core.c  37
1 file changed, 29 insertions(+), 8 deletions(-)
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index 43e37036dd2b..1c0ef0fd8ba6 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -930,15 +930,32 @@ startup_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
 {
         uma_keg_t keg;
         uma_slab_t tmps;
+        int pages, check_pages;
 
         keg = zone_first_keg(zone);
+        pages = howmany(bytes, PAGE_SIZE);
+        check_pages = pages - 1;
+        KASSERT(pages > 0, ("startup_alloc can't reserve 0 pages\n"));
 
         /*
          * Check our small startup cache to see if it has pages remaining.
          */
         mtx_lock(&uma_boot_pages_mtx);
-        if ((tmps = LIST_FIRST(&uma_boot_pages)) != NULL) {
-                LIST_REMOVE(tmps, us_link);
+
+        /* First check if we have enough room. */
+        tmps = LIST_FIRST(&uma_boot_pages);
+        while (tmps != NULL && check_pages-- > 0)
+                tmps = LIST_NEXT(tmps, us_link);
+        if (tmps != NULL) {
+                /*
+                 * It's ok to lose tmps references.  The last one will
+                 * have tmps->us_data pointing to the start address of
+                 * "pages" contiguous pages of memory.
+                 */
+                while (pages-- > 0) {
+                        tmps = LIST_FIRST(&uma_boot_pages);
+                        LIST_REMOVE(tmps, us_link);
+                }
                 mtx_unlock(&uma_boot_pages_mtx);
                 *pflag = tmps->us_flags;
                 return (tmps->us_data);
@@ -950,7 +967,7 @@ startup_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
          * Now that we've booted reset these users to their real allocator.
          */
 #ifdef UMA_MD_SMALL_ALLOC
-        keg->uk_allocf = uma_small_alloc;
+        keg->uk_allocf = (keg->uk_ppera > 1) ? page_alloc : uma_small_alloc;
 #else
         keg->uk_allocf = page_alloc;
 #endif
@@ -1177,12 +1194,15 @@ keg_large_init(uma_keg_t keg)
 
         keg->uk_ppera = pages;
         keg->uk_ipers = 1;
+        keg->uk_rsize = keg->uk_size;
+
+        /* We can't do OFFPAGE if we're internal, bail out here. */
+        if (keg->uk_flags & UMA_ZFLAG_INTERNAL)
+                return;
 
         keg->uk_flags |= UMA_ZONE_OFFPAGE;
         if ((keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
                 keg->uk_flags |= UMA_ZONE_HASH;
-
-        keg->uk_rsize = keg->uk_size;
 }
 
 static void
@@ -1301,7 +1321,8 @@ keg_ctor(void *mem, int size, void *udata, int flags)
 #endif
                 if (booted == 0)
                         keg->uk_allocf = startup_alloc;
-        }
+        } else if (booted == 0 && (keg->uk_flags & UMA_ZFLAG_INTERNAL))
+                keg->uk_allocf = startup_alloc;
 
         /*
          * Initialize keg's lock (shared among zones).
@@ -1330,7 +1351,7 @@ keg_ctor(void *mem, int size, void *udata, int flags)
                 if (totsize & UMA_ALIGN_PTR)
                         totsize = (totsize & ~UMA_ALIGN_PTR) +
                             (UMA_ALIGN_PTR + 1);
-                keg->uk_pgoff = UMA_SLAB_SIZE - totsize;
+                keg->uk_pgoff = (UMA_SLAB_SIZE * keg->uk_ppera) - totsize;
 
                 if (keg->uk_flags & UMA_ZONE_REFCNT)
                         totsize = keg->uk_pgoff + sizeof(struct uma_slab_refcnt)
@@ -1346,7 +1367,7 @@ keg_ctor(void *mem, int size, void *udata, int flags)
                  * mathematically possible for all cases, so we make
                  * sure here anyway.
                  */
-                if (totsize > UMA_SLAB_SIZE) {
+                if (totsize > UMA_SLAB_SIZE * keg->uk_ppera) {
                         printf("zone %s ipers %d rsize %d size %d\n",
                             zone->uz_name, keg->uk_ipers, keg->uk_rsize,
                             keg->uk_size);
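The last two hunks account for multi-page slabs when the slab header lives inside the allocation: uk_pgoff, the offset of the slab header, is now measured from the end of the whole uk_ppera-page run rather than the end of the first page, and the overflow check scales the same way. A worked sketch of that arithmetic, assuming UMA_SLAB_SIZE and UMA_ALIGN_PTR take their common values (4096 bytes and sizeof(void *) - 1); the totsize value of 100 bytes is made up for illustration:

#include <stdio.h>

#define UMA_SLAB_SIZE   4096U                   /* assumed: one page */
#define UMA_ALIGN_PTR   (sizeof(void *) - 1)    /* pointer alignment mask */

int
main(void)
{
        unsigned int ppera = 2;         /* pages per allocation (uk_ppera) */
        size_t totsize = 100;           /* made-up header + free list size */

        /* Round up to pointer alignment, as keg_ctor() does: 100 -> 104. */
        if (totsize & UMA_ALIGN_PTR)
                totsize = (totsize & ~UMA_ALIGN_PTR) + (UMA_ALIGN_PTR + 1);

        /* Before the commit: header always at the end of the first page. */
        printf("old uk_pgoff: %zu\n", (size_t)UMA_SLAB_SIZE - totsize);

        /* After the commit: header at the end of the whole two-page run. */
        printf("new uk_pgoff: %zu\n", (size_t)(UMA_SLAB_SIZE * ppera) - totsize);
        return (0);
}

For a two-page keg the old formula would place the header 3992 bytes into a run whose single object spans both pages, overlapping the object; the new formula puts it at byte 8088, in the slack at the end of the run, which is also why the sanity check now compares totsize against UMA_SLAB_SIZE * keg->uk_ppera.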