Diffstat (limited to 'contrib/jemalloc')
-rw-r--r--  contrib/jemalloc/COPYING | 4
-rw-r--r--  contrib/jemalloc/ChangeLog | 191
-rw-r--r--  contrib/jemalloc/FREEBSD-Xlist | 17
-rw-r--r--  contrib/jemalloc/FREEBSD-diffs | 316
-rwxr-xr-x  contrib/jemalloc/FREEBSD-upgrade | 101
-rw-r--r--  contrib/jemalloc/VERSION | 2
-rw-r--r--  contrib/jemalloc/doc/jemalloc.3 | 925
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/arena.h | 1538
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/arena_externs.h | 96
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/arena_inlines_a.h | 57
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/arena_inlines_b.h | 361
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/arena_structs_a.h | 11
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/arena_structs_b.h | 284
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/arena_types.h | 45
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/assert.h | 23
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/atomic.h | 692
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/atomic_c11.h | 97
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/atomic_gcc_atomic.h | 127
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/atomic_gcc_sync.h | 191
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/background_thread_externs.h | 31
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/background_thread_inlines.h | 56
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/background_thread_structs.h | 52
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/base.h | 25
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/base_externs.h | 19
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/base_inlines.h | 9
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/base_structs.h | 55
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/base_types.h | 7
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/bit_util.h | 165
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/bitmap.h | 279
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/chunk.h | 97
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/chunk_dss.h | 37
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/chunk_mmap.h | 21
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/ckh.h | 113
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/ctl.h | 175
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/extent.h | 275
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/extent_dss.h | 26
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/extent_externs.h | 72
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/extent_inlines.h | 407
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/extent_mmap.h | 10
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/extent_structs.h | 199
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/extent_types.h | 9
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/hash.h | 105
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/hooks.h | 12
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/huge.h | 35
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h | 1291
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h | 14
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h | 125
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h | 53
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h | 94
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h | 168
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h | 86
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h | 197
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h | 73
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h | 178
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/jemalloc_preamble.h | 176
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/large_externs.h | 26
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/malloc_io.h | 62
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/mb.h | 115
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/mutex.h | 307
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/mutex_pool.h | 94
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/mutex_prof.h | 86
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/nstime.h | 70
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/pages.h | 88
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/ph.h | 152
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/private_namespace.h | 1007
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/prng.h | 140
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/prof.h | 547
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/prof_externs.h | 92
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/prof_inlines_a.h | 72
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/prof_inlines_b.h | 217
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/prof_structs.h | 201
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/prof_types.h | 56
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/public_namespace.h | 42
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/ql.h | 43
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/qr.h | 35
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/quarantine.h | 60
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/rb.h | 127
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/rtree.h | 716
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/rtree_tsd.h | 50
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/size_classes.h | 10812
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/smoothstep.h | 30
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/spin.h | 67
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/stats.h | 243
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/stats_tsd.h | 12
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/sz.h | 317
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/tcache.h | 472
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/tcache_externs.h | 55
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/tcache_inlines.h | 250
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/tcache_structs.h | 64
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/tcache_types.h | 61
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/ticker.h | 89
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/tsd.h | 978
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/tsd_generic.h | 157
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/tsd_malloc_thread_cleanup.h | 60
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/tsd_tls.h | 59
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/tsd_types.h | 10
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/util.h | 319
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/valgrind.h | 128
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/witness.h | 350
-rw-r--r--  contrib/jemalloc/include/jemalloc/jemalloc.h | 233
-rw-r--r--  contrib/jemalloc/include/jemalloc/jemalloc_FreeBSD.h | 23
-rw-r--r--  contrib/jemalloc/include/jemalloc/jemalloc_typedefs.h | 80
-rw-r--r--  contrib/jemalloc/src/arena.c | 4738
-rw-r--r--  contrib/jemalloc/src/atomic.c | 2
-rw-r--r--  contrib/jemalloc/src/background_thread.c | 846
-rw-r--r--  contrib/jemalloc/src/base.c | 469
-rw-r--r--  contrib/jemalloc/src/bitmap.c | 74
-rw-r--r--  contrib/jemalloc/src/chunk.c | 799
-rw-r--r--  contrib/jemalloc/src/chunk_dss.c | 247
-rw-r--r--  contrib/jemalloc/src/chunk_mmap.c | 78
-rw-r--r--  contrib/jemalloc/src/ckh.c | 206
-rw-r--r--  contrib/jemalloc/src/ctl.c | 2166
-rw-r--r--  contrib/jemalloc/src/extent.c | 1913
-rw-r--r--  contrib/jemalloc/src/extent_dss.c | 269
-rw-r--r--  contrib/jemalloc/src/extent_mmap.c | 42
-rw-r--r--  contrib/jemalloc/src/hash.c | 5
-rw-r--r--  contrib/jemalloc/src/hooks.c | 12
-rw-r--r--  contrib/jemalloc/src/huge.c | 498
-rw-r--r--  contrib/jemalloc/src/jemalloc.c | 2905
-rw-r--r--  contrib/jemalloc/src/large.c | 371
-rw-r--r--  contrib/jemalloc/src/malloc_io.c (renamed from contrib/jemalloc/src/util.c) | 185
-rw-r--r--  contrib/jemalloc/src/mb.c | 2
-rw-r--r--  contrib/jemalloc/src/mutex.c | 212
-rw-r--r--  contrib/jemalloc/src/mutex_pool.c | 18
-rw-r--r--  contrib/jemalloc/src/nstime.c | 140
-rw-r--r--  contrib/jemalloc/src/pages.c | 346
-rw-r--r--  contrib/jemalloc/src/prng.c | 5
-rw-r--r--  contrib/jemalloc/src/prof.c | 1143
-rw-r--r--  contrib/jemalloc/src/quarantine.c | 183
-rw-r--r--  contrib/jemalloc/src/rtree.c | 388
-rw-r--r--  contrib/jemalloc/src/spin.c | 6
-rw-r--r--  contrib/jemalloc/src/stats.c | 966
-rw-r--r--  contrib/jemalloc/src/sz.c | 106
-rw-r--r--  contrib/jemalloc/src/tcache.c | 536
-rw-r--r--  contrib/jemalloc/src/ticker.c | 5
-rw-r--r--  contrib/jemalloc/src/tsd.c | 244
-rw-r--r--  contrib/jemalloc/src/witness.c | 120
137 files changed, 25447 insertions, 23896 deletions
diff --git a/contrib/jemalloc/COPYING b/contrib/jemalloc/COPYING
index 104b1f8b0177..e308632a8132 100644
--- a/contrib/jemalloc/COPYING
+++ b/contrib/jemalloc/COPYING
@@ -1,10 +1,10 @@
Unless otherwise specified, files in the jemalloc source distribution are
subject to the following license:
--------------------------------------------------------------------------------
-Copyright (C) 2002-2016 Jason Evans <jasone@canonware.com>.
+Copyright (C) 2002-2017 Jason Evans <jasone@canonware.com>.
All rights reserved.
Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved.
-Copyright (C) 2009-2016 Facebook, Inc. All rights reserved.
+Copyright (C) 2009-2017 Facebook, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
diff --git a/contrib/jemalloc/ChangeLog b/contrib/jemalloc/ChangeLog
index a9406853e1bf..98c12f2048e5 100644
--- a/contrib/jemalloc/ChangeLog
+++ b/contrib/jemalloc/ChangeLog
@@ -4,6 +4,193 @@ brevity. Much more detail can be found in the git revision history:
https://github.com/jemalloc/jemalloc
+* 5.0.0 (June 13, 2017)
+
+ Unlike all previous jemalloc releases, this release does not use naturally
+ aligned "chunks" for virtual memory management, and instead uses page-aligned
+ "extents". This change has few externally visible effects, but the internal
+ impacts are... extensive. Many other internal changes combine to make this
+ the most cohesively designed version of jemalloc so far, with ample
+ opportunity for further enhancements.
+
+ Continuous integration is now an integral aspect of development thanks to the
+ efforts of @davidtgoldblatt, and the dev branch tends to remain reasonably
+ stable on the tested platforms (Linux, FreeBSD, macOS, and Windows). As a
+ side effect the official release frequency may decrease over time.
+
+ New features:
+ - Implement optional per-CPU arena support; threads choose which arena to use
+ based on current CPU rather than on fixed thread-->arena associations.
+ (@interwq)
+ - Implement two-phase decay of unused dirty pages. Pages transition from
+ dirty-->muzzy-->clean, where the first phase transition relies on
+ madvise(... MADV_FREE) semantics, and the second phase transition discards
+ pages such that they are replaced with demand-zeroed pages on next access.
+ (@jasone)
+ - Increase decay time resolution from seconds to milliseconds. (@jasone)
+ - Implement opt-in per CPU background threads, and use them for asynchronous
+ decay-driven unused dirty page purging. (@interwq)
+ - Add mutex profiling, which collects a variety of statistics useful for
+ diagnosing overhead/contention issues. (@interwq)
+ - Add C++ new/delete operator bindings. (@djwatson)
+ - Support manually created arena destruction, such that all data and metadata
+ are discarded. Add MALLCTL_ARENAS_DESTROYED for accessing merged stats
+ associated with destroyed arenas. (@jasone)
+ - Add MALLCTL_ARENAS_ALL as a fixed index for use in accessing
+ merged/destroyed arena statistics via mallctl. (@jasone)
+ - Add opt.abort_conf to optionally abort if invalid configuration options are
+ detected during initialization. (@interwq)
+ - Add opt.stats_print_opts, so that e.g. JSON output can be selected for the
+ stats dumped during exit if opt.stats_print is true. (@jasone)
+ - Add --with-version=VERSION for use when embedding jemalloc into another
+ project's git repository. (@jasone)
+ - Add --disable-thp to support cross compiling. (@jasone)
+ - Add --with-lg-hugepage to support cross compiling. (@jasone)
+ - Add mallctl interfaces (various authors):
+ + background_thread
+ + opt.abort_conf
+ + opt.retain
+ + opt.percpu_arena
+ + opt.background_thread
+ + opt.{dirty,muzzy}_decay_ms
+ + opt.stats_print_opts
+ + arena.<i>.initialized
+ + arena.<i>.destroy
+ + arena.<i>.{dirty,muzzy}_decay_ms
+ + arena.<i>.extent_hooks
+ + arenas.{dirty,muzzy}_decay_ms
+ + arenas.bin.<i>.slab_size
+ + arenas.nlextents
+ + arenas.lextent.<i>.size
+ + arenas.create
+ + stats.background_thread.{num_threads,num_runs,run_interval}
+ + stats.mutexes.{ctl,background_thread,prof,reset}.
+ {num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds,
+ num_owner_switch}
+ + stats.arenas.<i>.{dirty,muzzy}_decay_ms
+ + stats.arenas.<i>.uptime
+ + stats.arenas.<i>.{pmuzzy,base,internal,resident}
+ + stats.arenas.<i>.{dirty,muzzy}_{npurge,nmadvise,purged}
+ + stats.arenas.<i>.bins.<j>.{nslabs,reslabs,curslabs}
+ + stats.arenas.<i>.bins.<j>.mutex.
+ {num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds,
+ num_owner_switch}
+ + stats.arenas.<i>.lextents.<j>.{nmalloc,ndalloc,nrequests,curlextents}
+ + stats.arenas.i.mutexes.{large,extent_avail,extents_dirty,extents_muzzy,
+ extents_retained,decay_dirty,decay_muzzy,base,tcache_list}.
+ {num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds,
+ num_owner_switch}
+
+ Portability improvements:
+ - Improve reentrant allocation support, such that deadlock is less likely if
+ e.g. a system library call in turn allocates memory. (@davidtgoldblatt,
+ @interwq)
+ - Support static linking of jemalloc with glibc. (@djwatson)
+
+ Optimizations and refactors:
+ - Organize virtual memory as "extents" of virtual memory pages, rather than as
+ naturally aligned "chunks", and store all metadata in arbitrarily distant
+ locations. This reduces virtual memory external fragmentation, and will
+ interact better with huge pages (not yet explicitly supported). (@jasone)
+ - Fold large and huge size classes together; only small and large size classes
+ remain. (@jasone)
+ - Unify the allocation paths, and merge most fast-path branching decisions.
+ (@davidtgoldblatt, @interwq)
+ - Embed per thread automatic tcache into thread-specific data, which reduces
+ conditional branches and dereferences. Also reorganize tcache to increase
+ fast-path data locality. (@interwq)
+ - Rewrite atomics to closely model the C11 API, convert various
+ synchronization from mutex-based to atomic, and use the explicit memory
+ ordering control to resolve various hypothetical races without increasing
+ synchronization overhead. (@davidtgoldblatt)
+ - Extensively optimize rtree via various methods:
+ + Add multiple layers of rtree lookup caching, since rtree lookups are now
+ part of fast-path deallocation. (@interwq)
+ + Determine rtree layout at compile time. (@jasone)
+ + Make the tree shallower for common configurations. (@jasone)
+ + Embed the root node in the top-level rtree data structure, thus avoiding
+ one level of indirection. (@jasone)
+ + Further specialize leaf elements as compared to internal node elements,
+ and directly embed extent metadata needed for fast-path deallocation.
+ (@jasone)
+ + Ignore leading always-zero address bits (architecture-specific).
+ (@jasone)
+ - Reorganize headers (ongoing work) to make them hermetic, and disentangle
+ various module dependencies. (@davidtgoldblatt)
+ - Convert various internal data structures such as size class metadata from
+ boot-time-initialized to compile-time-initialized. Propagate resulting data
+ structure simplifications, such as making arena metadata fixed-size.
+ (@jasone)
+ - Simplify size class lookups when constrained to size classes that are
+ multiples of the page size. This speeds lookups, but the primary benefit is
+ complexity reduction in code that was the source of numerous regressions.
+ (@jasone)
+ - Lock individual extents when possible for localized extent operations,
+ rather than relying on a top-level arena lock. (@davidtgoldblatt, @jasone)
+ - Use first fit layout policy instead of best fit, in order to improve
+ packing. (@jasone)
+ - If munmap(2) is not in use, use an exponential series to grow each arena's
+ virtual memory, so that the number of disjoint virtual memory mappings
+ remains low. (@jasone)
+ - Implement per arena base allocators, so that arenas never share any virtual
+ memory pages. (@jasone)
+ - Automatically generate private symbol name mangling macros. (@jasone)
+
+ Incompatible changes:
+ - Replace chunk hooks with an expanded/normalized set of extent hooks.
+ (@jasone)
+ - Remove ratio-based purging. (@jasone)
+ - Remove --disable-tcache. (@jasone)
+ - Remove --disable-tls. (@jasone)
+ - Remove --enable-ivsalloc. (@jasone)
+ - Remove --with-lg-size-class-group. (@jasone)
+ - Remove --with-lg-tiny-min. (@jasone)
+ - Remove --disable-cc-silence. (@jasone)
+ - Remove --enable-code-coverage. (@jasone)
+ - Remove --disable-munmap (replaced by opt.retain). (@jasone)
+ - Remove Valgrind support. (@jasone)
+ - Remove quarantine support. (@jasone)
+ - Remove redzone support. (@jasone)
+ - Remove mallctl interfaces (various authors):
+ + config.munmap
+ + config.tcache
+ + config.tls
+ + config.valgrind
+ + opt.lg_chunk
+ + opt.purge
+ + opt.lg_dirty_mult
+ + opt.decay_time
+ + opt.quarantine
+ + opt.redzone
+ + opt.thp
+ + arena.<i>.lg_dirty_mult
+ + arena.<i>.decay_time
+ + arena.<i>.chunk_hooks
+ + arenas.initialized
+ + arenas.lg_dirty_mult
+ + arenas.decay_time
+ + arenas.bin.<i>.run_size
+ + arenas.nlruns
+ + arenas.lrun.<i>.size
+ + arenas.nhchunks
+ + arenas.hchunk.<i>.size
+ + arenas.extend
+ + stats.cactive
+ + stats.arenas.<i>.lg_dirty_mult
+ + stats.arenas.<i>.decay_time
+ + stats.arenas.<i>.metadata.{mapped,allocated}
+ + stats.arenas.<i>.{npurge,nmadvise,purged}
+ + stats.arenas.<i>.huge.{allocated,nmalloc,ndalloc,nrequests}
+ + stats.arenas.<i>.bins.<j>.{nruns,reruns,curruns}
+ + stats.arenas.<i>.lruns.<j>.{nmalloc,ndalloc,nrequests,curruns}
+ + stats.arenas.<i>.hchunks.<j>.{nmalloc,ndalloc,nrequests,curhchunks}
+
+ Bug fixes:
+ - Improve interval-based profile dump triggering to dump only one profile when
+ a single allocation's size exceeds the interval. (@jasone)
+ - Use prefixed function names (as controlled by --with-jemalloc-prefix) when
+ pruning backtrace frames in jeprof. (@jasone)
+
* 4.5.0 (February 28, 2017)
This is the first release to benefit from much broader continuous integration
@@ -12,7 +199,7 @@ brevity. Much more detail can be found in the git revision history:
regressions fixed by this release.
New features:
- - Add --disable-thp and the opt.thp to provide opt-out mechanisms for
+ - Add --disable-thp and the opt.thp mallctl to provide opt-out mechanisms for
transparent huge page integration. (@jasone)
- Update zone allocator integration to work with macOS 10.12. (@glandium)
- Restructure *CFLAGS configuration, so that CFLAGS behaves typically, and
@@ -25,7 +212,7 @@ brevity. Much more detail can be found in the git revision history:
- Handle race in per size class utilization computation. This functionality
was first released in 4.0.0. (@interwq)
- Fix lock order reversal during gdump. (@jasone)
- - Fix-refactor tcache synchronization. This regression was first released in
+ - Fix/refactor tcache synchronization. This regression was first released in
4.0.0. (@jasone)
- Fix various JSON-formatted malloc_stats_print() bugs. This functionality
was first released in 4.3.0. (@jasone)
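
[Editor's note, not part of the patch: the 5.0.0 ChangeLog entry above lists a number of new mallctl controls (background_thread, opt.dirty_decay_ms, opt.percpu_arena, and others). A minimal, non-authoritative sketch of how a program might touch two of them follows; it assumes FreeBSD's <malloc_np.h> declares mallctl(), and the exact set of readable/writable controls depends on how libc's jemalloc was configured.]

#include <sys/types.h>
#include <malloc_np.h>
#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
	ssize_t decay_ms;
	size_t len = sizeof(decay_ms);

	/* Default time before unused dirty pages start decaying to muzzy. */
	if (mallctl("opt.dirty_decay_ms", &decay_ms, &len, NULL, 0) == 0)
		printf("opt.dirty_decay_ms: %zd\n", decay_ms);

	/* Ask for asynchronous purging by background threads, if supported. */
	bool enable = true;
	(void)mallctl("background_thread", NULL, NULL, &enable, sizeof(enable));

	return (0);
}

[The same options can also be chosen at startup through the malloc_conf string documented in the man page, e.g. something like malloc_conf = "background_thread:true,dirty_decay_ms:5000";.]
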
diff --git a/contrib/jemalloc/FREEBSD-Xlist b/contrib/jemalloc/FREEBSD-Xlist
index a1339c9d66bf..13eb35be56e8 100644
--- a/contrib/jemalloc/FREEBSD-Xlist
+++ b/contrib/jemalloc/FREEBSD-Xlist
@@ -4,7 +4,7 @@ $FreeBSD$
.git*
.travis.yml
FREEBSD-*
-INSTALL
+INSTALL.md
Makefile*
README
autogen.sh
@@ -13,23 +13,24 @@ bin/
build-aux/
config.*
configure*
-coverage.sh
doc/*.in
doc/*.xml
doc/*.xsl
doc/*.html
-include/jemalloc/internal/jemalloc_internal.h.in
+include/jemalloc/internal/atomic_msvc.h
include/jemalloc/internal/jemalloc_internal_defs.h.in
+include/jemalloc/internal/jemalloc_preamble.h.in
include/jemalloc/internal/private_namespace.sh
-include/jemalloc/internal/private_symbols.txt
-include/jemalloc/internal/private_unnamespace.h
-include/jemalloc/internal/private_unnamespace.sh
+include/jemalloc/internal/private_symbols_jet.awk
+include/jemalloc/internal/private_symbols.awk
+include/jemalloc/internal/private_symbols.sh
include/jemalloc/internal/public_namespace.sh
include/jemalloc/internal/public_symbols.txt
include/jemalloc/internal/public_unnamespace.h
include/jemalloc/internal/public_unnamespace.sh
include/jemalloc/internal/size_classes.sh
include/jemalloc/internal/smoothstep.sh
+include/jemalloc/internal/tsd_win.h
include/jemalloc/jemalloc.h.in
include/jemalloc/jemalloc.sh
include/jemalloc/jemalloc_defs.h
@@ -48,8 +49,10 @@ include/jemalloc/jemalloc_typedefs.h.in
include/msvc_compat/
install-sh
jemalloc.pc*
+m4/
msvc/
+run_tests.sh
scripts/
-src/valgrind.c
+src/jemalloc_cpp.cpp
src/zone.c
test/
diff --git a/contrib/jemalloc/FREEBSD-diffs b/contrib/jemalloc/FREEBSD-diffs
index 730a6f5f63bf..8786132d5c6a 100644
--- a/contrib/jemalloc/FREEBSD-diffs
+++ b/contrib/jemalloc/FREEBSD-diffs
@@ -1,21 +1,19 @@
diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
-index c97ab0f..be8dda5 100644
+index 21e401ac..f977c5f5 100644
--- a/doc/jemalloc.xml.in
+++ b/doc/jemalloc.xml.in
-@@ -53,11 +53,23 @@
+@@ -53,11 +53,21 @@
<para>This manual describes jemalloc @jemalloc_version@. More information
can be found at the <ulink
url="http://jemalloc.net/">jemalloc website</ulink>.</para>
+
+ <para>The following configuration options are enabled in libc's built-in
+ jemalloc: <option>--enable-fill</option>,
-+ <option>--enable-lazy-lock</option>, <option>--enable-munmap</option>,
-+ <option>--enable-stats</option>, <option>--enable-tcache</option>,
-+ <option>--enable-tls</option>, <option>--enable-utrace</option>, and
-+ <option>--enable-xmalloc</option>. Additionally,
-+ <option>--enable-debug</option> is enabled in development versions of
-+ FreeBSD (controlled by the <constant>MALLOC_PRODUCTION</constant> make
-+ variable).</para>
++ <option>--enable-lazy-lock</option>, <option>--enable-stats</option>,
++ <option>--enable-utrace</option>, and <option>--enable-xmalloc</option>.
++ Additionally, <option>--enable-debug</option> is enabled in development
++ versions of FreeBSD (controlled by the
++ <constant>MALLOC_PRODUCTION</constant> make variable).</para>
+
</refsect1>
<refsynopsisdiv>
@@ -27,7 +25,7 @@ index c97ab0f..be8dda5 100644
<refsect2>
<title>Standard API</title>
<funcprototype>
-@@ -2989,4 +3001,18 @@ malloc_conf = "lg_chunk:24";]]></programlisting></para>
+@@ -3252,4 +3262,18 @@ malloc_conf = "narenas:1";]]></programlisting></para>
<para>The <function>posix_memalign()</function> function conforms
to IEEE Std 1003.1-2001 (<quote>POSIX.1</quote>).</para>
</refsect1>
@@ -46,42 +44,42 @@ index c97ab0f..be8dda5 100644
+ 11.0.</para>
+ </refsect1>
</refentry>
-diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
-index 119e3a5..277989f 100644
---- a/include/jemalloc/internal/arena.h
-+++ b/include/jemalloc/internal/arena.h
-@@ -731,8 +731,13 @@ arena_miscelm_get_mutable(arena_chunk_t *chunk, size_t pageind)
- JEMALLOC_ALWAYS_INLINE const arena_chunk_map_misc_t *
- arena_miscelm_get_const(const arena_chunk_t *chunk, size_t pageind)
- {
-+#if 1 /* Work around gcc bug. */
-+ arena_chunk_t *mchunk = (arena_chunk_t *)chunk;
+diff --git a/include/jemalloc/internal/hooks.h b/include/jemalloc/internal/hooks.h
+index cd49afcb..85e2a991 100644
+--- a/include/jemalloc/internal/hooks.h
++++ b/include/jemalloc/internal/hooks.h
+@@ -6,13 +6,6 @@ extern JEMALLOC_EXPORT void (*hooks_libc_hook)();
-+ return (arena_miscelm_get_mutable(mchunk, pageind));
-+#else
- return (arena_miscelm_get_mutable((arena_chunk_t *)chunk, pageind));
-+#endif
- }
+ #define JEMALLOC_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn)
- JEMALLOC_ALWAYS_INLINE size_t
-@@ -791,8 +796,13 @@ arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind)
- JEMALLOC_ALWAYS_INLINE const size_t *
- arena_mapbitsp_get_const(const arena_chunk_t *chunk, size_t pageind)
- {
-+#if 1 /* Work around gcc bug. */
-+ arena_chunk_t *mchunk = (arena_chunk_t *)chunk;
+-#define open JEMALLOC_HOOK(open, hooks_libc_hook)
+-#define read JEMALLOC_HOOK(read, hooks_libc_hook)
+-#define write JEMALLOC_HOOK(write, hooks_libc_hook)
+-#define readlink JEMALLOC_HOOK(readlink, hooks_libc_hook)
+-#define close JEMALLOC_HOOK(close, hooks_libc_hook)
+-#define creat JEMALLOC_HOOK(creat, hooks_libc_hook)
+-#define secure_getenv JEMALLOC_HOOK(secure_getenv, hooks_libc_hook)
+ /* Note that this is undef'd and re-define'd in src/prof.c. */
+ #define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, hooks_libc_hook)
-+ return (arena_mapbitsp_get_mutable(mchunk, pageind));
-+#else
- return (arena_mapbitsp_get_mutable((arena_chunk_t *)chunk, pageind));
-+#endif
- }
+diff --git a/include/jemalloc/internal/jemalloc_internal_decls.h b/include/jemalloc/internal/jemalloc_internal_decls.h
+index 1efdb56b..12a7e5a8 100644
+--- a/include/jemalloc/internal/jemalloc_internal_decls.h
++++ b/include/jemalloc/internal/jemalloc_internal_decls.h
+@@ -1,6 +1,9 @@
+ #ifndef JEMALLOC_INTERNAL_DECLS_H
+ #define JEMALLOC_INTERNAL_DECLS_H
- JEMALLOC_ALWAYS_INLINE size_t
-diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
-index e3b499a..827fdbf 100644
---- a/include/jemalloc/internal/jemalloc_internal.h.in
-+++ b/include/jemalloc/internal/jemalloc_internal.h.in
++#include "libc_private.h"
++#include "namespace.h"
++
+ #include <math.h>
+ #ifdef _WIN32
+ # include <windows.h>
+diff --git a/include/jemalloc/internal/jemalloc_preamble.h.in b/include/jemalloc/internal/jemalloc_preamble.h.in
+index 18539a09..c8af8683 100644
+--- a/include/jemalloc/internal/jemalloc_preamble.h.in
++++ b/include/jemalloc/internal/jemalloc_preamble.h.in
@@ -8,6 +8,9 @@
#include <sys/ktrace.h>
#endif
@@ -89,10 +87,10 @@ index e3b499a..827fdbf 100644
+#include "un-namespace.h"
+#include "libc_private.h"
+
- #define JEMALLOC_NO_DEMANGLE
+ #define JEMALLOC_NO_DEMANGLE
#ifdef JEMALLOC_JET
- # define JEMALLOC_N(n) jet_##n
-@@ -42,13 +45,7 @@ static const bool config_fill =
+ # undef JEMALLOC_IS_MALLOC
+@@ -68,13 +71,7 @@ static const bool config_fill =
false
#endif
;
@@ -107,25 +105,11 @@ index e3b499a..827fdbf 100644
static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
static const bool config_prof =
#ifdef JEMALLOC_PROF
-diff --git a/include/jemalloc/internal/jemalloc_internal_decls.h b/include/jemalloc/internal/jemalloc_internal_decls.h
-index c907d91..4626632 100644
---- a/include/jemalloc/internal/jemalloc_internal_decls.h
-+++ b/include/jemalloc/internal/jemalloc_internal_decls.h
-@@ -1,6 +1,9 @@
- #ifndef JEMALLOC_INTERNAL_DECLS_H
- #define JEMALLOC_INTERNAL_DECLS_H
-
-+#include "libc_private.h"
-+#include "namespace.h"
-+
- #include <math.h>
- #ifdef _WIN32
- # include <windows.h>
diff --git a/include/jemalloc/internal/mutex.h b/include/jemalloc/internal/mutex.h
-index 2b4b1c3..e03a6d0 100644
+index 6520c251..0013cbe9 100644
--- a/include/jemalloc/internal/mutex.h
+++ b/include/jemalloc/internal/mutex.h
-@@ -57,9 +57,6 @@ struct malloc_mutex_s {
+@@ -121,9 +121,6 @@ struct malloc_mutex_s {
#ifdef JEMALLOC_LAZY_LOCK
extern bool isthreaded;
@@ -134,33 +118,21 @@ index 2b4b1c3..e03a6d0 100644
-# define isthreaded true
#endif
- bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
-@@ -67,6 +64,7 @@ bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
- void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
- void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
- void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
-+bool malloc_mutex_first_thread(void);
- bool malloc_mutex_boot(void);
+ bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
+@@ -131,6 +128,7 @@ bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
+ void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
+ void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
+ void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
++bool malloc_mutex_first_thread(void);
+ bool malloc_mutex_boot(void);
+ void malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex);
- #endif /* JEMALLOC_H_EXTERNS */
-diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
-index 60b57e5..056a8fe 100644
---- a/include/jemalloc/internal/private_symbols.txt
-+++ b/include/jemalloc/internal/private_symbols.txt
-@@ -312,7 +312,6 @@ iralloct_realign
- isalloc
- isdalloct
- isqalloc
--isthreaded
- ivsalloc
- ixalloc
- jemalloc_postfork_child
diff --git a/include/jemalloc/jemalloc_FreeBSD.h b/include/jemalloc/jemalloc_FreeBSD.h
new file mode 100644
-index 0000000..c58a8f3
+index 00000000..355b565c
--- /dev/null
+++ b/include/jemalloc/jemalloc_FreeBSD.h
-@@ -0,0 +1,162 @@
+@@ -0,0 +1,185 @@
+/*
+ * Override settings that were generated in jemalloc_defs.h as necessary.
+ */
@@ -173,51 +145,65 @@ index 0000000..c58a8f3
+
+#undef JEMALLOC_DSS
+
++#undef JEMALLOC_BACKGROUND_THREAD
++
+/*
+ * The following are architecture-dependent, so conditionally define them for
+ * each supported architecture.
+ */
+#undef JEMALLOC_TLS_MODEL
+#undef STATIC_PAGE_SHIFT
++#undef LG_VADDR
+#undef LG_SIZEOF_PTR
+#undef LG_SIZEOF_INT
+#undef LG_SIZEOF_LONG
+#undef LG_SIZEOF_INTMAX_T
+
+#ifdef __i386__
++# define LG_VADDR 32
+# define LG_SIZEOF_PTR 2
+# define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec")))
+#endif
+#ifdef __ia64__
++# define LG_VADDR 64
+# define LG_SIZEOF_PTR 3
+#endif
+#ifdef __sparc64__
++# define LG_VADDR 64
+# define LG_SIZEOF_PTR 3
+# define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec")))
+#endif
+#ifdef __amd64__
++# define LG_VADDR 48
+# define LG_SIZEOF_PTR 3
+# define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec")))
+#endif
+#ifdef __arm__
++# define LG_VADDR 32
+# define LG_SIZEOF_PTR 2
+#endif
+#ifdef __aarch64__
++# define LG_VADDR 48
+# define LG_SIZEOF_PTR 3
+#endif
+#ifdef __mips__
+#ifdef __mips_n64
++# define LG_VADDR 64
+# define LG_SIZEOF_PTR 3
+#else
++# define LG_VADDR 32
+# define LG_SIZEOF_PTR 2
+#endif
+#endif
+#ifdef __powerpc64__
++# define LG_VADDR 64
+# define LG_SIZEOF_PTR 3
+#elif defined(__powerpc__)
++# define LG_VADDR 32
+# define LG_SIZEOF_PTR 2
+#endif
+#ifdef __riscv__
++# define LG_VADDR 64
+# define LG_SIZEOF_PTR 3
+#endif
+
@@ -291,8 +277,17 @@ index 0000000..c58a8f3
+#define read _read
+#define write _write
+#define close _close
++#define pthread_join _pthread_join
++#define pthread_once _pthread_once
++#define pthread_self _pthread_self
++#define pthread_equal _pthread_equal
+#define pthread_mutex_lock _pthread_mutex_lock
++#define pthread_mutex_trylock _pthread_mutex_trylock
+#define pthread_mutex_unlock _pthread_mutex_unlock
++#define pthread_cond_init _pthread_cond_init
++#define pthread_cond_wait _pthread_cond_wait
++#define pthread_cond_timedwait _pthread_cond_timedwait
++#define pthread_cond_signal _pthread_cond_signal
+
+#ifdef JEMALLOC_C_
+/*
@@ -324,7 +319,7 @@ index 0000000..c58a8f3
+__weak_reference(__nallocm, nallocm);
+#endif
diff --git a/include/jemalloc/jemalloc_rename.sh b/include/jemalloc/jemalloc_rename.sh
-index f943891..47d032c 100755
+index f9438912..47d032c1 100755
--- a/include/jemalloc/jemalloc_rename.sh
+++ b/include/jemalloc/jemalloc_rename.sh
@@ -19,4 +19,6 @@ done
@@ -335,10 +330,10 @@ index f943891..47d032c 100755
+#include "jemalloc_FreeBSD.h"
EOF
diff --git a/src/jemalloc.c b/src/jemalloc.c
-index f73a26c..fcfe204 100644
+index 52c86aa6..868c9e86 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
-@@ -4,6 +4,10 @@
+@@ -20,6 +20,10 @@
/******************************************************************************/
/* Data. */
@@ -349,7 +344,7 @@ index f73a26c..fcfe204 100644
/* Runtime configuration options. */
const char *je_malloc_conf
#ifndef _WIN32
-@@ -2781,6 +2785,107 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
+@@ -2981,6 +2985,103 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
*/
/******************************************************************************/
/*
@@ -366,78 +361,74 @@ index f73a26c..fcfe204 100644
+#define ALLOCM_ERR_NOT_MOVED 2
+
+int
-+je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
-+{
-+ void *p;
-+
++je_allocm(void **ptr, size_t *rsize, size_t size, int flags) {
+ assert(ptr != NULL);
+
-+ p = je_mallocx(size, flags);
-+ if (p == NULL)
++ void *p = je_mallocx(size, flags);
++ if (p == NULL) {
+ return (ALLOCM_ERR_OOM);
-+ if (rsize != NULL)
-+ *rsize = isalloc(tsdn_fetch(), p, config_prof);
++ }
++ if (rsize != NULL) {
++ *rsize = isalloc(tsdn_fetch(), p);
++ }
+ *ptr = p;
-+ return (ALLOCM_SUCCESS);
++ return ALLOCM_SUCCESS;
+}
+
+int
-+je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
-+{
-+ int ret;
-+ bool no_move = flags & ALLOCM_NO_MOVE;
-+
++je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags) {
+ assert(ptr != NULL);
+ assert(*ptr != NULL);
+ assert(size != 0);
+ assert(SIZE_T_MAX - size >= extra);
+
++ int ret;
++ bool no_move = flags & ALLOCM_NO_MOVE;
++
+ if (no_move) {
+ size_t usize = je_xallocx(*ptr, size, extra, flags);
+ ret = (usize >= size) ? ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED;
-+ if (rsize != NULL)
++ if (rsize != NULL) {
+ *rsize = usize;
++ }
+ } else {
+ void *p = je_rallocx(*ptr, size+extra, flags);
+ if (p != NULL) {
+ *ptr = p;
+ ret = ALLOCM_SUCCESS;
-+ } else
++ } else {
+ ret = ALLOCM_ERR_OOM;
-+ if (rsize != NULL)
-+ *rsize = isalloc(tsdn_fetch(), *ptr, config_prof);
++ }
++ if (rsize != NULL) {
++ *rsize = isalloc(tsdn_fetch(), *ptr);
++ }
+ }
-+ return (ret);
++ return ret;
+}
+
+int
-+je_sallocm(const void *ptr, size_t *rsize, int flags)
-+{
-+
++je_sallocm(const void *ptr, size_t *rsize, int flags) {
+ assert(rsize != NULL);
+ *rsize = je_sallocx(ptr, flags);
-+ return (ALLOCM_SUCCESS);
++ return ALLOCM_SUCCESS;
+}
+
+int
-+je_dallocm(void *ptr, int flags)
-+{
-+
++je_dallocm(void *ptr, int flags) {
+ je_dallocx(ptr, flags);
-+ return (ALLOCM_SUCCESS);
++ return ALLOCM_SUCCESS;
+}
+
+int
-+je_nallocm(size_t *rsize, size_t size, int flags)
-+{
-+ size_t usize;
-+
-+ usize = je_nallocx(size, flags);
-+ if (usize == 0)
-+ return (ALLOCM_ERR_OOM);
-+ if (rsize != NULL)
++je_nallocm(size_t *rsize, size_t size, int flags) {
++ size_t usize = je_nallocx(size, flags);
++ if (usize == 0) {
++ return ALLOCM_ERR_OOM;
++ }
++ if (rsize != NULL) {
+ *rsize = usize;
-+ return (ALLOCM_SUCCESS);
++ }
++ return ALLOCM_SUCCESS;
+}
+
+#undef ALLOCM_LG_ALIGN
@@ -457,7 +448,7 @@ index f73a26c..fcfe204 100644
* The following functions are used by threading libraries for protection of
* malloc during fork().
*/
-@@ -2922,4 +3027,11 @@ jemalloc_postfork_child(void)
+@@ -3141,4 +3242,11 @@ jemalloc_postfork_child(void) {
ctl_postfork_child(tsd_tsdn(tsd));
}
@@ -469,11 +460,36 @@ index f73a26c..fcfe204 100644
+}
+
/******************************************************************************/
+diff --git a/src/malloc_io.c b/src/malloc_io.c
+index 6b99afcd..4363cb83 100644
+--- a/src/malloc_io.c
++++ b/src/malloc_io.c
+@@ -88,6 +88,20 @@ wrtmessage(void *cbopaque, const char *s) {
+
+ JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s);
+
++JEMALLOC_ATTR(visibility("hidden"))
++void
++wrtmessage_1_0(const char *s1, const char *s2, const char *s3, const char *s4) {
++
++ wrtmessage(NULL, s1);
++ wrtmessage(NULL, s2);
++ wrtmessage(NULL, s3);
++ wrtmessage(NULL, s4);
++}
++
++void (*__malloc_message_1_0)(const char *s1, const char *s2, const char *s3,
++ const char *s4) = wrtmessage_1_0;
++__sym_compat(_malloc_message, __malloc_message_1_0, FBSD_1.0);
++
+ /*
+ * Wrapper around malloc_message() that avoids the need for
+ * je_malloc_message(...) throughout the code.
diff --git a/src/mutex.c b/src/mutex.c
-index 6333e73..13f8d79 100644
+index a528ef0c..820af613 100644
--- a/src/mutex.c
+++ b/src/mutex.c
-@@ -66,6 +66,17 @@ pthread_create(pthread_t *__restrict thread,
+@@ -40,6 +40,17 @@ pthread_create(pthread_t *__restrict thread,
#ifdef JEMALLOC_MUTEX_INIT_CB
JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
void *(calloc_cb)(size_t, size_t));
@@ -490,24 +506,12 @@ index 6333e73..13f8d79 100644
+}
#endif
- bool
-@@ -142,7 +153,7 @@ malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex)
+ void
+@@ -130,6 +141,16 @@ mutex_addr_comp(const witness_t *witness1, void *mutex1,
}
bool
--malloc_mutex_boot(void)
-+malloc_mutex_first_thread(void)
- {
-
- #ifdef JEMALLOC_MUTEX_INIT_CB
-@@ -156,3 +167,14 @@ malloc_mutex_boot(void)
- #endif
- return (false);
- }
-+
-+bool
-+malloc_mutex_boot(void)
-+{
++malloc_mutex_first_thread(void) {
+
+#ifndef JEMALLOC_MUTEX_INIT_CB
+ return (malloc_mutex_first_thread());
@@ -515,30 +519,8 @@ index 6333e73..13f8d79 100644
+ return (false);
+#endif
+}
-diff --git a/src/util.c b/src/util.c
-index dd8c236..a4ff287 100644
---- a/src/util.c
-+++ b/src/util.c
-@@ -67,6 +67,22 @@ wrtmessage(void *cbopaque, const char *s)
-
- JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s);
-
-+JEMALLOC_ATTR(visibility("hidden"))
-+void
-+wrtmessage_1_0(const char *s1, const char *s2, const char *s3,
-+ const char *s4)
-+{
-+
-+ wrtmessage(NULL, s1);
-+ wrtmessage(NULL, s2);
-+ wrtmessage(NULL, s3);
-+ wrtmessage(NULL, s4);
-+}
-+
-+void (*__malloc_message_1_0)(const char *s1, const char *s2, const char *s3,
-+ const char *s4) = wrtmessage_1_0;
-+__sym_compat(_malloc_message, __malloc_message_1_0, FBSD_1.0);
+
- /*
- * Wrapper around malloc_message() that avoids the need for
- * je_malloc_message(...) throughout the code.
++bool
+ malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
+ witness_rank_t rank, malloc_mutex_lock_order_t lock_order) {
+ mutex_prof_data_init(&mutex->prof_data);
diff --git a/contrib/jemalloc/FREEBSD-upgrade b/contrib/jemalloc/FREEBSD-upgrade
index 6ee6cc91e42e..2a4a0c9611d6 100755
--- a/contrib/jemalloc/FREEBSD-upgrade
+++ b/contrib/jemalloc/FREEBSD-upgrade
@@ -22,19 +22,19 @@
#
# Extract latest jemalloc release.
#
-# ./FREEBSD-upgrade extract
+# ./FREEBSD-upgrade extract <rev>
#
# Fix patch conflicts as necessary, then regenerate diffs to update line
# offsets:
#
# ./FREEBSD-upgrade rediff
-# ./FREEBSD-upgrade extract
+# ./FREEBSD-upgrade extract <rev>
#
# Do multiple buildworld/installworld rounds. If problems arise and patches
# are needed, edit the code in ${work} as necessary, then:
#
# ./FREEBSD-upgrade rediff
-# ./FREEBSD-upgrade extract
+# ./FREEBSD-upgrade extract <rev>
#
# The rediff/extract order is important because rediff saves the local
# changes, then extract blows away the work tree and re-creates it with the
@@ -45,43 +45,98 @@
# ./FREEBSD-upgrade clean
set -e
+set -x
if [ ! -x "FREEBSD-upgrade" ] ; then
echo "Run from within src/contrib/jemalloc/" >&2
exit 1
fi
+if [ "x${JEMALLOC_REPO}" = "x" ] ; then
+ JEMALLOC_REPO=https://github.com/jemalloc/jemalloc.git
+fi
+
src=`pwd`
-workname="jemalloc.git"
-work="${src}/../${workname}" # merge-changes expects ${workname} in "..".
+
+jemalloc_tmp="jemalloc.tmp"
+tmpdir="${src}/../${jemalloc_tmp}"
+bare_repo="${tmpdir}/jemalloc_bare.git"
+work="jemalloc_work.git"
+work_repo="${tmpdir}/${work}"
+namespace_repo="${tmpdir}/jemalloc_namespace.git"
changes="${src}/FREEBSD-changes"
-do_extract() {
+do_fetch() {
local rev=$1
- # Clone.
- rm -rf ${work}
- git clone https://github.com/jemalloc/jemalloc.git ${work}
+ if [ ! -d "${bare_repo}" ] ; then
+ mkdir -p "${bare_repo}"
+ git clone --bare ${JEMALLOC_REPO} ${bare_repo}
+ fi
(
- cd ${work}
+ cd ${bare_repo}
+ git fetch origin ${rev}
+ )
+}
+
+do_extract_helper() {
+ local rev=$1
+ local repo=$2
+ do_fetch ${rev}
+ rm -rf ${repo}
+ git clone ${bare_repo} ${repo}
+ (
+ cd ${repo}
if [ "x${rev}" != "x" ] ; then
# Use optional rev argument to check out a revision other than HEAD on
# master.
git checkout ${rev}
fi
+ )
+}
+
+do_autogen() {
+ ./autogen.sh --enable-xmalloc --enable-utrace \
+ --with-xslroot=/usr/local/share/xsl/docbook --with-private-namespace=__ \
+ --with-lg-page-sizes=12,13,14,16
+}
+
+do_extract_diff() {
+ local rev=$1
+ local repo=$2
+ do_extract_helper ${rev} ${repo}
+ (
+ cd ${repo}
# Apply diffs before generating files.
patch -p1 < "${src}/FREEBSD-diffs"
find . -name '*.orig' -delete
- # Generate various files.
- ./autogen.sh --enable-cc-silence --enable-xmalloc --enable-utrace \
- --with-xslroot=/usr/local/share/xsl/docbook --with-private-namespace=__ \
- --with-lg-page-sizes=12,13,14,16
+ # Generate files.
+ do_autogen
gmake dist
)
}
+do_extract_namespace() {
+ local rev=$1
+ local repo=$2
+ do_extract_helper ${rev} ${repo}
+ (
+ cd ${repo}
+ # Generate files.
+ do_autogen
+ gmake include/jemalloc/internal/private_namespace.h
+ )
+}
+
+do_extract() {
+ local rev=$1
+ do_fetch ${rev}
+ do_extract_diff ${rev} ${work_repo}
+ do_extract_namespace ${rev} ${namespace_repo}
+}
+
do_diff() {
(
- cd ${work}
+ cd ${work_repo}
find . -name '*.orig' -delete
find . -name '*.rej' -delete
git add -A
@@ -98,12 +153,12 @@ case "${command}" in
do_extract ${rev}
# Compute local differences to the upstream+patches and apply them.
(
- cd ..
- diff -ru -X ${src}/FREEBSD-Xlist ${workname} jemalloc > ${changes} || true
+ cd ${tmpdir}
+ diff -ru -X ${src}/FREEBSD-Xlist ${work} ../jemalloc > ${changes} || true
)
(
- cd ${work}
- patch -p1 < ${changes}
+ cd ${work_repo}
+ patch -p1 < ${changes} || true
find . -name '*.orig' -delete
)
# Update diff.
@@ -115,13 +170,17 @@ case "${command}" in
# Delete existing files so that cruft doesn't silently remain.
rm -rf ChangeLog COPYING VERSION doc include src
# Copy files over.
- tar cf - -C ${work} -X FREEBSD-Xlist . |tar xvf -
+ tar cf - -C ${work_repo} -X FREEBSD-Xlist . |tar xvf -
+ internal_dir="include/jemalloc/internal"
+ grep -v ' isthreaded ' \
+ "${namespace_repo}/${internal_dir}/private_namespace.h" \
+ > "${internal_dir}/private_namespace.h"
;;
rediff) # Regenerate diffs based on working tree.
do_diff
;;
clean) # Remove working tree and temporary files.
- rm -rf ${work} ${changes}
+ rm -rf ${tmpdir} ${changes}
;;
*)
echo "Unsupported command: \"${command}\"" >&2
diff --git a/contrib/jemalloc/VERSION b/contrib/jemalloc/VERSION
index 59deb3f8aa54..e5f1992caa25 100644
--- a/contrib/jemalloc/VERSION
+++ b/contrib/jemalloc/VERSION
@@ -1 +1 @@
-4.5.0-0-g04380e79f1e2428bd0ad000bbc6e3d2dfc6b66a5
+5.0.0-4-g84f6c2cae0fb1399377ef6aea9368444c4987cc6
diff --git a/contrib/jemalloc/doc/jemalloc.3 b/contrib/jemalloc/doc/jemalloc.3
index b8fb09e878c1..af3b5dfbe7d7 100644
--- a/contrib/jemalloc/doc/jemalloc.3
+++ b/contrib/jemalloc/doc/jemalloc.3
@@ -2,12 +2,12 @@
.\" Title: JEMALLOC
.\" Author: Jason Evans
.\" Generator: DocBook XSL Stylesheets v1.76.1 <http://docbook.sf.net/>
-.\" Date: 02/28/2017
+.\" Date: 06/14/2017
.\" Manual: User Manual
-.\" Source: jemalloc 4.5.0-0-g04380e79f1e2428bd0ad000bbc6e3d2dfc6b66a5
+.\" Source: jemalloc 5.0.0-4-g84f6c2cae0fb1399377ef6aea9368444c4987cc6
.\" Language: English
.\"
-.TH "JEMALLOC" "3" "02/28/2017" "jemalloc 4.5.0-0-g04380e79f1e2" "User Manual"
+.TH "JEMALLOC" "3" "06/14/2017" "jemalloc 5.0.0-4-g84f6c2cae0fb" "User Manual"
.\" -----------------------------------------------------------------
.\" * Define some portability stuff
.\" -----------------------------------------------------------------
@@ -31,16 +31,13 @@
jemalloc \- general purpose memory allocation functions
.SH "LIBRARY"
.PP
-This manual describes jemalloc 4\&.5\&.0\-0\-g04380e79f1e2428bd0ad000bbc6e3d2dfc6b66a5\&. More information can be found at the
+This manual describes jemalloc 5\&.0\&.0\-4\-g84f6c2cae0fb1399377ef6aea9368444c4987cc6\&. More information can be found at the
\m[blue]\fBjemalloc website\fR\m[]\&\s-2\u[1]\d\s+2\&.
.PP
The following configuration options are enabled in libc\*(Aqs built\-in jemalloc:
\fB\-\-enable\-fill\fR,
\fB\-\-enable\-lazy\-lock\fR,
-\fB\-\-enable\-munmap\fR,
\fB\-\-enable\-stats\fR,
-\fB\-\-enable\-tcache\fR,
-\fB\-\-enable\-tls\fR,
\fB\-\-enable\-utrace\fR, and
\fB\-\-enable\-xmalloc\fR\&. Additionally,
\fB\-\-enable\-debug\fR
@@ -373,6 +370,9 @@ for (i = 0; i < nbins; i++) {
.RE
.\}
.PP
+.RS 4
+.RE
+.PP
The
malloc_stats_print()
function writes summary statistics via the
@@ -401,14 +401,16 @@ mallctl*()
functions internally, so inconsistent statistics can be reported if multiple threads use these functions simultaneously\&. If
\fB\-\-enable\-stats\fR
is specified during configuration,
-\(lqm\(rq
-and
+\(lqm\(rq,
+\(lqd\(rq, and
\(lqa\(rq
-can be specified to omit merged arena and per arena statistics, respectively;
-\(lqb\(rq,
-\(lql\(rq, and
-\(lqh\(rq
-can be specified to omit per size class statistics for bins, large objects, and huge objects, respectively\&. Unrecognized characters are silently ignored\&. Note that thread caching may prevent some statistics from being completely up to date, since extra locking would be required to merge counters that track thread cache operations\&.
+can be specified to omit merged arena, destroyed merged arena, and per arena statistics, respectively;
+\(lqb\(rq
+and
+\(lql\(rq
+can be specified to omit per size class statistics for bins and large objects, respectively;
+\(lqx\(rq
+can be specified to omit all mutex statistics\&. Unrecognized characters are silently ignored\&. Note that thread caching may prevent some statistics from being completely up to date, since extra locking would be required to merge counters that track thread cache operations\&.
.PP
The
malloc_usable_size()
@@ -471,16 +473,12 @@ is used\&.
.PP
This allocator uses multiple arenas in order to reduce lock contention for threaded programs on multi\-processor systems\&. This works well with regard to threading scalability, but incurs some costs\&. There is a small fixed per\-arena overhead, and additionally, arenas manage memory completely independently of each other, which means a small fixed increase in overall memory fragmentation\&. These overheads are not generally an issue, given the number of arenas normally used\&. Note that using substantially more arenas than the default is not likely to improve performance, mainly due to reduced cache performance\&. However, it may make sense to reduce the number of arenas if an application does not make much use of the allocation functions\&.
.PP
-In addition to multiple arenas, unless
-\fB\-\-disable\-tcache\fR
-is specified during configuration, this allocator supports thread\-specific caching for small and large objects, in order to make it possible to completely avoid synchronization for most allocation requests\&. Such caching allows very fast allocation in the common case, but it increases memory usage and fragmentation, since a bounded number of objects can remain allocated in each thread cache\&.
+In addition to multiple arenas, this allocator supports thread\-specific caching, in order to make it possible to completely avoid synchronization for most allocation requests\&. Such caching allows very fast allocation in the common case, but it increases memory usage and fragmentation, since a bounded number of objects can remain allocated in each thread cache\&.
.PP
-Memory is conceptually broken into equal\-sized chunks, where the chunk size is a power of two that is greater than the page size\&. Chunks are always aligned to multiples of the chunk size\&. This alignment makes it possible to find metadata for user objects very quickly\&. User objects are broken into three categories according to size: small, large, and huge\&. Multiple small and large objects can reside within a single chunk, whereas huge objects each have one or more chunks backing them\&. Each chunk that contains small and/or large objects tracks its contents as runs of contiguous pages (unused, backing a set of small objects, or backing one large object)\&. The combination of chunk alignment and chunk page maps makes it possible to determine all metadata regarding small and large allocations in constant time\&.
+Memory is conceptually broken into extents\&. Extents are always aligned to multiples of the page size\&. This alignment makes it possible to find metadata for user objects quickly\&. User objects are broken into two categories according to size: small and large\&. Contiguous small objects comprise a slab, which resides within a single extent, whereas large objects each have their own extents backing them\&.
.PP
-Small objects are managed in groups by page runs\&. Each run maintains a bitmap to track which regions are in use\&. Allocation requests that are no more than half the quantum (8 or 16, depending on architecture) are rounded up to the nearest power of two that is at least
-sizeof(\fBdouble\fR)\&. All other object size classes are multiples of the quantum, spaced such that there are four size classes for each doubling in size, which limits internal fragmentation to approximately 20% for all but the smallest size classes\&. Small size classes are smaller than four times the page size, large size classes are smaller than the chunk size (see the
-opt\&.lg_chunk
-option), and huge size classes extend from the chunk size up to the largest size class that does not exceed
+Small objects are managed in groups by slabs\&. Each slab maintains a bitmap to track which regions are in use\&. Allocation requests that are no more than half the quantum (8 or 16, depending on architecture) are rounded up to the nearest power of two that is at least
+sizeof(\fBdouble\fR)\&. All other object size classes are multiples of the quantum, spaced such that there are four size classes for each doubling in size, which limits internal fragmentation to approximately 20% for all but the smallest size classes\&. Small size classes are smaller than four times the page size, and large size classes extend from four times the page size up to the largest size class that does not exceed
\fBPTRDIFF_MAX\fR\&.
.PP
Allocations are packed tightly together, which can be an issue for multi\-threaded applications\&. If you need to assure that allocations do not suffer from cacheline sharing, round your allocation requests up to the nearest multiple of the cacheline size, or specify cacheline alignment when allocating\&.
@@ -493,10 +491,10 @@ functions may resize allocations without moving them under limited circumstances
*allocx()
API, the standard API does not officially round up the usable size of an allocation to the nearest size class, so technically it is necessary to call
realloc()
-to grow e\&.g\&. a 9\-byte allocation to 16 bytes, or shrink a 16\-byte allocation to 9 bytes\&. Growth and shrinkage trivially succeeds in place as long as the pre\-size and post\-size both round up to the same size class\&. No other API guarantees are made regarding in\-place resizing, but the current implementation also tries to resize large and huge allocations in place, as long as the pre\-size and post\-size are both large or both huge\&. In such cases shrinkage always succeeds for large size classes, but for huge size classes the chunk allocator must support splitting (see
-arena\&.<i>\&.chunk_hooks)\&. Growth only succeeds if the trailing memory is currently available, and additionally for huge size classes the chunk allocator must support merging\&.
+to grow e\&.g\&. a 9\-byte allocation to 16 bytes, or shrink a 16\-byte allocation to 9 bytes\&. Growth and shrinkage trivially succeeds in place as long as the pre\-size and post\-size both round up to the same size class\&. No other API guarantees are made regarding in\-place resizing, but the current implementation also tries to resize large allocations in place, as long as the pre\-size and post\-size are both large\&. For shrinkage to succeed, the extent allocator must support splitting (see
+arena\&.<i>\&.extent_hooks)\&. Growth only succeeds if the trailing memory is currently available, and the extent allocator supports merging\&.
.PP
-Assuming 2 MiB chunks, 4 KiB pages, and a 16\-byte quantum on a 64\-bit system, the size classes in each category are as shown in
+Assuming 4 KiB pages and a 16\-byte quantum on a 64\-bit system, the size classes in each category are as shown in
Table 1\&.
.sp
.it 1 an-trap
@@ -532,7 +530,6 @@ l r l
^ r l
^ r l
^ r l
-l r l
^ r l
^ r l
^ r l
@@ -628,14 +625,7 @@ T}
:T{
256 KiB
T}:T{
-[1280 KiB, 1536 KiB, 1792 KiB]
-T}
-T{
-Huge
-T}:T{
-256 KiB
-T}:T{
-[2 MiB]
+[1280 KiB, 1536 KiB, 1792 KiB, 2 MiB]
T}
:T{
512 KiB
@@ -692,11 +682,36 @@ r\-,
or
<j>
indicates an integer component, where the integer varies from 0 to some upper value that must be determined via introspection\&. In the case of
-stats\&.arenas\&.<i>\&.*,
+stats\&.arenas\&.<i>\&.*
+and
+arena\&.<i>\&.{initialized,purge,decay,dss},
<i>
equal to
-arenas\&.narenas
-can be used to access the summation of statistics from all arenas\&. Take special note of the
+\fBMALLCTL_ARENAS_ALL\fR
+can be used to operate on all arenas or access the summation of statistics from all arenas; similarly
+<i>
+equal to
+\fBMALLCTL_ARENAS_DESTROYED\fR
+can be used to access the summation of statistics from all destroyed arenas\&. These constants can be utilized either via
+mallctlnametomib()
+followed by
+mallctlbymib(), or via code such as the following:
+.sp
+.if n \{\
+.RS 4
+.\}
+.nf
+#define STRINGIFY_HELPER(x) #x
+#define STRINGIFY(x) STRINGIFY_HELPER(x)
+
+mallctl("arena\&." STRINGIFY(MALLCTL_ARENAS_ALL) "\&.decay",
+ NULL, NULL, NULL, 0);
+.fi
+.if n \{\
+.RE
+.\}
+.sp
+Take special note of the
epoch
mallctl, which controls refreshing of cached dynamic statistics\&.
.PP
@@ -712,6 +727,19 @@ mallctl*()
functions report values, and increment the epoch\&. Return the current epoch\&. This is useful for detecting whether another thread caused a refresh\&.
.RE
.PP
+background_thread (\fBbool\fR) rw
+.RS 4
+Enable/disable internal background worker threads\&. When set to true, background threads are created on demand (the number of background threads will be no more than the number of CPUs or active arenas)\&. Threads run periodically, and handle
+purging
+asynchronously\&. When switching off, background threads are terminated synchronously\&. Note that after
+\fBfork\fR(2)
+function, the state in the child process will be disabled regardless the state in parent process\&. See
+stats\&.background_thread
+for related stats\&.
+opt\&.background_thread
+can be used to set the default option\&. This option is only available on selected pthread\-based platforms\&.
+.RE
+.PP
config\&.cache_oblivious (\fBbool\fR) r\-
.RS 4
\fB\-\-enable\-cache\-oblivious\fR
@@ -743,12 +771,6 @@ Embedded configure\-time\-specified run\-time options string, empty unless
was specified during build configuration\&.
.RE
.PP
-config\&.munmap (\fBbool\fR) r\-
-.RS 4
-\fB\-\-enable\-munmap\fR
-was specified during build configuration\&.
-.RE
-.PP
config\&.prof (\fBbool\fR) r\-
.RS 4
\fB\-\-enable\-prof\fR
@@ -773,36 +795,18 @@ config\&.stats (\fBbool\fR) r\-
was specified during build configuration\&.
.RE
.PP
-config\&.tcache (\fBbool\fR) r\-
-.RS 4
-\fB\-\-disable\-tcache\fR
-was not specified during build configuration\&.
-.RE
-.PP
config\&.thp (\fBbool\fR) r\-
.RS 4
\fB\-\-disable\-thp\fR
was not specified during build configuration, and the system supports transparent huge page manipulation\&.
.RE
.PP
-config\&.tls (\fBbool\fR) r\-
-.RS 4
-\fB\-\-disable\-tls\fR
-was not specified during build configuration\&.
-.RE
-.PP
config\&.utrace (\fBbool\fR) r\-
.RS 4
\fB\-\-enable\-utrace\fR
was specified during build configuration\&.
.RE
.PP
-config\&.valgrind (\fBbool\fR) r\-
-.RS 4
-\fB\-\-enable\-valgrind\fR
-was specified during build configuration\&.
-.RE
-.PP
config\&.xmalloc (\fBbool\fR) r\-
.RS 4
\fB\-\-enable\-xmalloc\fR
@@ -811,13 +815,35 @@ was specified during build configuration\&.
.PP
opt\&.abort (\fBbool\fR) r\-
.RS 4
-Abort\-on\-warning enabled/disabled\&. If true, most warnings are fatal\&. The process will call
+Abort\-on\-warning enabled/disabled\&. If true, most warnings are fatal\&. Note that runtime option warnings are not included (see
+opt\&.abort_conf
+for that)\&. The process will call
\fBabort\fR(3)
in these cases\&. This option is disabled by default unless
\fB\-\-enable\-debug\fR
is specified during configuration, in which case it is enabled by default\&.
.RE
.PP
+opt\&.abort_conf (\fBbool\fR) r\-
+.RS 4
+Abort\-on\-invalid\-configuration enabled/disabled\&. If true, invalid runtime options are fatal\&. The process will call
+\fBabort\fR(3)
+in these cases\&. This option is disabled by default unless
+\fB\-\-enable\-debug\fR
+is specified during configuration, in which case it is enabled by default\&.
+.RE
+.PP
+opt\&.retain (\fBbool\fR) r\-
+.RS 4
+If true, retain unused virtual memory for later reuse rather than discarding it by calling
+\fBmunmap\fR(2)
+or equivalent (see
+stats\&.retained
+for related details)\&. This option is disabled by default unless discarding virtual memory is known to trigger platform\-specific performance problems, e\&.g\&. on 64\-bit Linux, which has a quirk in its virtual memory allocation algorithm that causes semi\-permanent VM map holes under normal jemalloc operation\&. Although
+\fBmunmap\fR(2)
+causes issues on 32\-bit Linux as well, retaining virtual memory for 32\-bit Linux is disabled by default due to the practical possibility of address space exhaustion\&.
+.RE
+.PP
opt\&.dss (\fBconst char *\fR) r\-
.RS 4
dss (\fBsbrk\fR(2)) allocation precedence as related to
@@ -838,42 +864,49 @@ is supported by the operating system;
otherwise\&.
.RE
.PP
-opt\&.lg_chunk (\fBsize_t\fR) r\-
+opt\&.narenas (\fBunsigned\fR) r\-
.RS 4
-Virtual memory chunk size (log base 2)\&. If a chunk size outside the supported size range is specified, the size is silently clipped to the minimum/maximum supported size\&. The default chunk size is 2 MiB (2^21)\&.
+Maximum number of arenas to use for automatic multiplexing of threads and arenas\&. The default is four times the number of CPUs, or one if there is a single CPU\&.
.RE
.PP
-opt\&.narenas (\fBunsigned\fR) r\-
+opt\&.percpu_arena (\fBconst char *\fR) r\-
.RS 4
-Maximum number of arenas to use for automatic multiplexing of threads and arenas\&. The default is four times the number of CPUs, or one if there is a single CPU\&.
+Per CPU arena mode\&. Use the
+\(lqpercpu\(rq
+setting to enable this feature, which uses the number of CPUs to determine the number of arenas, and binds threads to arenas dynamically based on the CPU each thread is currently running on\&. The
+\(lqphycpu\(rq
+setting uses one arena per physical CPU, meaning that the two hyper threads on the same physical CPU share one arena\&. Note that no runtime check for the availability of hyper threading is currently performed\&. When set to
+\(lqdisabled\(rq, narenas and the thread\-to\-arena association are not affected by this option\&. The default is
+\(lqdisabled\(rq\&.
.RE
.PP
-opt\&.purge (\fBconst char *\fR) r\-
+opt\&.background_thread (\fBconst bool\fR) r\-
.RS 4
-Purge mode is \(lqratio\(rq (default) or \(lqdecay\(rq\&. See
-opt\&.lg_dirty_mult
-for details of the ratio mode\&. See
-opt\&.decay_time
-for details of the decay mode\&.
+Internal background worker threads enabled/disabled\&. See
+background_thread
+for dynamic control options and details\&. This option is disabled by default\&.
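+.sp
+Because the opt\&.* values are read\-only at run time, defaults such as this one and
+opt\&.percpu_arena
+are typically set before the first allocation, e\&.g\&. via the global malloc_conf string (a sketch; option names as documented above):
+.sp
+.if n \{\
+.RS 4
+.\}
+.nf
+const char *malloc_conf = "background_thread:true,percpu_arena:percpu";
+.fi
+.if n \{\
+.RE
+.\}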
.RE
.PP
-opt\&.lg_dirty_mult (\fBssize_t\fR) r\-
+opt\&.dirty_decay_ms (\fBssize_t\fR) r\-
.RS 4
-Per\-arena minimum ratio (log base 2) of active to dirty pages\&. Some dirty unused pages may be allowed to accumulate, within the limit set by the ratio (or one chunk worth of dirty pages, whichever is greater), before informing the kernel about some of those pages via
-\fBmadvise\fR(2)
-or a similar system call\&. This provides the kernel with sufficient information to recycle dirty pages if physical memory becomes scarce and the pages remain unused\&. The default minimum ratio is 8:1 (2^3:1); an option value of \-1 will disable dirty page purging\&. See
-arenas\&.lg_dirty_mult
+Approximate time in milliseconds from the creation of a set of unused dirty pages until an equivalent set of unused dirty pages is purged (i\&.e\&. converted to muzzy via e\&.g\&.
+madvise(\fI\&.\&.\&.\fR\fI\fBMADV_FREE\fR\fR)
+if supported by the operating system, or converted to clean otherwise) and/or reused\&. Dirty pages are defined as previously having been potentially written to by the application, and therefore consuming physical memory, yet having no current use\&. The pages are incrementally purged according to a sigmoidal decay curve that starts and ends with zero purge rate\&. A decay time of 0 causes all unused dirty pages to be purged immediately upon creation\&. A decay time of \-1 disables purging\&. The default decay time is 10 seconds\&. See
+arenas\&.dirty_decay_ms
and
-arena\&.<i>\&.lg_dirty_mult
-for related dynamic control options\&.
+arena\&.<i>\&.dirty_decay_ms
+for related dynamic control options\&. See
+opt\&.muzzy_decay_ms
+for a description of muzzy pages\&.
.RE
.PP
-opt\&.decay_time (\fBssize_t\fR) r\-
+opt\&.muzzy_decay_ms (\fBssize_t\fR) r\-
.RS 4
-Approximate time in seconds from the creation of a set of unused dirty pages until an equivalent set of unused dirty pages is purged and/or reused\&. The pages are incrementally purged according to a sigmoidal decay curve that starts and ends with zero purge rate\&. A decay time of 0 causes all unused dirty pages to be purged immediately upon creation\&. A decay time of \-1 disables purging\&. The default decay time is 10 seconds\&. See
-arenas\&.decay_time
+Approximate time in milliseconds from the creation of a set of unused muzzy pages until an equivalent set of unused muzzy pages is purged (i\&.e\&. converted to clean) and/or reused\&. Muzzy pages are defined as previously having been unused dirty pages that were subsequently purged in a manner that left them subject to the reclamation whims of the operating system (e\&.g\&.
+madvise(\fI\&.\&.\&.\fR\fI\fBMADV_FREE\fR\fR)), and therefore in an indeterminate state\&. The pages are incrementally purged according to a sigmoidal decay curve that starts and ends with zero purge rate\&. A decay time of 0 causes all unused muzzy pages to be purged immediately upon creation\&. A decay time of \-1 disables purging\&. The default decay time is 10 seconds\&. See
+arenas\&.muzzy_decay_ms
and
-arena\&.<i>\&.decay_time
+arena\&.<i>\&.muzzy_decay_ms
for related dynamic control options\&.
.RE
.PP
@@ -883,7 +916,9 @@ Enable/disable statistics printing at exit\&. If enabled, the
malloc_stats_print()
function is called at program exit via an
\fBatexit\fR(3)
-function\&. If
+function\&.
+opt\&.stats_print_opts
+can be set in combination with this option to specify output options\&. If
\fB\-\-enable\-stats\fR
is specified during configuration, this has the potential to cause deadlock for a multi\-threaded process that exits while one or more threads are executing in the memory allocation functions\&. Furthermore,
atexit()
@@ -893,6 +928,20 @@ atexit()
function with equivalent functionality)\&. Therefore, this option should only be used with care; it is primarily intended as a performance tuning aid during application development\&. This option is disabled by default\&.
.RE
.PP
+opt\&.stats_print_opts (\fBconst char *\fR) r\-
+.RS 4
+Options (the
+\fIopts\fR
+string) to pass to
+malloc_stats_print()
+at exit (enabled through
+opt\&.stats_print)\&. See
+malloc_stats_print()
+for available options\&. This option has no effect unless
+opt\&.stats_print
+is enabled\&. The default is
+\(lq\(rq\&.
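+.sp
+The same \fIopts\fR string format is accepted when calling
+malloc_stats_print()
+directly, for example (a sketch; \(lqJ\(rq requests JSON output):
+.sp
+.if n \{\
+.RS 4
+.\}
+.nf
+malloc_stats_print(NULL, NULL, "J");
+.fi
+.if n \{\
+.RE
+.\}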
+.RE
+.PP
opt\&.junk (\fBconst char *\fR) r\- [\fB\-\-enable\-fill\fR]
.RS 4
Junk filling\&. If set to
@@ -907,24 +956,7 @@ by default unless
\fB\-\-enable\-debug\fR
is specified during configuration, in which case it is
\(lqtrue\(rq
-by default unless running inside
-\m[blue]\fBValgrind\fR\m[]\&\s-2\u[3]\d\s+2\&.
-.RE
-.PP
-opt\&.quarantine (\fBsize_t\fR) r\- [\fB\-\-enable\-fill\fR]
-.RS 4
-Per thread quarantine size in bytes\&. If non\-zero, each thread maintains a FIFO object quarantine that stores up to the specified number of bytes of memory\&. The quarantined memory is not freed until it is released from quarantine, though it is immediately junk\-filled if the
-opt\&.junk
-option is enabled\&. This feature is of particular use in combination with
-\m[blue]\fBValgrind\fR\m[]\&\s-2\u[3]\d\s+2, which can detect attempts to access quarantined objects\&. This is intended for debugging and will impact performance negatively\&. The default quarantine size is 0 unless running inside Valgrind, in which case the default is 16 MiB\&.
-.RE
-.PP
-opt\&.redzone (\fBbool\fR) r\- [\fB\-\-enable\-fill\fR]
-.RS 4
-Redzones enabled/disabled\&. If enabled, small allocations have redzones before and after them\&. Furthermore, if the
-opt\&.junk
-option is enabled, the redzones are checked for corruption during deallocation\&. However, the primary intended purpose of this feature is to be used in combination with
-\m[blue]\fBValgrind\fR\m[]\&\s-2\u[3]\d\s+2, which needs redzones in order to do effective buffer overflow/underflow detection\&. This option is intended for debugging and will impact performance negatively\&. This option is disabled by default unless running inside Valgrind\&.
+by default\&.
.RE
.PP
opt\&.zero (\fBbool\fR) r\- [\fB\-\-enable\-fill\fR]
@@ -963,20 +995,14 @@ malloc_conf = "xmalloc:true";
This option is disabled by default\&.
.RE
.PP
-opt\&.tcache (\fBbool\fR) r\- [\fB\-\-enable\-tcache\fR]
+opt\&.tcache (\fBbool\fR) r\-
.RS 4
Thread\-specific caching (tcache) enabled/disabled\&. When there are multiple threads, each thread uses a tcache for objects up to a certain size\&. Thread\-specific caching allows many allocations to be satisfied without performing any thread synchronization, at the cost of increased memory use\&. See the
opt\&.lg_tcache_max
-option for related tuning information\&. This option is enabled by default unless running inside
-\m[blue]\fBValgrind\fR\m[]\&\s-2\u[3]\d\s+2, in which case it is forcefully disabled\&.
+option for related tuning information\&. This option is enabled by default\&.
.RE
.PP
-opt\&.thp (\fBbool\fR) r\- [\fB\-\-enable\-thp\fR]
-.RS 4
-Transparent huge page (THP) integration enabled/disabled\&. When enabled, THPs are explicitly disabled as a side effect of unused dirty page purging for chunks that back small and/or large allocations, because such chunks typically comprise active, unused dirty, and untouched clean pages\&. This option is enabled by default\&.
-.RE
-.PP
-opt\&.lg_tcache_max (\fBsize_t\fR) r\- [\fB\-\-enable\-tcache\fR]
+opt\&.lg_tcache_max (\fBsize_t\fR) r\-
.RS 4
Maximum size class (log base 2) to cache in the thread\-specific cache (tcache)\&. At a minimum, all small size classes are cached, and at a maximum all large size classes are cached\&. The default maximum is 32 KiB (2^15)\&.
.RE
@@ -1000,7 +1026,7 @@ option for final profile dumping\&. Profile output is compatible with the
command, which is based on the
\fBpprof\fR
that is developed as part of the
-\m[blue]\fBgperftools package\fR\m[]\&\s-2\u[4]\d\s+2\&. See
+\m[blue]\fBgperftools package\fR\m[]\&\s-2\u[3]\d\s+2\&. See
HEAP PROFILE FORMAT
for heap profile format documentation\&.
.RE
@@ -1084,7 +1110,7 @@ option for information on analyzing heap profile output\&. This option is disabl
thread\&.arena (\fBunsigned\fR) rw
.RS 4
Get or set the arena associated with the calling thread\&. If the specified arena was not initialized beforehand (see the
-arenas\&.initialized
+arena\&.i\&.initialized
mallctl), it will be automatically initialized as a side effect of calling this interface\&.
.RE
.PP
@@ -1116,13 +1142,13 @@ mallctl*()
calls\&.
.RE
.PP
-thread\&.tcache\&.enabled (\fBbool\fR) rw [\fB\-\-enable\-tcache\fR]
+thread\&.tcache\&.enabled (\fBbool\fR) rw
.RS 4
Enable/disable calling thread\*(Aqs tcache\&. The tcache is implicitly flushed as a side effect of becoming disabled (see
thread\&.tcache\&.flush)\&.
.RE
.PP
-thread\&.tcache\&.flush (\fBvoid\fR) \-\- [\fB\-\-enable\-tcache\fR]
+thread\&.tcache\&.flush (\fBvoid\fR) \-\-
.RS 4
Flush calling thread\*(Aqs thread\-specific cache (tcache)\&. This interface releases all cached objects and internal data structures associated with the calling thread\*(Aqs tcache\&. Ordinarily, this interface need not be called, since automatic periodic incremental garbage collection occurs, and the thread cache is automatically discarded when a thread exits\&. However, garbage collection is triggered by allocation activity, so it is possible for a thread that stops allocating/deallocating to retain its cache indefinitely, in which case the developer may find manual flushing useful\&.
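+.sp
+For example, a thread that is about to become idle for a long time could release its cache explicitly (a sketch, error checking omitted):
+.sp
+.if n \{\
+.RS 4
+.\}
+.nf
+mallctl("thread\&.tcache\&.flush", NULL, NULL, NULL, 0);
+.fi
+.if n \{\
+.RE
+.\}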
.RE
@@ -1141,97 +1167,119 @@ Control whether sampling is currently active for the calling thread\&. This is a
prof\&.active; both must be active for the calling thread to sample\&. This flag is enabled by default\&.
.RE
.PP
-tcache\&.create (\fBunsigned\fR) r\- [\fB\-\-enable\-tcache\fR]
+tcache\&.create (\fBunsigned\fR) r\-
.RS 4
Create an explicit thread\-specific cache (tcache) and return an identifier that can be passed to the
\fBMALLOCX_TCACHE(\fR\fB\fItc\fR\fR\fB)\fR
macro to explicitly use the specified cache rather than the automatically managed one that is used by default\&. Each explicit cache can be used by only one thread at a time; the application must ensure that this constraint holds\&.
.RE
.PP
-tcache\&.flush (\fBunsigned\fR) \-w [\fB\-\-enable\-tcache\fR]
+tcache\&.flush (\fBunsigned\fR) \-w
.RS 4
Flush the specified thread\-specific cache (tcache)\&. The same considerations apply to this interface as to
thread\&.tcache\&.flush, except that the tcache will never be automatically discarded\&.
.RE
.PP
-tcache\&.destroy (\fBunsigned\fR) \-w [\fB\-\-enable\-tcache\fR]
+tcache\&.destroy (\fBunsigned\fR) \-w
.RS 4
Flush the specified thread\-specific cache (tcache) and make the identifier available for use during a future tcache creation\&.
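+.sp
+As a sketch (error checking omitted), the full life cycle of an explicit tcache looks as follows:
+.sp
+.if n \{\
+.RS 4
+.\}
+.nf
+unsigned tc;
+size_t sz = sizeof(tc);
+
+mallctl("tcache\&.create", &tc, &sz, NULL, 0);
+void *p = mallocx(4096, MALLOCX_TCACHE(tc));
+/* \&.\&.\&. */
+dallocx(p, MALLOCX_TCACHE(tc));
+mallctl("tcache\&.destroy", NULL, NULL, &tc, sizeof(tc));
+.fi
+.if n \{\
+.RE
+.\}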
.RE
.PP
-arena\&.<i>\&.purge (\fBvoid\fR) \-\-
+arena\&.<i>\&.initialized (\fBbool\fR) r\-
.RS 4
-Purge all unused dirty pages for arena <i>, or for all arenas if <i> equals
-arenas\&.narenas\&.
+Get whether the specified arena\*(Aqs statistics are initialized (i\&.e\&. the arena was initialized prior to the current epoch)\&. This interface can also be nominally used to query whether the merged statistics corresponding to
+\fBMALLCTL_ARENAS_ALL\fR
+are initialized (always true)\&.
.RE
.PP
arena\&.<i>\&.decay (\fBvoid\fR) \-\-
.RS 4
-Trigger decay\-based purging of unused dirty pages for arena <i>, or for all arenas if <i> equals
-arenas\&.narenas\&. The proportion of unused dirty pages to be purged depends on the current time; see
-opt\&.decay_time
+Trigger decay\-based purging of unused dirty/muzzy pages for arena <i>, or for all arenas if <i> equals
+\fBMALLCTL_ARENAS_ALL\fR\&. The proportion of unused dirty/muzzy pages to be purged depends on the current time; see
+opt\&.dirty_decay_ms
+and
+opt\&.muzzy_decay_ms
for details\&.
.RE
.PP
+arena\&.<i>\&.purge (\fBvoid\fR) \-\-
+.RS 4
+Purge all unused dirty pages for arena <i>, or for all arenas if <i> equals
+\fBMALLCTL_ARENAS_ALL\fR\&.
+.RE
+.PP
arena\&.<i>\&.reset (\fBvoid\fR) \-\-
.RS 4
-Discard all of the arena\*(Aqs extant allocations\&. This interface can only be used with arenas created via
-arenas\&.extend\&. None of the arena\*(Aqs discarded/cached allocations may accessed afterward\&. As part of this requirement, all thread caches which were used to allocate/deallocate in conjunction with the arena must be flushed beforehand\&. This interface cannot be used if running inside Valgrind, nor if the
-quarantine
-size is non\-zero\&.
+Discard all of the arena\*(Aqs extant allocations\&. This interface can only be used with arenas explicitly created via
+arenas\&.create\&. None of the arena\*(Aqs discarded/cached allocations may be accessed afterward\&. As part of this requirement, all thread caches which were used to allocate/deallocate in conjunction with the arena must be flushed beforehand\&.
+.RE
+.PP
+arena\&.<i>\&.destroy (\fBvoid\fR) \-\-
+.RS 4
+Destroy the arena\&. Discard all of the arena\*(Aqs extant allocations using the same mechanism as for
+arena\&.<i>\&.reset
+(with all the same constraints and side effects), merge the arena stats into those accessible at arena index
+\fBMALLCTL_ARENAS_DESTROYED\fR, and then completely discard all metadata associated with the arena\&. Future calls to
+arenas\&.create
+may recycle the arena index\&. Destruction will fail if any threads are currently associated with the arena as a result of calls to
+thread\&.arena\&.
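+.sp
+As a sketch (error checking omitted), an explicitly created arena can be used for allocation and later destroyed\&. Because the destroy name embeds the arena index, the
+mallctlnametomib()/mallctlbymib()
+interfaces are convenient here:
+.sp
+.if n \{\
+.RS 4
+.\}
+.nf
+unsigned arena_ind;
+size_t sz = sizeof(arena_ind);
+size_t mib[3];
+size_t miblen = sizeof(mib) / sizeof(mib[0]);
+
+mallctl("arenas\&.create", &arena_ind, &sz, NULL, 0);
+/* Bypass the tcache so that no flush is needed before destruction\&. */
+void *p = mallocx(4096, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);
+/* \&.\&.\&. use p \&.\&.\&. */
+dallocx(p, MALLOCX_TCACHE_NONE);
+
+mallctlnametomib("arena\&.0\&.destroy", mib, &miblen);
+mib[1] = (size_t)arena_ind;
+mallctlbymib(mib, miblen, NULL, NULL, NULL, 0);
+.fi
+.if n \{\
+.RE
+.\}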
.RE
.PP
arena\&.<i>\&.dss (\fBconst char *\fR) rw
.RS 4
Set the precedence of dss allocation as related to mmap allocation for arena <i>, or for all arenas if <i> equals
-arenas\&.narenas\&. See
+\fBMALLCTL_ARENAS_ALL\fR\&. See
opt\&.dss
for supported settings\&.
.RE
.PP
-arena\&.<i>\&.lg_dirty_mult (\fBssize_t\fR) rw
+arena\&.<i>\&.dirty_decay_ms (\fBssize_t\fR) rw
.RS 4
-Current per\-arena minimum ratio (log base 2) of active to dirty pages for arena <i>\&. Each time this interface is set and the ratio is increased, pages are synchronously purged as necessary to impose the new ratio\&. See
-opt\&.lg_dirty_mult
+Current per\-arena approximate time in milliseconds from the creation of a set of unused dirty pages until an equivalent set of unused dirty pages is purged and/or reused\&. Each time this interface is set, all currently unused dirty pages are considered to have fully decayed, which causes immediate purging of all unused dirty pages unless the decay time is set to \-1 (i\&.e\&. purging disabled)\&. See
+opt\&.dirty_decay_ms
for additional information\&.
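+.sp
+For example, the following sketch (error checking omitted) shortens arena 0\*(Aqs dirty decay time to one second at run time:
+.sp
+.if n \{\
+.RS 4
+.\}
+.nf
+ssize_t decay_ms = 1000;
+
+mallctl("arena\&.0\&.dirty_decay_ms", NULL, NULL, &decay_ms, sizeof(decay_ms));
+.fi
+.if n \{\
+.RE
+.\}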
.RE
.PP
-arena\&.<i>\&.decay_time (\fBssize_t\fR) rw
+arena\&.<i>\&.muzzy_decay_ms (\fBssize_t\fR) rw
.RS 4
-Current per\-arena approximate time in seconds from the creation of a set of unused dirty pages until an equivalent set of unused dirty pages is purged and/or reused\&. Each time this interface is set, all currently unused dirty pages are considered to have fully decayed, which causes immediate purging of all unused dirty pages unless the decay time is set to \-1 (i\&.e\&. purging disabled)\&. See
-opt\&.decay_time
+Current per\-arena approximate time in milliseconds from the creation of a set of unused muzzy pages until an equivalent set of unused muzzy pages is purged and/or reused\&. Each time this interface is set, all currently unused muzzy pages are considered to have fully decayed, which causes immediate purging of all unused muzzy pages unless the decay time is set to \-1 (i\&.e\&. purging disabled)\&. See
+opt\&.muzzy_decay_ms
for additional information\&.
.RE
.PP
-arena\&.<i>\&.chunk_hooks (\fBchunk_hooks_t\fR) rw
+arena\&.<i>\&.extent_hooks (\fBextent_hooks_t *\fR) rw
.RS 4
-Get or set the chunk management hook functions for arena <i>\&. The functions must be capable of operating on all extant chunks associated with arena <i>, usually by passing unknown chunks to the replaced functions\&. In practice, it is feasible to control allocation for arenas created via
-arenas\&.extend
-such that all chunks originate from an application\-supplied chunk allocator (by setting custom chunk hook functions just after arena creation), but the automatically created arenas may have already created chunks prior to the application having an opportunity to take over chunk allocation\&.
+Get or set the extent management hook functions for arena <i>\&. The functions must be capable of operating on all extant extents associated with arena <i>, usually by passing unknown extents to the replaced functions\&. In practice, it is feasible to control allocation for arenas explicitly created via
+arenas\&.create
+such that all extents originate from an application\-supplied extent allocator (by specifying the custom extent hook functions during arena creation), but the automatically created arenas will have already created extents prior to the application having an opportunity to take over extent allocation\&.
.sp
.if n \{\
.RS 4
.\}
.nf
-typedef struct {
- chunk_alloc_t *alloc;
- chunk_dalloc_t *dalloc;
- chunk_commit_t *commit;
- chunk_decommit_t *decommit;
- chunk_purge_t *purge;
- chunk_split_t *split;
- chunk_merge_t *merge;
-} chunk_hooks_t;
+typedef extent_hooks_s extent_hooks_t;
+struct extent_hooks_s {
+ extent_alloc_t *alloc;
+ extent_dalloc_t *dalloc;
+ extent_destroy_t *destroy;
+ extent_commit_t *commit;
+ extent_decommit_t *decommit;
+ extent_purge_t *purge_lazy;
+ extent_purge_t *purge_forced;
+ extent_split_t *split;
+ extent_merge_t *merge;
+};
.fi
.if n \{\
.RE
.\}
.sp
The
-\fBchunk_hooks_t\fR
-structure comprises function pointers which are described individually below\&. jemalloc uses these functions to manage chunk lifetime, which starts off with allocation of mapped committed memory, in the simplest case followed by deallocation\&. However, there are performance and platform reasons to retain chunks for later reuse\&. Cleanup attempts cascade from deallocation to decommit to purging, which gives the chunk management functions opportunities to reject the most permanent cleanup operations in favor of less permanent (and often less costly) operations\&. The chunk splitting and merging operations can also be opted out of, but this is mainly intended to support platforms on which virtual memory mappings provided by the operating system kernel do not automatically coalesce and split, e\&.g\&. Windows\&.
-.HP \w'typedef\ void\ *(chunk_alloc_t)('u
-.BI "typedef void *(chunk_alloc_t)(void\ *" "chunk" ", size_t\ " "size" ", size_t\ " "alignment" ", bool\ *" "zero" ", bool\ *" "commit" ", unsigned\ " "arena_ind" ");"
+\fBextent_hooks_t\fR
+structure comprises function pointers which are described individually below\&. jemalloc uses these functions to manage extent lifetime, which starts off with allocation of mapped committed memory, in the simplest case followed by deallocation\&. However, there are performance and platform reasons to retain extents for later reuse\&. Cleanup attempts cascade from deallocation to decommit to forced purging to lazy purging, which gives the extent management functions opportunities to reject the most permanent cleanup operations in favor of less permanent (and often less costly) operations\&. All operations except allocation can be universally opted out of by setting the hook pointers to
+\fBNULL\fR, or selectively opted out of by returning failure\&.
+.HP \w'typedef\ void\ *(extent_alloc_t)('u
+.BI "typedef void *(extent_alloc_t)(extent_hooks_t\ *" "extent_hooks" ", void\ *" "new_addr" ", size_t\ " "size" ", size_t\ " "alignment" ", bool\ *" "zero" ", bool\ *" "commit" ", unsigned\ " "arena_ind" ");"
.sp
.if n \{\
.RS 4
@@ -1242,18 +1290,18 @@ structure comprises function pointers which are described individually below\&.
.RE
.\}
.sp
-A chunk allocation function conforms to the
-\fBchunk_alloc_t\fR
+An extent allocation function conforms to the
+\fBextent_alloc_t\fR
type and upon success returns a pointer to
\fIsize\fR
bytes of mapped memory on behalf of arena
\fIarena_ind\fR
-such that the chunk\*(Aqs base address is a multiple of
+such that the extent\*(Aqs base address is a multiple of
\fIalignment\fR, as well as setting
\fI*zero\fR
-to indicate whether the chunk is zeroed and
+to indicate whether the extent is zeroed and
\fI*commit\fR
-to indicate whether the chunk is committed\&. Upon error the function returns
+to indicate whether the extent is committed\&. Upon error the function returns
\fBNULL\fR
and leaves
\fI*zero\fR
@@ -1261,24 +1309,24 @@ and
\fI*commit\fR
unmodified\&. The
\fIsize\fR
-parameter is always a multiple of the chunk size\&. The
+parameter is always a multiple of the page size\&. The
\fIalignment\fR
-parameter is always a power of two at least as large as the chunk size\&. Zeroing is mandatory if
+parameter is always a power of two at least as large as the page size\&. Zeroing is mandatory if
\fI*zero\fR
is true upon function entry\&. Committing is mandatory if
\fI*commit\fR
is true upon function entry\&. If
-\fIchunk\fR
+\fInew_addr\fR
is not
\fBNULL\fR, the returned pointer must be
-\fIchunk\fR
+\fInew_addr\fR
on success or
\fBNULL\fR
-on error\&. Committed memory may be committed in absolute terms as on a system that does not overcommit, or in implicit terms as on a system that overcommits and satisfies physical memory needs on demand via soft page faults\&. Note that replacing the default chunk allocation function makes the arena\*(Aqs
+on error\&. Committed memory may be committed in absolute terms as on a system that does not overcommit, or in implicit terms as on a system that overcommits and satisfies physical memory needs on demand via soft page faults\&. Note that replacing the default extent allocation function makes the arena\*(Aqs
arena\&.<i>\&.dss
setting irrelevant\&.
-.HP \w'typedef\ bool\ (chunk_dalloc_t)('u
-.BI "typedef bool (chunk_dalloc_t)(void\ *" "chunk" ", size_t\ " "size" ", bool\ " "committed" ", unsigned\ " "arena_ind" ");"
+.HP \w'typedef\ bool\ (extent_dalloc_t)('u
+.BI "typedef bool (extent_dalloc_t)(extent_hooks_t\ *" "extent_hooks" ", void\ *" "addr" ", size_t\ " "size" ", bool\ " "committed" ", unsigned\ " "arena_ind" ");"
.sp
.if n \{\
.RS 4
@@ -1289,17 +1337,17 @@ setting irrelevant\&.
.RE
.\}
.sp
-A chunk deallocation function conforms to the
-\fBchunk_dalloc_t\fR
-type and deallocates a
-\fIchunk\fR
-of given
+An extent deallocation function conforms to the
+\fBextent_dalloc_t\fR
+type and deallocates an extent at given
+\fIaddr\fR
+and
\fIsize\fR
with
\fIcommitted\fR/decommitted memory as indicated, on behalf of arena
-\fIarena_ind\fR, returning false upon success\&. If the function returns true, this indicates opt\-out from deallocation; the virtual memory mapping associated with the chunk remains mapped, in the same commit state, and available for future use, in which case it will be automatically retained for later reuse\&.
-.HP \w'typedef\ bool\ (chunk_commit_t)('u
-.BI "typedef bool (chunk_commit_t)(void\ *" "chunk" ", size_t\ " "size" ", size_t\ " "offset" ", size_t\ " "length" ", unsigned\ " "arena_ind" ");"
+\fIarena_ind\fR, returning false upon success\&. If the function returns true, this indicates opt\-out from deallocation; the virtual memory mapping associated with the extent remains mapped, in the same commit state, and available for future use, in which case it will be automatically retained for later reuse\&.
+.HP \w'typedef\ void\ (extent_destroy_t)('u
+.BI "typedef void (extent_destroy_t)(extent_hooks_t\ *" "extent_hooks" ", void\ *" "addr" ", size_t\ " "size" ", bool\ " "committed" ", unsigned\ " "arena_ind" ");"
.sp
.if n \{\
.RS 4
@@ -1310,11 +1358,33 @@ with
.RE
.\}
.sp
-A chunk commit function conforms to the
-\fBchunk_commit_t\fR
-type and commits zeroed physical memory to back pages within a
-\fIchunk\fR
-of given
+An extent destruction function conforms to the
+\fBextent_destroy_t\fR
+type and unconditionally destroys an extent at given
+\fIaddr\fR
+and
+\fIsize\fR
+with
+\fIcommitted\fR/decommitted memory as indicated, on behalf of arena
+\fIarena_ind\fR\&. This function may be called to destroy retained extents during arena destruction (see
+arena\&.<i>\&.destroy)\&.
+.HP \w'typedef\ bool\ (extent_commit_t)('u
+.BI "typedef bool (extent_commit_t)(extent_hooks_t\ *" "extent_hooks" ", void\ *" "addr" ", size_t\ " "size" ", size_t\ " "offset" ", size_t\ " "length" ", unsigned\ " "arena_ind" ");"
+.sp
+.if n \{\
+.RS 4
+.\}
+.nf
+.fi
+.if n \{\
+.RE
+.\}
+.sp
+An extent commit function conforms to the
+\fBextent_commit_t\fR
+type and commits zeroed physical memory to back pages within an extent at given
+\fIaddr\fR
+and
\fIsize\fR
at
\fIoffset\fR
@@ -1322,8 +1392,8 @@ bytes, extending for
\fIlength\fR
on behalf of arena
\fIarena_ind\fR, returning false upon success\&. Committed memory may be committed in absolute terms as on a system that does not overcommit, or in implicit terms as on a system that overcommits and satisfies physical memory needs on demand via soft page faults\&. If the function returns true, this indicates insufficient physical memory to satisfy the request\&.
-.HP \w'typedef\ bool\ (chunk_decommit_t)('u
-.BI "typedef bool (chunk_decommit_t)(void\ *" "chunk" ", size_t\ " "size" ", size_t\ " "offset" ", size_t\ " "length" ", unsigned\ " "arena_ind" ");"
+.HP \w'typedef\ bool\ (extent_decommit_t)('u
+.BI "typedef bool (extent_decommit_t)(extent_hooks_t\ *" "extent_hooks" ", void\ *" "addr" ", size_t\ " "size" ", size_t\ " "offset" ", size_t\ " "length" ", unsigned\ " "arena_ind" ");"
.sp
.if n \{\
.RS 4
@@ -1334,20 +1404,20 @@ on behalf of arena
.RE
.\}
.sp
-A chunk decommit function conforms to the
-\fBchunk_decommit_t\fR
-type and decommits any physical memory that is backing pages within a
-\fIchunk\fR
-of given
+An extent decommit function conforms to the
+\fBextent_decommit_t\fR
+type and decommits any physical memory that is backing pages within an extent at given
+\fIaddr\fR
+and
\fIsize\fR
at
\fIoffset\fR
bytes, extending for
\fIlength\fR
on behalf of arena
-\fIarena_ind\fR, returning false upon success, in which case the pages will be committed via the chunk commit function before being reused\&. If the function returns true, this indicates opt\-out from decommit; the memory remains committed and available for future use, in which case it will be automatically retained for later reuse\&.
-.HP \w'typedef\ bool\ (chunk_purge_t)('u
-.BI "typedef bool (chunk_purge_t)(void\ *" "chunk" ", size_t" "size" ", size_t\ " "offset" ", size_t\ " "length" ", unsigned\ " "arena_ind" ");"
+\fIarena_ind\fR, returning false upon success, in which case the pages will be committed via the extent commit function before being reused\&. If the function returns true, this indicates opt\-out from decommit; the memory remains committed and available for future use, in which case it will be automatically retained for later reuse\&.
+.HP \w'typedef\ bool\ (extent_purge_t)('u
+.BI "typedef bool (extent_purge_t)(extent_hooks_t\ *" "extent_hooks" ", void\ *" "addr" ", size_t\ " "size" ", size_t\ " "offset" ", size_t\ " "length" ", unsigned\ " "arena_ind" ");"
.sp
.if n \{\
.RS 4
@@ -1358,20 +1428,21 @@ on behalf of arena
.RE
.\}
.sp
-A chunk purge function conforms to the
-\fBchunk_purge_t\fR
-type and optionally discards physical pages within the virtual memory mapping associated with
-\fIchunk\fR
-of given
+An extent purge function conforms to the
+\fBextent_purge_t\fR
+type and discards physical pages within the virtual memory mapping associated with an extent at given
+\fIaddr\fR
+and
\fIsize\fR
at
\fIoffset\fR
bytes, extending for
\fIlength\fR
on behalf of arena
-\fIarena_ind\fR, returning false if pages within the purged virtual memory range will be zero\-filled the next time they are accessed\&.
-.HP \w'typedef\ bool\ (chunk_split_t)('u
-.BI "typedef bool (chunk_split_t)(void\ *" "chunk" ", size_t\ " "size" ", size_t\ " "size_a" ", size_t\ " "size_b" ", bool\ " "committed" ", unsigned\ " "arena_ind" ");"
+\fIarena_ind\fR\&. A lazy extent purge function (e\&.g\&. implemented via
+madvise(\fI\&.\&.\&.\fR\fI\fBMADV_FREE\fR\fR)) can delay purging indefinitely and leave the pages within the purged virtual memory range in an indeterminate state, whereas a forced extent purge function immediately purges, and the pages within the virtual memory range will be zero\-filled the next time they are accessed\&. If the function returns true, this indicates failure to purge\&.
+.HP \w'typedef\ bool\ (extent_split_t)('u
+.BI "typedef bool (extent_split_t)(extent_hooks_t\ *" "extent_hooks" ", void\ *" "addr" ", size_t\ " "size" ", size_t\ " "size_a" ", size_t\ " "size_b" ", bool\ " "committed" ", unsigned\ " "arena_ind" ");"
.sp
.if n \{\
.RS 4
@@ -1382,21 +1453,21 @@ on behalf of arena
.RE
.\}
.sp
-A chunk split function conforms to the
-\fBchunk_split_t\fR
-type and optionally splits
-\fIchunk\fR
-of given
+An extent split function conforms to the
+\fBextent_split_t\fR
+type and optionally splits an extent at given
+\fIaddr\fR
+and
\fIsize\fR
-into two adjacent chunks, the first of
+into two adjacent extents, the first of
\fIsize_a\fR
bytes, and the second of
\fIsize_b\fR
bytes, operating on
\fIcommitted\fR/decommitted memory as indicated, on behalf of arena
-\fIarena_ind\fR, returning false upon success\&. If the function returns true, this indicates that the chunk remains unsplit and therefore should continue to be operated on as a whole\&.
-.HP \w'typedef\ bool\ (chunk_merge_t)('u
-.BI "typedef bool (chunk_merge_t)(void\ *" "chunk_a" ", size_t\ " "size_a" ", void\ *" "chunk_b" ", size_t\ " "size_b" ", bool\ " "committed" ", unsigned\ " "arena_ind" ");"
+\fIarena_ind\fR, returning false upon success\&. If the function returns true, this indicates that the extent remains unsplit and therefore should continue to be operated on as a whole\&.
+.HP \w'typedef\ bool\ (extent_merge_t)('u
+.BI "typedef bool (extent_merge_t)(extent_hooks_t\ *" "extent_hooks" ", void\ *" "addr_a" ", size_t\ " "size_a" ", void\ *" "addr_b" ", size_t\ " "size_b" ", bool\ " "committed" ", unsigned\ " "arena_ind" ");"
.sp
.if n \{\
.RS 4
@@ -1407,19 +1478,19 @@ bytes, operating on
.RE
.\}
.sp
-A chunk merge function conforms to the
-\fBchunk_merge_t\fR
-type and optionally merges adjacent chunks,
-\fIchunk_a\fR
-of given
+An extent merge function conforms to the
+\fBextent_merge_t\fR
+type and optionally merges adjacent extents, at given
+\fIaddr_a\fR
+and
\fIsize_a\fR
+with given
+\fIaddr_b\fR
and
-\fIchunk_b\fR
-of given
\fIsize_b\fR
-into one contiguous chunk, operating on
+into one contiguous extent, operating on
\fIcommitted\fR/decommitted memory as indicated, on behalf of arena
-\fIarena_ind\fR, returning false upon success\&. If the function returns true, this indicates that the chunks remain distinct mappings and therefore should continue to be operated on independently\&.
+\fIarena_ind\fR, returning false upon success\&. If the function returns true, this indicates that the extents remain distinct mappings and therefore should continue to be operated on independently\&.
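+.sp
+As a purely illustrative sketch (not the default implementation), the following allocation hook maps pages with
+\fBmmap\fR(2), over\-mapping in order to satisfy
+\fIalignment\fR, and every other operation is opted out of by leaving its hook pointer
+\fBNULL\fR\&. The hooks are then installed on a newly created arena via
+arenas\&.create\&. A production implementation would trim the excess mapping, implement deallocation, and check for errors; the type declarations shown above are assumed to be in scope (e\&.g\&. via the jemalloc header):
+.sp
+.if n \{\
+.RS 4
+.\}
+.nf
+#include <sys/mman\&.h>
+#include <stdint\&.h>
+
+static void *
+sketch_alloc(extent_hooks_t *hooks, void *new_addr, size_t size,
+    size_t alignment, bool *zero, bool *commit, unsigned arena_ind)
+{
+    if (new_addr != NULL)
+        return (NULL);  /* Decline requests for a specific address\&. */
+    /* Over\-map so that any power\-of\-two alignment can be satisfied\&. */
+    void *p = mmap(NULL, size + alignment, PROT_READ | PROT_WRITE,
+        MAP_ANON | MAP_PRIVATE, \-1, 0);
+    if (p == MAP_FAILED)
+        return (NULL);
+    *zero = true;    /* Anonymous mappings are zero\-filled\&. */
+    *commit = true;
+    uintptr_t addr = ((uintptr_t)p + (alignment \- 1)) &
+        ~(uintptr_t)(alignment \- 1);
+    return ((void *)addr);
+}
+
+static extent_hooks_t sketch_hooks = {
+    \&.alloc = sketch_alloc
+    /* All other hooks are left NULL, i\&.e\&. opted out of\&. */
+};
+
+unsigned arena_ind;
+size_t sz = sizeof(arena_ind);
+extent_hooks_t *hooks = &sketch_hooks;
+
+mallctl("arenas\&.create", &arena_ind, &sz, &hooks, sizeof(hooks));
+.fi
+.if n \{\
+.RE
+.\}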
.RE
.PP
arenas\&.narenas (\fBunsigned\fR) r\-
@@ -1427,28 +1498,21 @@ arenas\&.narenas (\fBunsigned\fR) r\-
Current limit on number of arenas\&.
.RE
.PP
-arenas\&.initialized (\fBbool *\fR) r\-
-.RS 4
-An array of
-arenas\&.narenas
-booleans\&. Each boolean indicates whether the corresponding arena is initialized\&.
-.RE
-.PP
-arenas\&.lg_dirty_mult (\fBssize_t\fR) rw
+arenas\&.dirty_decay_ms (\fBssize_t\fR) rw
.RS 4
-Current default per\-arena minimum ratio (log base 2) of active to dirty pages, used to initialize
-arena\&.<i>\&.lg_dirty_mult
+Current default per\-arena approximate time in milliseconds from the creation of a set of unused dirty pages until an equivalent set of unused dirty pages is purged and/or reused, used to initialize
+arena\&.<i>\&.dirty_decay_ms
during arena creation\&. See
-opt\&.lg_dirty_mult
+opt\&.dirty_decay_ms
for additional information\&.
.RE
.PP
-arenas\&.decay_time (\fBssize_t\fR) rw
+arenas\&.muzzy_decay_ms (\fBssize_t\fR) rw
.RS 4
-Current default per\-arena approximate time in seconds from the creation of a set of unused dirty pages until an equivalent set of unused dirty pages is purged and/or reused, used to initialize
-arena\&.<i>\&.decay_time
+Current default per\-arena approximate time in milliseconds from the creation of a set of unused muzzy pages until an equivalent set of unused muzzy pages is purged and/or reused, used to initialize
+arena\&.<i>\&.muzzy_decay_ms
during arena creation\&. See
-opt\&.decay_time
+opt\&.muzzy_decay_ms
for additional information\&.
.RE
.PP
@@ -1462,7 +1526,7 @@ arenas\&.page (\fBsize_t\fR) r\-
Page size\&.
.RE
.PP
-arenas\&.tcache_max (\fBsize_t\fR) r\- [\fB\-\-enable\-tcache\fR]
+arenas\&.tcache_max (\fBsize_t\fR) r\-
.RS 4
Maximum thread\-cached size class\&.
.RE
@@ -1472,7 +1536,7 @@ arenas\&.nbins (\fBunsigned\fR) r\-
Number of bin size classes\&.
.RE
.PP
-arenas\&.nhbins (\fBunsigned\fR) r\- [\fB\-\-enable\-tcache\fR]
+arenas\&.nhbins (\fBunsigned\fR) r\-
.RS 4
Total number of thread cache bin size classes\&.
.RE
@@ -1484,37 +1548,27 @@ Maximum size supported by size class\&.
.PP
arenas\&.bin\&.<i>\&.nregs (\fBuint32_t\fR) r\-
.RS 4
-Number of regions per page run\&.
+Number of regions per slab\&.
.RE
.PP
-arenas\&.bin\&.<i>\&.run_size (\fBsize_t\fR) r\-
+arenas\&.bin\&.<i>\&.slab_size (\fBsize_t\fR) r\-
.RS 4
-Number of bytes per page run\&.
+Number of bytes per slab\&.
.RE
.PP
-arenas\&.nlruns (\fBunsigned\fR) r\-
+arenas\&.nlextents (\fBunsigned\fR) r\-
.RS 4
Total number of large size classes\&.
.RE
.PP
-arenas\&.lrun\&.<i>\&.size (\fBsize_t\fR) r\-
+arenas\&.lextent\&.<i>\&.size (\fBsize_t\fR) r\-
.RS 4
Maximum size supported by this large size class\&.
.RE
.PP
-arenas\&.nhchunks (\fBunsigned\fR) r\-
+arenas\&.create (\fBunsigned\fR, \fBextent_hooks_t *\fR) rw
.RS 4
-Total number of huge size classes\&.
-.RE
-.PP
-arenas\&.hchunk\&.<i>\&.size (\fBsize_t\fR) r\-
-.RS 4
-Maximum size supported by this huge size class\&.
-.RE
-.PP
-arenas\&.extend (\fBunsigned\fR) r\-
-.RS 4
-Extend the array of arenas by appending a new arena, and returning the new arena index\&.
+Explicitly create a new arena outside the range of automatically managed arenas, with optionally specified extent hooks, and return the new arena index\&.
.RE
.PP
prof\&.thread_active_init (\fBbool\fR) rw [\fB\-\-enable\-prof\fR]
@@ -1576,13 +1630,6 @@ opt\&.lg_prof_interval
option for additional information\&.
.RE
.PP
-stats\&.cactive (\fBsize_t *\fR) r\- [\fB\-\-enable\-stats\fR]
-.RS 4
-Pointer to a counter that contains an approximate count of the current number of bytes in active pages\&. The estimate may be high, but never low, because each arena rounds up when computing its contribution to the counter\&. Note that the
-epoch
-mallctl has no bearing on this counter\&. Furthermore, counter consistency is maintained via atomic operations, so it is necessary to use an atomic operation in order to guarantee a consistent read when dereferencing the pointer\&.
-.RE
-.PP
stats\&.allocated (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Total number of bytes allocated by the application\&.
@@ -1592,14 +1639,15 @@ stats\&.active (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Total number of bytes in active pages allocated by the application\&. This is a multiple of the page size, and greater than or equal to
stats\&.allocated\&. This does not include
-stats\&.arenas\&.<i>\&.pdirty, nor pages entirely devoted to allocator metadata\&.
+stats\&.arenas\&.<i>\&.pdirty,
+stats\&.arenas\&.<i>\&.pmuzzy, nor pages entirely devoted to allocator metadata\&.
.RE
.PP
stats\&.metadata (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
-Total number of bytes dedicated to metadata, which comprise base allocations used for bootstrap\-sensitive internal allocator data structures, arena chunk headers (see
-stats\&.arenas\&.<i>\&.metadata\&.mapped), and internal allocations (see
-stats\&.arenas\&.<i>\&.metadata\&.allocated)\&.
+Total number of bytes dedicated to metadata, which comprise base allocations used for bootstrap\-sensitive allocator metadata structures (see
+stats\&.arenas\&.<i>\&.base) and internal allocations (see
+stats\&.arenas\&.<i>\&.internal)\&.
.RE
.PP
stats\&.resident (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
@@ -1610,20 +1658,99 @@ stats\&.active\&.
.PP
stats\&.mapped (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
-Total number of bytes in active chunks mapped by the allocator\&. This is a multiple of the chunk size, and is larger than
-stats\&.active\&. This does not include inactive chunks, even those that contain unused dirty pages, which means that there is no strict ordering between this and
+Total number of bytes in active extents mapped by the allocator\&. This is larger than
+stats\&.active\&. This does not include inactive extents, even those that contain unused dirty pages, which means that there is no strict ordering between this and
stats\&.resident\&.
.RE
.PP
stats\&.retained (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Total number of bytes in virtual memory mappings that were retained rather than being returned to the operating system via e\&.g\&.
-\fBmunmap\fR(2)\&. Retained virtual memory is typically untouched, decommitted, or purged, so it has no strongly associated physical memory (see
-chunk hooks
+\fBmunmap\fR(2)
+or similar\&. Retained virtual memory is typically untouched, decommitted, or purged, so it has no strongly associated physical memory (see
+extent hooks
for details)\&. Retained memory is excluded from mapped memory statistics, e\&.g\&.
stats\&.mapped\&.
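+.sp
+For example, a sketch (error checking omitted) that refreshes and then reads these top\-level totals:
+.sp
+.if n \{\
+.RS 4
+.\}
+.nf
+uint64_t epoch = 1;
+size_t u64sz = sizeof(epoch);
+size_t allocated, active, resident, mapped, retained;
+size_t sz = sizeof(size_t);
+
+mallctl("epoch", &epoch, &u64sz, &epoch, u64sz);
+mallctl("stats\&.allocated", &allocated, &sz, NULL, 0);
+mallctl("stats\&.active", &active, &sz, NULL, 0);
+mallctl("stats\&.resident", &resident, &sz, NULL, 0);
+mallctl("stats\&.mapped", &mapped, &sz, NULL, 0);
+mallctl("stats\&.retained", &retained, &sz, NULL, 0);
+.fi
+.if n \{\
+.RE
+.\}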
.RE
.PP
+stats\&.background_thread\&.num_threads (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Number of
+background threads
+running currently\&.
+.RE
+.PP
+stats\&.background_thread\&.num_runs (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Total number of runs from all
+background threads\&.
+.RE
+.PP
+stats\&.background_thread\&.run_interval (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Average run interval in nanoseconds of
+background threads\&.
+.RE
+.PP
+stats\&.mutexes\&.ctl\&.{counter} (\fBcounter specific type\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Statistics on
+\fIctl\fR
+mutex (global scope; mallctl related)\&.
+{counter}
+is one of the counters below:
+.PP
+.RS 4
+\fInum_ops\fR
+(\fBuint64_t\fR): Total number of lock acquisition operations on this mutex\&.
+.sp
+\fInum_spin_acq\fR
+(\fBuint64_t\fR): Number of times the mutex was spin\-acquired\&. When the mutex is currently locked and cannot be acquired immediately, a short period of spin\-retry within jemalloc will be performed\&. Acquisition through spinning generally means that the contention was lightweight and did not cause context switches\&.
+.sp
+\fInum_wait\fR
+(\fBuint64_t\fR): Number of times the mutex was wait\-acquired, which means the mutex contention was not resolved by spin\-retry and a blocking operation was likely involved in order to acquire the mutex\&. This event generally implies higher cost and longer delay, and should be investigated if it happens often\&.
+.sp
+\fImax_wait_time\fR
+(\fBuint64_t\fR): Maximum length of time in nanoseconds spent on a single wait\-acquired lock operation\&. Note that to avoid profiling overhead on the common path, this does not consider spin\-acquired cases\&.
+.sp
+\fItotal_wait_time\fR
+(\fBuint64_t\fR): Cumulative time in nanoseconds spent on wait\-acquired lock operations\&. Similarly, spin\-acquired cases are not considered\&.
+.sp
+\fImax_num_thds\fR
+(\fBuint32_t\fR): Maximum number of threads waiting on this mutex simultaneously\&. Similarly, spin\-acquired cases are not considered\&.
+.sp
+\fInum_owner_switch\fR
+(\fBuint64_t\fR): Number of times the current mutex owner is different from the previous one\&. This event does not generally imply an issue; rather it is an indicator of how often the protected data are accessed by different threads\&.
+.RE
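+.sp
+For example, a sketch that reads the total number of lock operations on this mutex (after refreshing statistics via the
+epoch
+mallctl):
+.sp
+.if n \{\
+.RS 4
+.\}
+.nf
+uint64_t num_ops;
+size_t sz = sizeof(num_ops);
+
+mallctl("stats\&.mutexes\&.ctl\&.num_ops", &num_ops, &sz, NULL, 0);
+.fi
+.if n \{\
+.RE
+.\}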
+.RE
+.PP
+stats\&.mutexes\&.background_thread\&.{counter} (\fBcounter specific type\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Statistics on
+\fIbackground_thread\fR
+mutex (global scope;
+background_thread
+related)\&.
+{counter}
+is one of the counters in
+mutex profiling counters\&.
+.RE
+.PP
+stats\&.mutexes\&.prof\&.{counter} (\fBcounter specific type\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Statistics on
+\fIprof\fR
+mutex (global scope; profiling related)\&.
+{counter}
+is one of the counters in
+mutex profiling counters\&.
+.RE
+.PP
+stats\&.mutexes\&.reset (\fBvoid\fR) \-\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Reset all mutex profile statistics, including global mutexes, arena mutexes and bin mutexes\&.
+.RE
+.PP
stats\&.arenas\&.<i>\&.dss (\fBconst char *\fR) r\-
.RS 4
dss (\fBsbrk\fR(2)) allocation precedence as related to
@@ -1633,17 +1760,17 @@ opt\&.dss
for details\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.lg_dirty_mult (\fBssize_t\fR) r\-
+stats\&.arenas\&.<i>\&.dirty_decay_ms (\fBssize_t\fR) r\-
.RS 4
-Minimum ratio (log base 2) of active to dirty pages\&. See
-opt\&.lg_dirty_mult
+Approximate time in milliseconds from the creation of a set of unused dirty pages until an equivalent set of unused dirty pages is purged and/or reused\&. See
+opt\&.dirty_decay_ms
for details\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.decay_time (\fBssize_t\fR) r\-
+stats\&.arenas\&.<i>\&.muzzy_decay_ms (\fBssize_t\fR) r\-
.RS 4
-Approximate time in seconds from the creation of a set of unused dirty pages until an equivalent set of unused dirty pages is purged and/or reused\&. See
-opt\&.decay_time
+Approximate time in milliseconds from the creation of a set of unused muzzy pages until an equivalent set of unused muzzy pages is purged and/or reused\&. See
+opt\&.muzzy_decay_ms
for details\&.
.RE
.PP
@@ -1652,16 +1779,33 @@ stats\&.arenas\&.<i>\&.nthreads (\fBunsigned\fR) r\-
Number of threads currently assigned to arena\&.
.RE
.PP
+stats\&.arenas\&.<i>\&.uptime (\fBuint64_t\fR) r\-
+.RS 4
+Time elapsed (in nanoseconds) since the arena was created\&. If <i> equals
+\fB0\fR
+or
+\fBMALLCTL_ARENAS_ALL\fR, this is the uptime since malloc initialization\&.
+.RE
+.PP
stats\&.arenas\&.<i>\&.pactive (\fBsize_t\fR) r\-
.RS 4
-Number of pages in active runs\&.
+Number of pages in active extents\&.
.RE
.PP
stats\&.arenas\&.<i>\&.pdirty (\fBsize_t\fR) r\-
.RS 4
-Number of pages within unused runs that are potentially dirty, and for which
-madvise\fI\&.\&.\&.\fR \fI\fBMADV_DONTNEED\fR\fR
-or similar has not been called\&.
+Number of pages within unused extents that are potentially dirty, and for which
+madvise()
+or similar has not been called\&. See
+opt\&.dirty_decay_ms
+for a description of dirty pages\&.
+.RE
+.PP
+stats\&.arenas\&.<i>\&.pmuzzy (\fBsize_t\fR) r\-
+.RS 4
+Number of pages within unused extents that are muzzy\&. See
+opt\&.muzzy_decay_ms
+for a description of muzzy pages\&.
.RE
.PP
stats\&.arenas\&.<i>\&.mapped (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
@@ -1676,39 +1820,53 @@ stats\&.retained
for details\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.metadata\&.mapped (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+stats\&.arenas\&.<i>\&.base (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
-Number of mapped bytes in arena chunk headers, which track the states of the non\-metadata pages\&.
+Number of bytes dedicated to bootstrap\-sensitive allocator metadata structures\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.metadata\&.allocated (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+stats\&.arenas\&.<i>\&.internal (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
-Number of bytes dedicated to internal allocations\&. Internal allocations differ from application\-originated allocations in that they are for internal use, and that they are omitted from heap profiles\&. This statistic is reported separately from
-stats\&.metadata
-and
-stats\&.arenas\&.<i>\&.metadata\&.mapped
-because it overlaps with e\&.g\&. the
-stats\&.allocated
-and
-stats\&.active
-statistics, whereas the other metadata statistics do not\&.
+Number of bytes dedicated to internal allocations\&. Internal allocations differ from application\-originated allocations in that they are for internal use, and that they are omitted from heap profiles\&.
+.RE
+.PP
+stats\&.arenas\&.<i>\&.resident (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Maximum number of bytes in physically resident data pages mapped by the arena, comprising all pages dedicated to allocator metadata, pages backing active allocations, and unused dirty pages\&. This is a maximum rather than precise because pages may not actually be physically resident if they correspond to demand\-zeroed virtual memory that has not yet been touched\&. This is a multiple of the page size\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.npurge (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+stats\&.arenas\&.<i>\&.dirty_npurge (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Number of dirty page purge sweeps performed\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.nmadvise (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+stats\&.arenas\&.<i>\&.dirty_nmadvise (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Number of
-madvise\fI\&.\&.\&.\fR \fI\fBMADV_DONTNEED\fR\fR
+madvise()
or similar calls made to purge dirty pages\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.purged (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+stats\&.arenas\&.<i>\&.dirty_purged (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Number of dirty pages purged\&.
+.RE
+.PP
+stats\&.arenas\&.<i>\&.muzzy_npurge (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Number of muzzy page purge sweeps performed\&.
+.RE
+.PP
+stats\&.arenas\&.<i>\&.muzzy_nmadvise (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
-Number of pages purged\&.
+Number of
+madvise()
+or similar calls made to purge muzzy pages\&.
+.RE
+.PP
+stats\&.arenas\&.<i>\&.muzzy_purged (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Number of muzzy pages purged\&.
.RE
.PP
stats\&.arenas\&.<i>\&.small\&.allocated (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
@@ -1718,17 +1876,21 @@ Number of bytes currently allocated by small objects\&.
.PP
stats\&.arenas\&.<i>\&.small\&.nmalloc (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
-Cumulative number of allocation requests served by small bins\&.
+Cumulative number of times a small allocation was requested from the arena\*(Aqs bins, whether to fill the relevant tcache if
+opt\&.tcache
+is enabled, or to directly satisfy an allocation request otherwise\&.
.RE
.PP
stats\&.arenas\&.<i>\&.small\&.ndalloc (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
-Cumulative number of small objects returned to bins\&.
+Cumulative number of times a small allocation was returned to the arena\*(Aqs bins, whether to flush the relevant tcache if
+opt\&.tcache
+is enabled, or to directly deallocate an allocation otherwise\&.
.RE
.PP
stats\&.arenas\&.<i>\&.small\&.nrequests (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
-Cumulative number of small allocation requests\&.
+Cumulative number of allocation requests satisfied by all bin size classes\&.
.RE
.PP
stats\&.arenas\&.<i>\&.large\&.allocated (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
@@ -1738,129 +1900,201 @@ Number of bytes currently allocated by large objects\&.
.PP
stats\&.arenas\&.<i>\&.large\&.nmalloc (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
-Cumulative number of large allocation requests served directly by the arena\&.
+Cumulative number of times a large extent was allocated from the arena, whether to fill the relevant tcache if
+opt\&.tcache
+is enabled and the size class is within the range being cached, or to directly satisfy an allocation request otherwise\&.
.RE
.PP
stats\&.arenas\&.<i>\&.large\&.ndalloc (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
-Cumulative number of large deallocation requests served directly by the arena\&.
+Cumulative number of times a large extent was returned to the arena, whether to flush the relevant tcache if
+opt\&.tcache
+is enabled and the size class is within the range being cached, or to directly deallocate an allocation otherwise\&.
.RE
.PP
stats\&.arenas\&.<i>\&.large\&.nrequests (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
-Cumulative number of large allocation requests\&.
+Cumulative number of allocation requests satisfied by all large size classes\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.huge\&.allocated (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+stats\&.arenas\&.<i>\&.bins\&.<j>\&.nmalloc (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
-Number of bytes currently allocated by huge objects\&.
+Cumulative number of times a bin region of the corresponding size class was allocated from the arena, whether to fill the relevant tcache if
+opt\&.tcache
+is enabled, or to directly satisfy an allocation request otherwise\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.huge\&.nmalloc (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+stats\&.arenas\&.<i>\&.bins\&.<j>\&.ndalloc (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
-Cumulative number of huge allocation requests served directly by the arena\&.
+Cumulative number of times a bin region of the corresponding size class was returned to the arena, whether to flush the relevant tcache if
+opt\&.tcache
+is enabled, or to directly deallocate an allocation otherwise\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.huge\&.ndalloc (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+stats\&.arenas\&.<i>\&.bins\&.<j>\&.nrequests (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
-Cumulative number of huge deallocation requests served directly by the arena\&.
+Cumulative number of allocation requests satisfied by bin regions of the corresponding size class\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.huge\&.nrequests (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+stats\&.arenas\&.<i>\&.bins\&.<j>\&.curregs (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
-Cumulative number of huge allocation requests\&.
+Current number of regions for this size class\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.bins\&.<j>\&.nmalloc (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+stats\&.arenas\&.<i>\&.bins\&.<j>\&.nfills (\fBuint64_t\fR) r\-
.RS 4
-Cumulative number of allocations served by bin\&.
+Cumulative number of tcache fills\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.bins\&.<j>\&.ndalloc (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+stats\&.arenas\&.<i>\&.bins\&.<j>\&.nflushes (\fBuint64_t\fR) r\-
.RS 4
-Cumulative number of allocations returned to bin\&.
+Cumulative number of tcache flushes\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.bins\&.<j>\&.nrequests (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+stats\&.arenas\&.<i>\&.bins\&.<j>\&.nslabs (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
-Cumulative number of allocation requests\&.
+Cumulative number of slabs created\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.bins\&.<j>\&.curregs (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+stats\&.arenas\&.<i>\&.bins\&.<j>\&.nreslabs (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
-Current number of regions for this size class\&.
+Cumulative number of times the current slab from which to allocate changed\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.bins\&.<j>\&.nfills (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR \fB\-\-enable\-tcache\fR]
+stats\&.arenas\&.<i>\&.bins\&.<j>\&.curslabs (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
-Cumulative number of tcache fills\&.
+Current number of slabs\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.bins\&.<j>\&.nflushes (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR \fB\-\-enable\-tcache\fR]
+stats\&.arenas\&.<i>\&.bins\&.<j>\&.mutex\&.{counter} (\fBcounter specific type\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
-Cumulative number of tcache flushes\&.
+Statistics on
+\fIarena\&.<i>\&.bins\&.<j>\fR
+mutex (arena bin scope; bin operation related)\&.
+{counter}
+is one of the counters in
+mutex profiling counters\&.
+.RE
+.PP
+stats\&.arenas\&.<i>\&.lextents\&.<j>\&.nmalloc (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Cumulative number of times a large extent of the corresponding size class was allocated from the arena, whether to fill the relevant tcache if
+opt\&.tcache
+is enabled and the size class is within the range being cached, or to directly satisfy an allocation request otherwise\&.
+.RE
+.PP
+stats\&.arenas\&.<i>\&.lextents\&.<j>\&.ndalloc (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Cumulative number of times a large extent of the corresponding size class was returned to the arena, whether to flush the relevant tcache if
+opt\&.tcache
+is enabled and the size class is within the range being cached, or to directly deallocate an allocation otherwise\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.bins\&.<j>\&.nruns (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+stats\&.arenas\&.<i>\&.lextents\&.<j>\&.nrequests (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
-Cumulative number of runs created\&.
+Cumulative number of allocation requests satisfied by large extents of the corresponding size class\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.bins\&.<j>\&.nreruns (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+stats\&.arenas\&.<i>\&.lextents\&.<j>\&.curlextents (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
-Cumulative number of times the current run from which to allocate changed\&.
+Current number of large allocations for this size class\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.bins\&.<j>\&.curruns (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+stats\&.arenas\&.<i>\&.mutexes\&.large\&.{counter} (\fBcounter specific type\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
-Current number of runs\&.
+Statistics on
+\fIarena\&.<i>\&.large\fR
+mutex (arena scope; large allocation related)\&.
+{counter}
+is one of the counters in
+mutex profiling counters\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.lruns\&.<j>\&.nmalloc (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+stats\&.arenas\&.<i>\&.mutexes\&.extent_avail\&.{counter} (\fBcounter specific type\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
-Cumulative number of allocation requests for this size class served directly by the arena\&.
+Statistics on
+\fIarena\&.<i>\&.extent_avail \fR
+mutex (arena scope; extent avail related)\&.
+{counter}
+is one of the counters in
+mutex profiling counters\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.lruns\&.<j>\&.ndalloc (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+stats\&.arenas\&.<i>\&.mutexes\&.extents_dirty\&.{counter} (\fBcounter specific type\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
-Cumulative number of deallocation requests for this size class served directly by the arena\&.
+Statistics on
+\fIarena\&.<i>\&.extents_dirty \fR
+mutex (arena scope; dirty extents related)\&.
+{counter}
+is one of the counters in
+mutex profiling counters\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.lruns\&.<j>\&.nrequests (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+stats\&.arenas\&.<i>\&.mutexes\&.extents_muzzy\&.{counter} (\fBcounter specific type\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
-Cumulative number of allocation requests for this size class\&.
+Statistics on
+\fIarena\&.<i>\&.extents_muzzy \fR
+mutex (arena scope; muzzy extents related)\&.
+{counter}
+is one of the counters in
+mutex profiling counters\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.lruns\&.<j>\&.curruns (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+stats\&.arenas\&.<i>\&.mutexes\&.extents_retained\&.{counter} (\fBcounter specific type\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
-Current number of runs for this size class\&.
+Statistics on
+\fIarena\&.<i>\&.extents_retained \fR
+mutex (arena scope; retained extents related)\&.
+{counter}
+is one of the counters in
+mutex profiling counters\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.hchunks\&.<j>\&.nmalloc (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+stats\&.arenas\&.<i>\&.mutexes\&.decay_dirty\&.{counter} (\fBcounter specific type\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
-Cumulative number of allocation requests for this size class served directly by the arena\&.
+Statistics on
+\fIarena\&.<i>\&.decay_dirty \fR
+mutex (arena scope; decay for dirty pages related)\&.
+{counter}
+is one of the counters in
+mutex profiling counters\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.hchunks\&.<j>\&.ndalloc (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+stats\&.arenas\&.<i>\&.mutexes\&.decay_muzzy\&.{counter} (\fBcounter specific type\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
-Cumulative number of deallocation requests for this size class served directly by the arena\&.
+Statistics on
+\fIarena\&.<i>\&.decay_muzzy \fR
+mutex (arena scope; decay for muzzy pages related)\&.
+{counter}
+is one of the counters in
+mutex profiling counters\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.hchunks\&.<j>\&.nrequests (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+stats\&.arenas\&.<i>\&.mutexes\&.base\&.{counter} (\fBcounter specific type\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
-Cumulative number of allocation requests for this size class\&.
+Statistics on
+\fIarena\&.<i>\&.base\fR
+mutex (arena scope; base allocator related)\&.
+{counter}
+is one of the counters in
+mutex profiling counters\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.hchunks\&.<j>\&.curhchunks (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+stats\&.arenas\&.<i>\&.mutexes\&.tcache_list\&.{counter} (\fBcounter specific type\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
-Current number of huge allocations for this size class\&.
+Statistics on
+\fIarena\&.<i>\&.tcache_list\fR
+mutex (arena scope; tcache to arena association related)\&. This mutex is expected to be accessed less often\&.
+{counter}
+is one of the counters in
+mutex profiling counters\&.
.RE
.SH "HEAP PROFILE FORMAT"
.PP
Although the heap profiling functionality was originally designed to be compatible with the
\fBpprof\fR
command that is developed as part of the
-\m[blue]\fBgperftools package\fR\m[]\&\s-2\u[4]\d\s+2, the addition of per thread heap profiling functionality required a different heap profile format\&. The
+\m[blue]\fBgperftools package\fR\m[]\&\s-2\u[3]\d\s+2, the addition of per thread heap profiling functionality required a different heap profile format\&. The
\fBjeprof\fR
command is derived from
\fBpprof\fR, with enhancements to support the heap profile format described here\&.
@@ -1936,11 +2170,7 @@ option) tends to expose such bugs in the form of obviously incorrect results and
opt\&.zero
option) eliminates the symptoms of such bugs\&. Between these two options, it is usually possible to quickly detect, diagnose, and eliminate such bugs\&.
.PP
-This implementation does not provide much detail about the problems it detects, because the performance impact for storing such information would be prohibitive\&. However, jemalloc does integrate with the most excellent
-\m[blue]\fBValgrind\fR\m[]\&\s-2\u[3]\d\s+2
-tool if the
-\fB\-\-enable\-valgrind\fR
-configuration option is enabled\&.
+This implementation does not provide much detail about the problems it detects, because the performance impact for storing such information would be prohibitive\&.
.SH "DIAGNOSTIC MESSAGES"
.PP
If any of the memory allocation/deallocation functions detect an error or warning condition, a message will be printed to file descriptor
@@ -2132,13 +2362,13 @@ ln \-s \*(Aqabort:true\*(Aq /etc/malloc\&.conf
.RE
.\}
.PP
-To specify in the source a chunk size that is 16 MiB:
+To specify in the source that only one arena should be automatically created:
.sp
.if n \{\
.RS 4
.\}
.nf
-malloc_conf = "lg_chunk:24";
+malloc_conf = "narenas:1";
.fi
.if n \{\
.RE
@@ -2198,11 +2428,6 @@ JSON format
\%http://www.json.org/
.RE
.IP " 3." 4
-Valgrind
-.RS 4
-\%http://valgrind.org/
-.RE
-.IP " 4." 4
gperftools package
.RS 4
\%http://code.google.com/p/gperftools/
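
The stats\&.arenas\&.<i>\&.* entries above are read through the standard mallctl() interface, and a write to the epoch mallctl is needed first so that reads return a current snapshot\&. The following is a minimal sketch, not part of this change: it assumes statistics were compiled in (the --enable-stats requirement shown above), that arena 0 and bin 0 exist, and that mallctl() is declared in <malloc_np.h> as on FreeBSD\&.

#include <stdio.h>
#include <stdint.h>
#include <malloc_np.h>

int
main(void)
{
	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);
	size_t curslabs;

	/* Refresh the stats snapshot before reading any stats.* value. */
	if (mallctl("epoch", &epoch, &sz, &epoch, sz) != 0)
		return (1);

	/* Read one of the per-bin counters documented above. */
	sz = sizeof(curslabs);
	if (mallctl("stats.arenas.0.bins.0.curslabs", &curslabs, &sz,
	    NULL, 0) == 0)
		printf("arena 0, bin 0: %zu current slabs\n", curslabs);

	return (0);
}

The same pattern applies to the other counters; only the mallctl name and the old-value type change\&.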
diff --git a/contrib/jemalloc/include/jemalloc/internal/arena.h b/contrib/jemalloc/include/jemalloc/internal/arena.h
deleted file mode 100644
index 277989f4b795..000000000000
--- a/contrib/jemalloc/include/jemalloc/internal/arena.h
+++ /dev/null
@@ -1,1538 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)
-
-/* Maximum number of regions in one run. */
-#define LG_RUN_MAXREGS (LG_PAGE - LG_TINY_MIN)
-#define RUN_MAXREGS (1U << LG_RUN_MAXREGS)
-
-/*
- * Minimum redzone size. Redzones may be larger than this if necessary to
- * preserve region alignment.
- */
-#define REDZONE_MINSIZE 16
-
-/*
- * The minimum ratio of active:dirty pages per arena is computed as:
- *
- * (nactive >> lg_dirty_mult) >= ndirty
- *
- * So, supposing that lg_dirty_mult is 3, there can be no less than 8 times as
- * many active pages as dirty pages.
- */
-#define LG_DIRTY_MULT_DEFAULT 3
-
-typedef enum {
- purge_mode_ratio = 0,
- purge_mode_decay = 1,
-
- purge_mode_limit = 2
-} purge_mode_t;
-#define PURGE_DEFAULT purge_mode_ratio
-/* Default decay time in seconds. */
-#define DECAY_TIME_DEFAULT 10
-/* Number of event ticks between time checks. */
-#define DECAY_NTICKS_PER_UPDATE 1000
-
-typedef struct arena_runs_dirty_link_s arena_runs_dirty_link_t;
-typedef struct arena_avail_links_s arena_avail_links_t;
-typedef struct arena_run_s arena_run_t;
-typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t;
-typedef struct arena_chunk_map_misc_s arena_chunk_map_misc_t;
-typedef struct arena_chunk_s arena_chunk_t;
-typedef struct arena_bin_info_s arena_bin_info_t;
-typedef struct arena_decay_s arena_decay_t;
-typedef struct arena_bin_s arena_bin_t;
-typedef struct arena_s arena_t;
-typedef struct arena_tdata_s arena_tdata_t;
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-#ifdef JEMALLOC_ARENA_STRUCTS_A
-struct arena_run_s {
- /* Index of bin this run is associated with. */
- szind_t binind;
-
- /* Number of free regions in run. */
- unsigned nfree;
-
- /* Per region allocated/deallocated bitmap. */
- bitmap_t bitmap[BITMAP_GROUPS_MAX];
-};
-
-/* Each element of the chunk map corresponds to one page within the chunk. */
-struct arena_chunk_map_bits_s {
- /*
- * Run address (or size) and various flags are stored together. The bit
- * layout looks like (assuming 32-bit system):
- *
- * ???????? ???????? ???nnnnn nnndumla
- *
- * ? : Unallocated: Run address for first/last pages, unset for internal
- * pages.
- * Small: Run page offset.
- * Large: Run page count for first page, unset for trailing pages.
- * n : binind for small size class, BININD_INVALID for large size class.
- * d : dirty?
- * u : unzeroed?
- * m : decommitted?
- * l : large?
- * a : allocated?
- *
- * Following are example bit patterns for the three types of runs.
- *
- * p : run page offset
- * s : run size
- * n : binind for size class; large objects set these to BININD_INVALID
- * x : don't care
- * - : 0
- * + : 1
- * [DUMLA] : bit set
- * [dumla] : bit unset
- *
- * Unallocated (clean):
- * ssssssss ssssssss sss+++++ +++dum-a
- * xxxxxxxx xxxxxxxx xxxxxxxx xxx-Uxxx
- * ssssssss ssssssss sss+++++ +++dUm-a
- *
- * Unallocated (dirty):
- * ssssssss ssssssss sss+++++ +++D-m-a
- * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
- * ssssssss ssssssss sss+++++ +++D-m-a
- *
- * Small:
- * pppppppp pppppppp pppnnnnn nnnd---A
- * pppppppp pppppppp pppnnnnn nnn----A
- * pppppppp pppppppp pppnnnnn nnnd---A
- *
- * Large:
- * ssssssss ssssssss sss+++++ +++D--LA
- * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
- * -------- -------- ---+++++ +++D--LA
- *
- * Large (sampled, size <= LARGE_MINCLASS):
- * ssssssss ssssssss sssnnnnn nnnD--LA
- * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
- * -------- -------- ---+++++ +++D--LA
- *
- * Large (not sampled, size == LARGE_MINCLASS):
- * ssssssss ssssssss sss+++++ +++D--LA
- * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
- * -------- -------- ---+++++ +++D--LA
- */
- size_t bits;
-#define CHUNK_MAP_ALLOCATED ((size_t)0x01U)
-#define CHUNK_MAP_LARGE ((size_t)0x02U)
-#define CHUNK_MAP_STATE_MASK ((size_t)0x3U)
-
-#define CHUNK_MAP_DECOMMITTED ((size_t)0x04U)
-#define CHUNK_MAP_UNZEROED ((size_t)0x08U)
-#define CHUNK_MAP_DIRTY ((size_t)0x10U)
-#define CHUNK_MAP_FLAGS_MASK ((size_t)0x1cU)
-
-#define CHUNK_MAP_BININD_SHIFT 5
-#define BININD_INVALID ((size_t)0xffU)
-#define CHUNK_MAP_BININD_MASK (BININD_INVALID << CHUNK_MAP_BININD_SHIFT)
-#define CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK
-
-#define CHUNK_MAP_RUNIND_SHIFT (CHUNK_MAP_BININD_SHIFT + 8)
-#define CHUNK_MAP_SIZE_SHIFT (CHUNK_MAP_RUNIND_SHIFT - LG_PAGE)
-#define CHUNK_MAP_SIZE_MASK \
- (~(CHUNK_MAP_BININD_MASK | CHUNK_MAP_FLAGS_MASK | CHUNK_MAP_STATE_MASK))
-};
-
-struct arena_runs_dirty_link_s {
- qr(arena_runs_dirty_link_t) rd_link;
-};
-
-/*
- * Each arena_chunk_map_misc_t corresponds to one page within the chunk, just
- * like arena_chunk_map_bits_t. Two separate arrays are stored within each
- * chunk header in order to improve cache locality.
- */
-struct arena_chunk_map_misc_s {
- /*
- * Linkage for run heaps. There are two disjoint uses:
- *
- * 1) arena_t's runs_avail heaps.
- * 2) arena_run_t conceptually uses this linkage for in-use non-full
- * runs, rather than directly embedding linkage.
- */
- phn(arena_chunk_map_misc_t) ph_link;
-
- union {
- /* Linkage for list of dirty runs. */
- arena_runs_dirty_link_t rd;
-
- /* Profile counters, used for large object runs. */
- union {
- void *prof_tctx_pun;
- prof_tctx_t *prof_tctx;
- };
-
- /* Small region run metadata. */
- arena_run_t run;
- };
-};
-typedef ph(arena_chunk_map_misc_t) arena_run_heap_t;
-#endif /* JEMALLOC_ARENA_STRUCTS_A */
-
-#ifdef JEMALLOC_ARENA_STRUCTS_B
-/* Arena chunk header. */
-struct arena_chunk_s {
- /*
- * A pointer to the arena that owns the chunk is stored within the node.
- * This field as a whole is used by chunks_rtree to support both
- * ivsalloc() and core-based debugging.
- */
- extent_node_t node;
-
- /*
- * True if memory could be backed by transparent huge pages. This is
- * only directly relevant to Linux, since it is the only supported
- * platform on which jemalloc interacts with explicit transparent huge
- * page controls.
- */
- bool hugepage;
-
- /*
- * Map of pages within chunk that keeps track of free/large/small. The
- * first map_bias entries are omitted, since the chunk header does not
- * need to be tracked in the map. This omission saves a header page
- * for common chunk sizes (e.g. 4 MiB).
- */
- arena_chunk_map_bits_t map_bits[1]; /* Dynamically sized. */
-};
-
-/*
- * Read-only information associated with each element of arena_t's bins array
- * is stored separately, partly to reduce memory usage (only one copy, rather
- * than one per arena), but mainly to avoid false cacheline sharing.
- *
- * Each run has the following layout:
- *
- * /--------------------\
- * | pad? |
- * |--------------------|
- * | redzone |
- * reg0_offset | region 0 |
- * | redzone |
- * |--------------------| \
- * | redzone | |
- * | region 1 | > reg_interval
- * | redzone | /
- * |--------------------|
- * | ... |
- * | ... |
- * | ... |
- * |--------------------|
- * | redzone |
- * | region nregs-1 |
- * | redzone |
- * |--------------------|
- * | alignment pad? |
- * \--------------------/
- *
- * reg_interval has at least the same minimum alignment as reg_size; this
- * preserves the alignment constraint that sa2u() depends on. Alignment pad is
- * either 0 or redzone_size; it is present only if needed to align reg0_offset.
- */
-struct arena_bin_info_s {
- /* Size of regions in a run for this bin's size class. */
- size_t reg_size;
-
- /* Redzone size. */
- size_t redzone_size;
-
- /* Interval between regions (reg_size + (redzone_size << 1)). */
- size_t reg_interval;
-
- /* Total size of a run for this bin's size class. */
- size_t run_size;
-
- /* Total number of regions in a run for this bin's size class. */
- uint32_t nregs;
-
- /*
- * Metadata used to manipulate bitmaps for runs associated with this
- * bin.
- */
- bitmap_info_t bitmap_info;
-
- /* Offset of first region in a run for this bin's size class. */
- uint32_t reg0_offset;
-};
-
-struct arena_decay_s {
- /*
- * Approximate time in seconds from the creation of a set of unused
- * dirty pages until an equivalent set of unused dirty pages is purged
- * and/or reused.
- */
- ssize_t time;
- /* time / SMOOTHSTEP_NSTEPS. */
- nstime_t interval;
- /*
- * Time at which the current decay interval logically started. We do
- * not actually advance to a new epoch until sometime after it starts
- * because of scheduling and computation delays, and it is even possible
- * to completely skip epochs. In all cases, during epoch advancement we
- * merge all relevant activity into the most recently recorded epoch.
- */
- nstime_t epoch;
- /* Deadline randomness generator. */
- uint64_t jitter_state;
- /*
- * Deadline for current epoch. This is the sum of interval and per
- * epoch jitter which is a uniform random variable in [0..interval).
- * Epochs always advance by precise multiples of interval, but we
- * randomize the deadline to reduce the likelihood of arenas purging in
- * lockstep.
- */
- nstime_t deadline;
- /*
- * Number of dirty pages at beginning of current epoch. During epoch
- * advancement we use the delta between arena->decay.ndirty and
- * arena->ndirty to determine how many dirty pages, if any, were
- * generated.
- */
- size_t ndirty;
- /*
- * Trailing log of how many unused dirty pages were generated during
- * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
- * element is the most recent epoch. Corresponding epoch times are
- * relative to epoch.
- */
- size_t backlog[SMOOTHSTEP_NSTEPS];
-};
-
-struct arena_bin_s {
- /*
- * All operations on runcur, runs, and stats require that lock be
- * locked. Run allocation/deallocation are protected by the arena lock,
- * which may be acquired while holding one or more bin locks, but not
- * vice versa.
- */
- malloc_mutex_t lock;
-
- /*
- * Current run being used to service allocations of this bin's size
- * class.
- */
- arena_run_t *runcur;
-
- /*
- * Heap of non-full runs. This heap is used when looking for an
- * existing run when runcur is no longer usable. We choose the
- * non-full run that is lowest in memory; this policy tends to keep
- * objects packed well, and it can also help reduce the number of
- * almost-empty chunks.
- */
- arena_run_heap_t runs;
-
- /* Bin statistics. */
- malloc_bin_stats_t stats;
-};
-
-struct arena_s {
- /* This arena's index within the arenas array. */
- unsigned ind;
-
- /*
- * Number of threads currently assigned to this arena, synchronized via
- * atomic operations. Each thread has two distinct assignments, one for
- * application-serving allocation, and the other for internal metadata
- * allocation. Internal metadata must not be allocated from arenas
- * created via the arenas.extend mallctl, because the arena.<i>.reset
- * mallctl indiscriminately discards all allocations for the affected
- * arena.
- *
- * 0: Application allocation.
- * 1: Internal metadata allocation.
- */
- unsigned nthreads[2];
-
- /*
- * There are three classes of arena operations from a locking
- * perspective:
- * 1) Thread assignment (modifies nthreads) is synchronized via atomics.
- * 2) Bin-related operations are protected by bin locks.
- * 3) Chunk- and run-related operations are protected by this mutex.
- */
- malloc_mutex_t lock;
-
- arena_stats_t stats;
- /*
- * List of tcaches for extant threads associated with this arena.
- * Stats from these are merged incrementally, and at exit if
- * opt_stats_print is enabled.
- */
- ql_head(tcache_t) tcache_ql;
-
- uint64_t prof_accumbytes;
-
- /*
- * PRNG state for cache index randomization of large allocation base
- * pointers.
- */
- size_t offset_state;
-
- dss_prec_t dss_prec;
-
- /* Extant arena chunks. */
- ql_head(extent_node_t) achunks;
-
- /* Extent serial number generator state. */
- size_t extent_sn_next;
-
- /*
- * In order to avoid rapid chunk allocation/deallocation when an arena
- * oscillates right on the cusp of needing a new chunk, cache the most
- * recently freed chunk. The spare is left in the arena's chunk trees
- * until it is deleted.
- *
- * There is one spare chunk per arena, rather than one spare total, in
- * order to avoid interactions between multiple threads that could make
- * a single spare inadequate.
- */
- arena_chunk_t *spare;
-
- /* Minimum ratio (log base 2) of nactive:ndirty. */
- ssize_t lg_dirty_mult;
-
- /* True if a thread is currently executing arena_purge_to_limit(). */
- bool purging;
-
- /* Number of pages in active runs and huge regions. */
- size_t nactive;
-
- /*
- * Current count of pages within unused runs that are potentially
- * dirty, and for which madvise(... MADV_DONTNEED) has not been called.
- * By tracking this, we can institute a limit on how much dirty unused
- * memory is mapped for each arena.
- */
- size_t ndirty;
-
- /*
- * Unused dirty memory this arena manages. Dirty memory is conceptually
- * tracked as an arbitrarily interleaved LRU of dirty runs and cached
- * chunks, but the list linkage is actually semi-duplicated in order to
- * avoid extra arena_chunk_map_misc_t space overhead.
- *
- * LRU-----------------------------------------------------------MRU
- *
- * /-- arena ---\
- * | |
- * | |
- * |------------| /- chunk -\
- * ...->|chunks_cache|<--------------------------->| /----\ |<--...
- * |------------| | |node| |
- * | | | | | |
- * | | /- run -\ /- run -\ | | | |
- * | | | | | | | | | |
- * | | | | | | | | | |
- * |------------| |-------| |-------| | |----| |
- * ...->|runs_dirty |<-->|rd |<-->|rd |<---->|rd |<----...
- * |------------| |-------| |-------| | |----| |
- * | | | | | | | | | |
- * | | | | | | | \----/ |
- * | | \-------/ \-------/ | |
- * | | | |
- * | | | |
- * \------------/ \---------/
- */
- arena_runs_dirty_link_t runs_dirty;
- extent_node_t chunks_cache;
-
- /* Decay-based purging state. */
- arena_decay_t decay;
-
- /* Extant huge allocations. */
- ql_head(extent_node_t) huge;
- /* Synchronizes all huge allocation/update/deallocation. */
- malloc_mutex_t huge_mtx;
-
- /*
- * Trees of chunks that were previously allocated (trees differ only in
- * node ordering). These are used when allocating chunks, in an attempt
- * to re-use address space. Depending on function, different tree
- * orderings are needed, which is why there are two trees with the same
- * contents.
- */
- extent_tree_t chunks_szsnad_cached;
- extent_tree_t chunks_ad_cached;
- extent_tree_t chunks_szsnad_retained;
- extent_tree_t chunks_ad_retained;
-
- malloc_mutex_t chunks_mtx;
- /* Cache of nodes that were allocated via base_alloc(). */
- ql_head(extent_node_t) node_cache;
- malloc_mutex_t node_cache_mtx;
-
- /* User-configurable chunk hook functions. */
- chunk_hooks_t chunk_hooks;
-
- /* bins is used to store trees of free regions. */
- arena_bin_t bins[NBINS];
-
- /*
- * Size-segregated address-ordered heaps of this arena's available runs,
- * used for first-best-fit run allocation. Runs are quantized, i.e.
- * they reside in the last heap which corresponds to a size class less
- * than or equal to the run size.
- */
- arena_run_heap_t runs_avail[NPSIZES];
-};
-
-/* Used in conjunction with tsd for fast arena-related context lookup. */
-struct arena_tdata_s {
- ticker_t decay_ticker;
-};
-#endif /* JEMALLOC_ARENA_STRUCTS_B */
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-static const size_t large_pad =
-#ifdef JEMALLOC_CACHE_OBLIVIOUS
- PAGE
-#else
- 0
-#endif
- ;
-
-extern bool opt_thp;
-extern purge_mode_t opt_purge;
-extern const char *purge_mode_names[];
-extern ssize_t opt_lg_dirty_mult;
-extern ssize_t opt_decay_time;
-
-extern arena_bin_info_t arena_bin_info[NBINS];
-
-extern size_t map_bias; /* Number of arena chunk header pages. */
-extern size_t map_misc_offset;
-extern size_t arena_maxrun; /* Max run size for arenas. */
-extern size_t large_maxclass; /* Max large size class. */
-extern unsigned nlclasses; /* Number of large size classes. */
-extern unsigned nhclasses; /* Number of huge size classes. */
-
-#ifdef JEMALLOC_JET
-typedef size_t (run_quantize_t)(size_t);
-extern run_quantize_t *run_quantize_floor;
-extern run_quantize_t *run_quantize_ceil;
-#endif
-void arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node,
- bool cache);
-void arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node,
- bool cache);
-extent_node_t *arena_node_alloc(tsdn_t *tsdn, arena_t *arena);
-void arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node);
-void *arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
- size_t alignment, size_t *sn, bool *zero);
-void arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk,
- size_t usize, size_t sn);
-void arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena,
- void *chunk, size_t oldsize, size_t usize);
-void arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena,
- void *chunk, size_t oldsize, size_t usize, size_t sn);
-bool arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena,
- void *chunk, size_t oldsize, size_t usize, bool *zero);
-ssize_t arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena);
-bool arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena,
- ssize_t lg_dirty_mult);
-ssize_t arena_decay_time_get(tsdn_t *tsdn, arena_t *arena);
-bool arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time);
-void arena_purge(tsdn_t *tsdn, arena_t *arena, bool all);
-void arena_maybe_purge(tsdn_t *tsdn, arena_t *arena);
-void arena_reset(tsd_t *tsd, arena_t *arena);
-void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena,
- tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
-void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
- bool zero);
-#ifdef JEMALLOC_JET
-typedef void (arena_redzone_corruption_t)(void *, size_t, bool, size_t,
- uint8_t);
-extern arena_redzone_corruption_t *arena_redzone_corruption;
-typedef void (arena_dalloc_junk_small_t)(void *, arena_bin_info_t *);
-extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
-#else
-void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
-#endif
-void arena_quarantine_junk_small(void *ptr, size_t usize);
-void *arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t ind,
- bool zero);
-void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
- szind_t ind, bool zero);
-void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
- size_t alignment, bool zero, tcache_t *tcache);
-void arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size);
-void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
- arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm);
-void arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- void *ptr, size_t pageind, arena_chunk_map_bits_t *bitselm);
-void arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- void *ptr, size_t pageind);
-#ifdef JEMALLOC_JET
-typedef void (arena_dalloc_junk_large_t)(void *, size_t);
-extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
-#else
-void arena_dalloc_junk_large(void *ptr, size_t usize);
-#endif
-void arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
- arena_chunk_t *chunk, void *ptr);
-void arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- void *ptr);
-#ifdef JEMALLOC_JET
-typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
-extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
-#endif
-bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize,
- size_t size, size_t extra, bool zero);
-void *arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
- size_t size, size_t alignment, bool zero, tcache_t *tcache);
-dss_prec_t arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena);
-bool arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec);
-ssize_t arena_lg_dirty_mult_default_get(void);
-bool arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult);
-ssize_t arena_decay_time_default_get(void);
-bool arena_decay_time_default_set(ssize_t decay_time);
-void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
- unsigned *nthreads, const char **dss, ssize_t *lg_dirty_mult,
- ssize_t *decay_time, size_t *nactive, size_t *ndirty);
-void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
- const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
- size_t *nactive, size_t *ndirty, arena_stats_t *astats,
- malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
- malloc_huge_stats_t *hstats);
-unsigned arena_nthreads_get(arena_t *arena, bool internal);
-void arena_nthreads_inc(arena_t *arena, bool internal);
-void arena_nthreads_dec(arena_t *arena, bool internal);
-size_t arena_extent_sn_next(arena_t *arena);
-arena_t *arena_new(tsdn_t *tsdn, unsigned ind);
-void arena_boot(void);
-void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
-void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
-void arena_prefork2(tsdn_t *tsdn, arena_t *arena);
-void arena_prefork3(tsdn_t *tsdn, arena_t *arena);
-void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
-void arena_postfork_child(tsdn_t *tsdn, arena_t *arena);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-arena_chunk_map_bits_t *arena_bitselm_get_mutable(arena_chunk_t *chunk,
- size_t pageind);
-const arena_chunk_map_bits_t *arena_bitselm_get_const(
- const arena_chunk_t *chunk, size_t pageind);
-arena_chunk_map_misc_t *arena_miscelm_get_mutable(arena_chunk_t *chunk,
- size_t pageind);
-const arena_chunk_map_misc_t *arena_miscelm_get_const(
- const arena_chunk_t *chunk, size_t pageind);
-size_t arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm);
-void *arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm);
-arena_chunk_map_misc_t *arena_rd_to_miscelm(arena_runs_dirty_link_t *rd);
-arena_chunk_map_misc_t *arena_run_to_miscelm(arena_run_t *run);
-size_t *arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind);
-const size_t *arena_mapbitsp_get_const(const arena_chunk_t *chunk,
- size_t pageind);
-size_t arena_mapbitsp_read(const size_t *mapbitsp);
-size_t arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_size_decode(size_t mapbits);
-size_t arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk,
- size_t pageind);
-size_t arena_mapbits_large_size_get(const arena_chunk_t *chunk,
- size_t pageind);
-size_t arena_mapbits_small_runind_get(const arena_chunk_t *chunk,
- size_t pageind);
-szind_t arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_decommitted_get(const arena_chunk_t *chunk,
- size_t pageind);
-size_t arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind);
-void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
-size_t arena_mapbits_size_encode(size_t size);
-void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
- size_t size, size_t flags);
-void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
- size_t size);
-void arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind,
- size_t flags);
-void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind,
- size_t size, size_t flags);
-void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
- szind_t binind);
-void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
- size_t runind, szind_t binind, size_t flags);
-void arena_metadata_allocated_add(arena_t *arena, size_t size);
-void arena_metadata_allocated_sub(arena_t *arena, size_t size);
-size_t arena_metadata_allocated_get(arena_t *arena);
-bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
-bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
-bool arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes);
-szind_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
-szind_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
-size_t arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
- const void *ptr);
-prof_tctx_t *arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr);
-void arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
- prof_tctx_t *tctx);
-void arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
- const void *old_ptr, prof_tctx_t *old_tctx);
-void arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks);
-void arena_decay_tick(tsdn_t *tsdn, arena_t *arena);
-void *arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
- bool zero, tcache_t *tcache, bool slow_path);
-arena_t *arena_aalloc(const void *ptr);
-size_t arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote);
-void arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path);
-void arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
- bool slow_path);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
-# ifdef JEMALLOC_ARENA_INLINE_A
-JEMALLOC_ALWAYS_INLINE arena_chunk_map_bits_t *
-arena_bitselm_get_mutable(arena_chunk_t *chunk, size_t pageind)
-{
-
- assert(pageind >= map_bias);
- assert(pageind < chunk_npages);
-
- return (&chunk->map_bits[pageind-map_bias]);
-}
-
-JEMALLOC_ALWAYS_INLINE const arena_chunk_map_bits_t *
-arena_bitselm_get_const(const arena_chunk_t *chunk, size_t pageind)
-{
-
- return (arena_bitselm_get_mutable((arena_chunk_t *)chunk, pageind));
-}
-
-JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
-arena_miscelm_get_mutable(arena_chunk_t *chunk, size_t pageind)
-{
-
- assert(pageind >= map_bias);
- assert(pageind < chunk_npages);
-
- return ((arena_chunk_map_misc_t *)((uintptr_t)chunk +
- (uintptr_t)map_misc_offset) + pageind-map_bias);
-}
-
-JEMALLOC_ALWAYS_INLINE const arena_chunk_map_misc_t *
-arena_miscelm_get_const(const arena_chunk_t *chunk, size_t pageind)
-{
-#if 1 /* Work around gcc bug. */
- arena_chunk_t *mchunk = (arena_chunk_t *)chunk;
-
- return (arena_miscelm_get_mutable(mchunk, pageind));
-#else
- return (arena_miscelm_get_mutable((arena_chunk_t *)chunk, pageind));
-#endif
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm)
-{
- arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
- size_t pageind = ((uintptr_t)miscelm - ((uintptr_t)chunk +
- map_misc_offset)) / sizeof(arena_chunk_map_misc_t) + map_bias;
-
- assert(pageind >= map_bias);
- assert(pageind < chunk_npages);
-
- return (pageind);
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm)
-{
- arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
- size_t pageind = arena_miscelm_to_pageind(miscelm);
-
- return ((void *)((uintptr_t)chunk + (pageind << LG_PAGE)));
-}
-
-JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
-arena_rd_to_miscelm(arena_runs_dirty_link_t *rd)
-{
- arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t
- *)((uintptr_t)rd - offsetof(arena_chunk_map_misc_t, rd));
-
- assert(arena_miscelm_to_pageind(miscelm) >= map_bias);
- assert(arena_miscelm_to_pageind(miscelm) < chunk_npages);
-
- return (miscelm);
-}
-
-JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
-arena_run_to_miscelm(arena_run_t *run)
-{
- arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t
- *)((uintptr_t)run - offsetof(arena_chunk_map_misc_t, run));
-
- assert(arena_miscelm_to_pageind(miscelm) >= map_bias);
- assert(arena_miscelm_to_pageind(miscelm) < chunk_npages);
-
- return (miscelm);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t *
-arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind)
-{
-
- return (&arena_bitselm_get_mutable(chunk, pageind)->bits);
-}
-
-JEMALLOC_ALWAYS_INLINE const size_t *
-arena_mapbitsp_get_const(const arena_chunk_t *chunk, size_t pageind)
-{
-#if 1 /* Work around gcc bug. */
- arena_chunk_t *mchunk = (arena_chunk_t *)chunk;
-
- return (arena_mapbitsp_get_mutable(mchunk, pageind));
-#else
- return (arena_mapbitsp_get_mutable((arena_chunk_t *)chunk, pageind));
-#endif
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbitsp_read(const size_t *mapbitsp)
-{
-
- return (*mapbitsp);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind)
-{
-
- return (arena_mapbitsp_read(arena_mapbitsp_get_const(chunk, pageind)));
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_size_decode(size_t mapbits)
-{
- size_t size;
-
-#if CHUNK_MAP_SIZE_SHIFT > 0
- size = (mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT;
-#elif CHUNK_MAP_SIZE_SHIFT == 0
- size = mapbits & CHUNK_MAP_SIZE_MASK;
-#else
- size = (mapbits & CHUNK_MAP_SIZE_MASK) << -CHUNK_MAP_SIZE_SHIFT;
-#endif
-
- return (size);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk, size_t pageind)
-{
- size_t mapbits;
-
- mapbits = arena_mapbits_get(chunk, pageind);
- assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
- return (arena_mapbits_size_decode(mapbits));
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_large_size_get(const arena_chunk_t *chunk, size_t pageind)
-{
- size_t mapbits;
-
- mapbits = arena_mapbits_get(chunk, pageind);
- assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
- (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED));
- return (arena_mapbits_size_decode(mapbits));
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_small_runind_get(const arena_chunk_t *chunk, size_t pageind)
-{
- size_t mapbits;
-
- mapbits = arena_mapbits_get(chunk, pageind);
- assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
- CHUNK_MAP_ALLOCATED);
- return (mapbits >> CHUNK_MAP_RUNIND_SHIFT);
-}
-
-JEMALLOC_ALWAYS_INLINE szind_t
-arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind)
-{
- size_t mapbits;
- szind_t binind;
-
- mapbits = arena_mapbits_get(chunk, pageind);
- binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
- assert(binind < NBINS || binind == BININD_INVALID);
- return (binind);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind)
-{
- size_t mapbits;
-
- mapbits = arena_mapbits_get(chunk, pageind);
- assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
- (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
- return (mapbits & CHUNK_MAP_DIRTY);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind)
-{
- size_t mapbits;
-
- mapbits = arena_mapbits_get(chunk, pageind);
- assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
- (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
- return (mapbits & CHUNK_MAP_UNZEROED);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_decommitted_get(const arena_chunk_t *chunk, size_t pageind)
-{
- size_t mapbits;
-
- mapbits = arena_mapbits_get(chunk, pageind);
- assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
- (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
- return (mapbits & CHUNK_MAP_DECOMMITTED);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind)
-{
- size_t mapbits;
-
- mapbits = arena_mapbits_get(chunk, pageind);
- return (mapbits & CHUNK_MAP_LARGE);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind)
-{
- size_t mapbits;
-
- mapbits = arena_mapbits_get(chunk, pageind);
- return (mapbits & CHUNK_MAP_ALLOCATED);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits)
-{
-
- *mapbitsp = mapbits;
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_size_encode(size_t size)
-{
- size_t mapbits;
-
-#if CHUNK_MAP_SIZE_SHIFT > 0
- mapbits = size << CHUNK_MAP_SIZE_SHIFT;
-#elif CHUNK_MAP_SIZE_SHIFT == 0
- mapbits = size;
-#else
- mapbits = size >> -CHUNK_MAP_SIZE_SHIFT;
-#endif
-
- assert((mapbits & ~CHUNK_MAP_SIZE_MASK) == 0);
- return (mapbits);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
- size_t flags)
-{
- size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
-
- assert((size & PAGE_MASK) == 0);
- assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
- assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags &
- (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
- arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
- CHUNK_MAP_BININD_INVALID | flags);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
- size_t size)
-{
- size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
- size_t mapbits = arena_mapbitsp_read(mapbitsp);
-
- assert((size & PAGE_MASK) == 0);
- assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
- arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
- (mapbits & ~CHUNK_MAP_SIZE_MASK));
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, size_t flags)
-{
- size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
-
- assert((flags & CHUNK_MAP_UNZEROED) == flags);
- arena_mapbitsp_write(mapbitsp, flags);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
- size_t flags)
-{
- size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
-
- assert((size & PAGE_MASK) == 0);
- assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
- assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags &
- (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
- arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
- CHUNK_MAP_BININD_INVALID | flags | CHUNK_MAP_LARGE |
- CHUNK_MAP_ALLOCATED);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
- szind_t binind)
-{
- size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
- size_t mapbits = arena_mapbitsp_read(mapbitsp);
-
- assert(binind <= BININD_INVALID);
- assert(arena_mapbits_large_size_get(chunk, pageind) == LARGE_MINCLASS +
- large_pad);
- arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) |
- (binind << CHUNK_MAP_BININD_SHIFT));
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
- szind_t binind, size_t flags)
-{
- size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
-
- assert(binind < BININD_INVALID);
- assert(pageind - runind >= map_bias);
- assert((flags & CHUNK_MAP_UNZEROED) == flags);
- arena_mapbitsp_write(mapbitsp, (runind << CHUNK_MAP_RUNIND_SHIFT) |
- (binind << CHUNK_MAP_BININD_SHIFT) | flags | CHUNK_MAP_ALLOCATED);
-}
-
-JEMALLOC_INLINE void
-arena_metadata_allocated_add(arena_t *arena, size_t size)
-{
-
- atomic_add_z(&arena->stats.metadata_allocated, size);
-}
-
-JEMALLOC_INLINE void
-arena_metadata_allocated_sub(arena_t *arena, size_t size)
-{
-
- atomic_sub_z(&arena->stats.metadata_allocated, size);
-}
-
-JEMALLOC_INLINE size_t
-arena_metadata_allocated_get(arena_t *arena)
-{
-
- return (atomic_read_z(&arena->stats.metadata_allocated));
-}
-
-JEMALLOC_INLINE bool
-arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)
-{
-
- cassert(config_prof);
- assert(prof_interval != 0);
-
- arena->prof_accumbytes += accumbytes;
- if (arena->prof_accumbytes >= prof_interval) {
- arena->prof_accumbytes -= prof_interval;
- return (true);
- }
- return (false);
-}
-
-JEMALLOC_INLINE bool
-arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
-{
-
- cassert(config_prof);
-
- if (likely(prof_interval == 0))
- return (false);
- return (arena_prof_accum_impl(arena, accumbytes));
-}
-
-JEMALLOC_INLINE bool
-arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes)
-{
-
- cassert(config_prof);
-
- if (likely(prof_interval == 0))
- return (false);
-
- {
- bool ret;
-
- malloc_mutex_lock(tsdn, &arena->lock);
- ret = arena_prof_accum_impl(arena, accumbytes);
- malloc_mutex_unlock(tsdn, &arena->lock);
- return (ret);
- }
-}
-
-JEMALLOC_ALWAYS_INLINE szind_t
-arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
-{
- szind_t binind;
-
- binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
-
- if (config_debug) {
- arena_chunk_t *chunk;
- arena_t *arena;
- size_t pageind;
- size_t actual_mapbits;
- size_t rpages_ind;
- const arena_run_t *run;
- arena_bin_t *bin;
- szind_t run_binind, actual_binind;
- arena_bin_info_t *bin_info;
- const arena_chunk_map_misc_t *miscelm;
- const void *rpages;
-
- assert(binind != BININD_INVALID);
- assert(binind < NBINS);
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- arena = extent_node_arena_get(&chunk->node);
- pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- actual_mapbits = arena_mapbits_get(chunk, pageind);
- assert(mapbits == actual_mapbits);
- assert(arena_mapbits_large_get(chunk, pageind) == 0);
- assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
- rpages_ind = pageind - arena_mapbits_small_runind_get(chunk,
- pageind);
- miscelm = arena_miscelm_get_const(chunk, rpages_ind);
- run = &miscelm->run;
- run_binind = run->binind;
- bin = &arena->bins[run_binind];
- actual_binind = (szind_t)(bin - arena->bins);
- assert(run_binind == actual_binind);
- bin_info = &arena_bin_info[actual_binind];
- rpages = arena_miscelm_to_rpages(miscelm);
- assert(((uintptr_t)ptr - ((uintptr_t)rpages +
- (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
- == 0);
- }
-
- return (binind);
-}
-# endif /* JEMALLOC_ARENA_INLINE_A */
-
-# ifdef JEMALLOC_ARENA_INLINE_B
-JEMALLOC_INLINE szind_t
-arena_bin_index(arena_t *arena, arena_bin_t *bin)
-{
- szind_t binind = (szind_t)(bin - arena->bins);
- assert(binind < NBINS);
- return (binind);
-}
-
-JEMALLOC_INLINE size_t
-arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
-{
- size_t diff, interval, shift, regind;
- arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
- void *rpages = arena_miscelm_to_rpages(miscelm);
-
- /*
- * Freeing a pointer lower than region zero can cause assertion
- * failure.
- */
- assert((uintptr_t)ptr >= (uintptr_t)rpages +
- (uintptr_t)bin_info->reg0_offset);
-
- /*
- * Avoid doing division with a variable divisor if possible. Using
- * actual division here can reduce allocator throughput by over 20%!
- */
- diff = (size_t)((uintptr_t)ptr - (uintptr_t)rpages -
- bin_info->reg0_offset);
-
- /* Rescale (factor powers of 2 out of the numerator and denominator). */
- interval = bin_info->reg_interval;
- shift = ffs_zu(interval) - 1;
- diff >>= shift;
- interval >>= shift;
-
- if (interval == 1) {
- /* The divisor was a power of 2. */
- regind = diff;
- } else {
- /*
- * To divide by a number D that is not a power of two we
- * multiply by (2^21 / D) and then right shift by 21 positions.
- *
- * X / D
- *
- * becomes
- *
- * (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT
- *
- * We can omit the first three elements, because we never
- * divide by 0, and 1 and 2 are both powers of two, which are
- * handled above.
- */
-#define SIZE_INV_SHIFT ((sizeof(size_t) << 3) - LG_RUN_MAXREGS)
-#define SIZE_INV(s) (((ZU(1) << SIZE_INV_SHIFT) / (s)) + 1)
- static const size_t interval_invs[] = {
- SIZE_INV(3),
- SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
- SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
- SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
- SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
- SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
- SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
- SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
- };
-
- if (likely(interval <= ((sizeof(interval_invs) / sizeof(size_t))
- + 2))) {
- regind = (diff * interval_invs[interval - 3]) >>
- SIZE_INV_SHIFT;
- } else
- regind = diff / interval;
-#undef SIZE_INV
-#undef SIZE_INV_SHIFT
- }
- assert(diff == regind * interval);
- assert(regind < bin_info->nregs);
-
- return (regind);
-}
-
-JEMALLOC_INLINE prof_tctx_t *
-arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
-{
- prof_tctx_t *ret;
- arena_chunk_t *chunk;
-
- cassert(config_prof);
- assert(ptr != NULL);
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- if (likely(chunk != ptr)) {
- size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- size_t mapbits = arena_mapbits_get(chunk, pageind);
- assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
- if (likely((mapbits & CHUNK_MAP_LARGE) == 0))
- ret = (prof_tctx_t *)(uintptr_t)1U;
- else {
- arena_chunk_map_misc_t *elm =
- arena_miscelm_get_mutable(chunk, pageind);
- ret = atomic_read_p(&elm->prof_tctx_pun);
- }
- } else
- ret = huge_prof_tctx_get(tsdn, ptr);
-
- return (ret);
-}
-
-JEMALLOC_INLINE void
-arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
- prof_tctx_t *tctx)
-{
- arena_chunk_t *chunk;
-
- cassert(config_prof);
- assert(ptr != NULL);
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- if (likely(chunk != ptr)) {
- size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
-
- assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
-
- if (unlikely(usize > SMALL_MAXCLASS || (uintptr_t)tctx >
- (uintptr_t)1U)) {
- arena_chunk_map_misc_t *elm;
-
- assert(arena_mapbits_large_get(chunk, pageind) != 0);
-
- elm = arena_miscelm_get_mutable(chunk, pageind);
- atomic_write_p(&elm->prof_tctx_pun, tctx);
- } else {
- /*
- * tctx must always be initialized for large runs.
- * Assert that the surrounding conditional logic is
- * equivalent to checking whether ptr refers to a large
- * run.
- */
- assert(arena_mapbits_large_get(chunk, pageind) == 0);
- }
- } else
- huge_prof_tctx_set(tsdn, ptr, tctx);
-}
-
-JEMALLOC_INLINE void
-arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
- const void *old_ptr, prof_tctx_t *old_tctx)
-{
-
- cassert(config_prof);
- assert(ptr != NULL);
-
- if (unlikely(usize > SMALL_MAXCLASS || (ptr == old_ptr &&
- (uintptr_t)old_tctx > (uintptr_t)1U))) {
- arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- if (likely(chunk != ptr)) {
- size_t pageind;
- arena_chunk_map_misc_t *elm;
-
- pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
- LG_PAGE;
- assert(arena_mapbits_allocated_get(chunk, pageind) !=
- 0);
- assert(arena_mapbits_large_get(chunk, pageind) != 0);
-
- elm = arena_miscelm_get_mutable(chunk, pageind);
- atomic_write_p(&elm->prof_tctx_pun,
- (prof_tctx_t *)(uintptr_t)1U);
- } else
- huge_prof_tctx_reset(tsdn, ptr);
- }
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks)
-{
- tsd_t *tsd;
- ticker_t *decay_ticker;
-
- if (unlikely(tsdn_null(tsdn)))
- return;
- tsd = tsdn_tsd(tsdn);
- decay_ticker = decay_ticker_get(tsd, arena->ind);
- if (unlikely(decay_ticker == NULL))
- return;
- if (unlikely(ticker_ticks(decay_ticker, nticks)))
- arena_purge(tsdn, arena, false);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_decay_tick(tsdn_t *tsdn, arena_t *arena)
-{
-
- arena_decay_ticks(tsdn, arena, 1);
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
- tcache_t *tcache, bool slow_path)
-{
-
- assert(!tsdn_null(tsdn) || tcache == NULL);
- assert(size != 0);
-
- if (likely(tcache != NULL)) {
- if (likely(size <= SMALL_MAXCLASS)) {
- return (tcache_alloc_small(tsdn_tsd(tsdn), arena,
- tcache, size, ind, zero, slow_path));
- }
- if (likely(size <= tcache_maxclass)) {
- return (tcache_alloc_large(tsdn_tsd(tsdn), arena,
- tcache, size, ind, zero, slow_path));
- }
- /* (size > tcache_maxclass) case falls through. */
- assert(size > tcache_maxclass);
- }
-
- return (arena_malloc_hard(tsdn, arena, size, ind, zero));
-}
-
-JEMALLOC_ALWAYS_INLINE arena_t *
-arena_aalloc(const void *ptr)
-{
- arena_chunk_t *chunk;
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- if (likely(chunk != ptr))
- return (extent_node_arena_get(&chunk->node));
- else
- return (huge_aalloc(ptr));
-}
-
-/* Return the size of the allocation pointed to by ptr. */
-JEMALLOC_ALWAYS_INLINE size_t
-arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote)
-{
- size_t ret;
- arena_chunk_t *chunk;
- size_t pageind;
- szind_t binind;
-
- assert(ptr != NULL);
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- if (likely(chunk != ptr)) {
- pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
- binind = arena_mapbits_binind_get(chunk, pageind);
- if (unlikely(binind == BININD_INVALID || (config_prof && !demote
- && arena_mapbits_large_get(chunk, pageind) != 0))) {
- /*
- * Large allocation. In the common case (demote), and
- * as this is an inline function, most callers will only
- * end up looking at binind to determine that ptr is a
- * small allocation.
- */
- assert(config_cache_oblivious || ((uintptr_t)ptr &
- PAGE_MASK) == 0);
- ret = arena_mapbits_large_size_get(chunk, pageind) -
- large_pad;
- assert(ret != 0);
- assert(pageind + ((ret+large_pad)>>LG_PAGE) <=
- chunk_npages);
- assert(arena_mapbits_dirty_get(chunk, pageind) ==
- arena_mapbits_dirty_get(chunk,
- pageind+((ret+large_pad)>>LG_PAGE)-1));
- } else {
- /*
- * Small allocation (possibly promoted to a large
- * object).
- */
- assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
- arena_ptr_small_binind_get(ptr,
- arena_mapbits_get(chunk, pageind)) == binind);
- ret = index2size(binind);
- }
- } else
- ret = huge_salloc(tsdn, ptr);
-
- return (ret);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path)
-{
- arena_chunk_t *chunk;
- size_t pageind, mapbits;
-
- assert(!tsdn_null(tsdn) || tcache == NULL);
- assert(ptr != NULL);
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- if (likely(chunk != ptr)) {
- pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- mapbits = arena_mapbits_get(chunk, pageind);
- assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
- if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) {
- /* Small allocation. */
- if (likely(tcache != NULL)) {
- szind_t binind = arena_ptr_small_binind_get(ptr,
- mapbits);
- tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
- binind, slow_path);
- } else {
- arena_dalloc_small(tsdn,
- extent_node_arena_get(&chunk->node), chunk,
- ptr, pageind);
- }
- } else {
- size_t size = arena_mapbits_large_size_get(chunk,
- pageind);
-
- assert(config_cache_oblivious || ((uintptr_t)ptr &
- PAGE_MASK) == 0);
-
- if (likely(tcache != NULL) && size - large_pad <=
- tcache_maxclass) {
- tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
- size - large_pad, slow_path);
- } else {
- arena_dalloc_large(tsdn,
- extent_node_arena_get(&chunk->node), chunk,
- ptr);
- }
- }
- } else
- huge_dalloc(tsdn, ptr);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
- bool slow_path)
-{
- arena_chunk_t *chunk;
-
- assert(!tsdn_null(tsdn) || tcache == NULL);
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- if (likely(chunk != ptr)) {
- if (config_prof && opt_prof) {
- size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
- LG_PAGE;
- assert(arena_mapbits_allocated_get(chunk, pageind) !=
- 0);
- if (arena_mapbits_large_get(chunk, pageind) != 0) {
- /*
- * Make sure to use promoted size, not request
- * size.
- */
- size = arena_mapbits_large_size_get(chunk,
- pageind) - large_pad;
- }
- }
- assert(s2u(size) == s2u(arena_salloc(tsdn, ptr, false)));
-
- if (likely(size <= SMALL_MAXCLASS)) {
- /* Small allocation. */
- if (likely(tcache != NULL)) {
- szind_t binind = size2index(size);
- tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
- binind, slow_path);
- } else {
- size_t pageind = ((uintptr_t)ptr -
- (uintptr_t)chunk) >> LG_PAGE;
- arena_dalloc_small(tsdn,
- extent_node_arena_get(&chunk->node), chunk,
- ptr, pageind);
- }
- } else {
- assert(config_cache_oblivious || ((uintptr_t)ptr &
- PAGE_MASK) == 0);
-
- if (likely(tcache != NULL) && size <= tcache_maxclass) {
- tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
- size, slow_path);
- } else {
- arena_dalloc_large(tsdn,
- extent_node_arena_get(&chunk->node), chunk,
- ptr);
- }
- }
- } else
- huge_dalloc(tsdn, ptr);
-}
-# endif /* JEMALLOC_ARENA_INLINE_B */
-#endif
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
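
The deleted arena_run_regind() above avoided a variable-divisor division by multiplying by a precomputed fixed-point inverse of the region interval. The sketch below is a stand-alone illustration of that trick only; INV_SHIFT, INV() and the loop bounds are hypothetical, chosen so the identity is exact for the small offsets involved (diff < (1 << INV_SHIFT) / interval), and are not jemalloc identifiers.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define	INV_SHIFT	21
#define	INV(d)		((((size_t)1 << INV_SHIFT) / (d)) + 1)

int
main(void)
{
	/* Inverses for divisors 3..7; the removed table covered 3..31. */
	static const size_t invs[] = {
		INV(3), INV(4), INV(5), INV(6), INV(7)
	};
	size_t interval, diff;

	for (interval = 3; interval <= 7; interval++) {
		for (diff = 0; diff < 4096; diff++) {
			/* Multiply by the inverse, then shift, instead of dividing. */
			size_t regind = (diff * invs[interval - 3]) >>
			    INV_SHIFT;

			/* Exact while diff < (1 << INV_SHIFT) / interval. */
			assert(regind == diff / interval);
		}
	}
	printf("inverse-multiply division matches plain division\n");
	return (0);
}

The removed header derived its real shift as (sizeof(size_t) << 3) - LG_RUN_MAXREGS, which keeps the same exactness bound for every region interval it supported.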
diff --git a/contrib/jemalloc/include/jemalloc/internal/arena_externs.h b/contrib/jemalloc/include/jemalloc/internal/arena_externs.h
new file mode 100644
index 000000000000..3a85bcbb299d
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/arena_externs.h
@@ -0,0 +1,96 @@
+#ifndef JEMALLOC_INTERNAL_ARENA_EXTERNS_H
+#define JEMALLOC_INTERNAL_ARENA_EXTERNS_H
+
+#include "jemalloc/internal/extent_dss.h"
+#include "jemalloc/internal/pages.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/stats.h"
+
+extern ssize_t opt_dirty_decay_ms;
+extern ssize_t opt_muzzy_decay_ms;
+
+extern const arena_bin_info_t arena_bin_info[NBINS];
+
+extern percpu_arena_mode_t opt_percpu_arena;
+extern const char *percpu_arena_mode_names[];
+
+extern const uint64_t h_steps[SMOOTHSTEP_NSTEPS];
+extern malloc_mutex_t arenas_lock;
+
+void arena_stats_large_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
+ szind_t szind, uint64_t nrequests);
+void arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
+ size_t size);
+void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
+ unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms,
+ ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy);
+void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
+ const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
+ size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
+ malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats);
+void arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent);
+#ifdef JEMALLOC_JET
+size_t arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr);
+#endif
+extent_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
+ size_t usize, size_t alignment, bool *zero);
+void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena,
+ extent_t *extent);
+void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
+ extent_t *extent, size_t oldsize);
+void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,
+ extent_t *extent, size_t oldsize);
+ssize_t arena_dirty_decay_ms_get(arena_t *arena);
+bool arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms);
+ssize_t arena_muzzy_decay_ms_get(arena_t *arena);
+bool arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms);
+void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
+ bool all);
+void arena_reset(tsd_t *tsd, arena_t *arena);
+void arena_destroy(tsd_t *tsd, arena_t *arena);
+void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
+ tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
+void arena_alloc_junk_small(void *ptr, const arena_bin_info_t *bin_info,
+ bool zero);
+
+typedef void (arena_dalloc_junk_small_t)(void *, const arena_bin_info_t *);
+extern arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small;
+
+void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
+ szind_t ind, bool zero);
+void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
+ size_t alignment, bool zero, tcache_t *tcache);
+void arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize);
+void arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
+ bool slow_path);
+void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
+ extent_t *extent, void *ptr);
+void arena_dalloc_small(tsdn_t *tsdn, void *ptr);
+bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
+ size_t extra, bool zero);
+void *arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
+ size_t size, size_t alignment, bool zero, tcache_t *tcache);
+dss_prec_t arena_dss_prec_get(arena_t *arena);
+bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
+ssize_t arena_dirty_decay_ms_default_get(void);
+bool arena_dirty_decay_ms_default_set(ssize_t decay_ms);
+ssize_t arena_muzzy_decay_ms_default_get(void);
+bool arena_muzzy_decay_ms_default_set(ssize_t decay_ms);
+unsigned arena_nthreads_get(arena_t *arena, bool internal);
+void arena_nthreads_inc(arena_t *arena, bool internal);
+void arena_nthreads_dec(arena_t *arena, bool internal);
+size_t arena_extent_sn_next(arena_t *arena);
+arena_t *arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
+void arena_boot(void);
+void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
+void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
+void arena_prefork2(tsdn_t *tsdn, arena_t *arena);
+void arena_prefork3(tsdn_t *tsdn, arena_t *arena);
+void arena_prefork4(tsdn_t *tsdn, arena_t *arena);
+void arena_prefork5(tsdn_t *tsdn, arena_t *arena);
+void arena_prefork6(tsdn_t *tsdn, arena_t *arena);
+void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
+void arena_postfork_child(tsdn_t *tsdn, arena_t *arena);
+
+#endif /* JEMALLOC_INTERNAL_ARENA_EXTERNS_H */
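
Illustrative only, not part of this patch: the decay knobs that opt_dirty_decay_ms above backs are normally reached through the public mallctl() interface. The sketch below assumes the "opt.dirty_decay_ms" name documented in jemalloc(3) and the FreeBSD <malloc_np.h> prototype; treat both as assumptions rather than definitions made by this header.

#include <stdio.h>
#include <malloc_np.h>	/* mallctl() on FreeBSD; <jemalloc/jemalloc.h> elsewhere. */

int
main(void) {
	ssize_t decay_ms;
	size_t sz = sizeof(decay_ms);

	/* Read the compiled-in/default dirty decay time, in milliseconds. */
	if (mallctl("opt.dirty_decay_ms", &decay_ms, &sz, NULL, 0) != 0) {
		fprintf(stderr, "mallctl(\"opt.dirty_decay_ms\") failed\n");
		return 1;
	}
	printf("dirty_decay_ms: %zd\n", decay_ms);
	return 0;
}
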
diff --git a/contrib/jemalloc/include/jemalloc/internal/arena_inlines_a.h b/contrib/jemalloc/include/jemalloc/internal/arena_inlines_a.h
new file mode 100644
index 000000000000..da5877060a85
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/arena_inlines_a.h
@@ -0,0 +1,57 @@
+#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_A_H
+#define JEMALLOC_INTERNAL_ARENA_INLINES_A_H
+
+static inline unsigned
+arena_ind_get(const arena_t *arena) {
+ return base_ind_get(arena->base);
+}
+
+static inline void
+arena_internal_add(arena_t *arena, size_t size) {
+ atomic_fetch_add_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
+}
+
+static inline void
+arena_internal_sub(arena_t *arena, size_t size) {
+ atomic_fetch_sub_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
+}
+
+static inline size_t
+arena_internal_get(arena_t *arena) {
+ return atomic_load_zu(&arena->stats.internal, ATOMIC_RELAXED);
+}
+
+static inline bool
+arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes) {
+ cassert(config_prof);
+
+ if (likely(prof_interval == 0)) {
+ return false;
+ }
+
+ return prof_accum_add(tsdn, &arena->prof_accum, accumbytes);
+}
+
+static inline void
+percpu_arena_update(tsd_t *tsd, unsigned cpu) {
+ assert(have_percpu_arena);
+ arena_t *oldarena = tsd_arena_get(tsd);
+ assert(oldarena != NULL);
+ unsigned oldind = arena_ind_get(oldarena);
+
+ if (oldind != cpu) {
+ unsigned newind = cpu;
+ arena_t *newarena = arena_get(tsd_tsdn(tsd), newind, true);
+ assert(newarena != NULL);
+
+ /* Set new arena/tcache associations. */
+ arena_migrate(tsd, oldind, newind);
+ tcache_t *tcache = tcache_get(tsd);
+ if (tcache != NULL) {
+ tcache_arena_reassociate(tsd_tsdn(tsd), tcache,
+ newarena);
+ }
+ }
+}
+
+#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_A_H */
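
A minimal standalone sketch of the relaxed-ordering counter pattern used by arena_internal_{add,sub,get} above, written with standard C11 atomics instead of jemalloc's generated atomic_*_zu wrappers; the toy_* names are invented for illustration.

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

typedef struct {
	atomic_size_t internal;	/* Bytes of internal metadata. */
} toy_stats_t;

static void
toy_internal_add(toy_stats_t *stats, size_t size) {
	atomic_fetch_add_explicit(&stats->internal, size, memory_order_relaxed);
}

static void
toy_internal_sub(toy_stats_t *stats, size_t size) {
	atomic_fetch_sub_explicit(&stats->internal, size, memory_order_relaxed);
}

static size_t
toy_internal_get(toy_stats_t *stats) {
	return atomic_load_explicit(&stats->internal, memory_order_relaxed);
}

int
main(void) {
	toy_stats_t stats;
	atomic_init(&stats.internal, 0);
	toy_internal_add(&stats, 4096);
	toy_internal_sub(&stats, 1024);
	printf("%zu\n", toy_internal_get(&stats));	/* Prints 3072. */
	return 0;
}
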
diff --git a/contrib/jemalloc/include/jemalloc/internal/arena_inlines_b.h b/contrib/jemalloc/include/jemalloc/internal/arena_inlines_b.h
new file mode 100644
index 000000000000..003abe116fb4
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/arena_inlines_b.h
@@ -0,0 +1,361 @@
+#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_B_H
+#define JEMALLOC_INTERNAL_ARENA_INLINES_B_H
+
+#include "jemalloc/internal/jemalloc_internal_types.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/sz.h"
+#include "jemalloc/internal/ticker.h"
+
+static inline szind_t
+arena_bin_index(arena_t *arena, arena_bin_t *bin) {
+ szind_t binind = (szind_t)(bin - arena->bins);
+ assert(binind < NBINS);
+ return binind;
+}
+
+JEMALLOC_ALWAYS_INLINE prof_tctx_t *
+arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
+ cassert(config_prof);
+ assert(ptr != NULL);
+
+ /* Static check. */
+ if (alloc_ctx == NULL) {
+ const extent_t *extent = iealloc(tsdn, ptr);
+ if (unlikely(!extent_slab_get(extent))) {
+ return large_prof_tctx_get(tsdn, extent);
+ }
+ } else {
+ if (unlikely(!alloc_ctx->slab)) {
+ return large_prof_tctx_get(tsdn, iealloc(tsdn, ptr));
+ }
+ }
+ return (prof_tctx_t *)(uintptr_t)1U;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
+ alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
+ cassert(config_prof);
+ assert(ptr != NULL);
+
+ /* Static check. */
+ if (alloc_ctx == NULL) {
+ extent_t *extent = iealloc(tsdn, ptr);
+ if (unlikely(!extent_slab_get(extent))) {
+ large_prof_tctx_set(tsdn, extent, tctx);
+ }
+ } else {
+ if (unlikely(!alloc_ctx->slab)) {
+ large_prof_tctx_set(tsdn, iealloc(tsdn, ptr), tctx);
+ }
+ }
+}
+
+static inline void
+arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) {
+ cassert(config_prof);
+ assert(ptr != NULL);
+
+ extent_t *extent = iealloc(tsdn, ptr);
+ assert(!extent_slab_get(extent));
+
+ large_prof_tctx_reset(tsdn, extent);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) {
+ tsd_t *tsd;
+ ticker_t *decay_ticker;
+
+ if (unlikely(tsdn_null(tsdn))) {
+ return;
+ }
+ tsd = tsdn_tsd(tsdn);
+ decay_ticker = decay_ticker_get(tsd, arena_ind_get(arena));
+ if (unlikely(decay_ticker == NULL)) {
+ return;
+ }
+ if (unlikely(ticker_ticks(decay_ticker, nticks))) {
+ arena_decay(tsdn, arena, false, false);
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_decay_tick(tsdn_t *tsdn, arena_t *arena) {
+ malloc_mutex_assert_not_owner(tsdn, &arena->decay_dirty.mtx);
+ malloc_mutex_assert_not_owner(tsdn, &arena->decay_muzzy.mtx);
+
+ arena_decay_ticks(tsdn, arena, 1);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
+ tcache_t *tcache, bool slow_path) {
+ assert(!tsdn_null(tsdn) || tcache == NULL);
+ assert(size != 0);
+
+ if (likely(tcache != NULL)) {
+ if (likely(size <= SMALL_MAXCLASS)) {
+ return tcache_alloc_small(tsdn_tsd(tsdn), arena,
+ tcache, size, ind, zero, slow_path);
+ }
+ if (likely(size <= tcache_maxclass)) {
+ return tcache_alloc_large(tsdn_tsd(tsdn), arena,
+ tcache, size, ind, zero, slow_path);
+ }
+ /* (size > tcache_maxclass) case falls through. */
+ assert(size > tcache_maxclass);
+ }
+
+ return arena_malloc_hard(tsdn, arena, size, ind, zero);
+}
+
+JEMALLOC_ALWAYS_INLINE arena_t *
+arena_aalloc(tsdn_t *tsdn, const void *ptr) {
+ return extent_arena_get(iealloc(tsdn, ptr));
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+arena_salloc(tsdn_t *tsdn, const void *ptr) {
+ assert(ptr != NULL);
+
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+
+ szind_t szind = rtree_szind_read(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, true);
+ assert(szind != NSIZES);
+
+ return sz_index2size(szind);
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
+ /*
+ * Return 0 if ptr is not within an extent managed by jemalloc. This
+ * function has two extra costs relative to isalloc():
+ * - The rtree calls cannot claim to be dependent lookups, which induces
+ * rtree lookup load dependencies.
+ * - The lookup may fail, so there is an extra branch to check for
+ * failure.
+ */
+
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+
+ extent_t *extent;
+ szind_t szind;
+ if (rtree_extent_szind_read(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, false, &extent, &szind)) {
+ return 0;
+ }
+
+ if (extent == NULL) {
+ return 0;
+ }
+ assert(extent_state_get(extent) == extent_state_active);
+ /* Only slab members should be looked up via interior pointers. */
+ assert(extent_addr_get(extent) == ptr || extent_slab_get(extent));
+
+ assert(szind != NSIZES);
+
+ return sz_index2size(szind);
+}
+
+static inline void
+arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
+ assert(ptr != NULL);
+
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+
+ szind_t szind;
+ bool slab;
+ rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
+ true, &szind, &slab);
+
+ if (config_debug) {
+ extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
+ rtree_ctx, (uintptr_t)ptr, true);
+ assert(szind == extent_szind_get(extent));
+ assert(szind < NSIZES);
+ assert(slab == extent_slab_get(extent));
+ }
+
+ if (likely(slab)) {
+ /* Small allocation. */
+ arena_dalloc_small(tsdn, ptr);
+ } else {
+ extent_t *extent = iealloc(tsdn, ptr);
+ large_dalloc(tsdn, extent);
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
+ alloc_ctx_t *alloc_ctx, bool slow_path) {
+ assert(!tsdn_null(tsdn) || tcache == NULL);
+ assert(ptr != NULL);
+
+ if (unlikely(tcache == NULL)) {
+ arena_dalloc_no_tcache(tsdn, ptr);
+ return;
+ }
+
+ szind_t szind;
+ bool slab;
+ rtree_ctx_t *rtree_ctx;
+ if (alloc_ctx != NULL) {
+ szind = alloc_ctx->szind;
+ slab = alloc_ctx->slab;
+ assert(szind != NSIZES);
+ } else {
+ rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
+ rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, true, &szind, &slab);
+ }
+
+ if (config_debug) {
+ rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
+ extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
+ rtree_ctx, (uintptr_t)ptr, true);
+ assert(szind == extent_szind_get(extent));
+ assert(szind < NSIZES);
+ assert(slab == extent_slab_get(extent));
+ }
+
+ if (likely(slab)) {
+ /* Small allocation. */
+ tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
+ slow_path);
+ } else {
+ if (szind < nhbins) {
+ if (config_prof && unlikely(szind < NBINS)) {
+ arena_dalloc_promoted(tsdn, ptr, tcache,
+ slow_path);
+ } else {
+ tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
+ szind, slow_path);
+ }
+ } else {
+ extent_t *extent = iealloc(tsdn, ptr);
+ large_dalloc(tsdn, extent);
+ }
+ }
+}
+
+static inline void
+arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
+ assert(ptr != NULL);
+ assert(size <= LARGE_MAXCLASS);
+
+ szind_t szind;
+ bool slab;
+ if (!config_prof || !opt_prof) {
+ /*
+ * There is no risk of being confused by a promoted sampled
+ * object, so base szind and slab on the given size.
+ */
+ szind = sz_size2index(size);
+ slab = (szind < NBINS);
+ }
+
+ if ((config_prof && opt_prof) || config_debug) {
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
+ &rtree_ctx_fallback);
+
+ rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, true, &szind, &slab);
+
+ assert(szind == sz_size2index(size));
+ assert((config_prof && opt_prof) || slab == (szind < NBINS));
+
+ if (config_debug) {
+ extent_t *extent = rtree_extent_read(tsdn,
+ &extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
+ assert(szind == extent_szind_get(extent));
+ assert(slab == extent_slab_get(extent));
+ }
+ }
+
+ if (likely(slab)) {
+ /* Small allocation. */
+ arena_dalloc_small(tsdn, ptr);
+ } else {
+ extent_t *extent = iealloc(tsdn, ptr);
+ large_dalloc(tsdn, extent);
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
+ alloc_ctx_t *alloc_ctx, bool slow_path) {
+ assert(!tsdn_null(tsdn) || tcache == NULL);
+ assert(ptr != NULL);
+ assert(size <= LARGE_MAXCLASS);
+
+ if (unlikely(tcache == NULL)) {
+ arena_sdalloc_no_tcache(tsdn, ptr, size);
+ return;
+ }
+
+ szind_t szind;
+ bool slab;
+ UNUSED alloc_ctx_t local_ctx;
+ if (config_prof && opt_prof) {
+ if (alloc_ctx == NULL) {
+ /* Uncommon case and should be a static check. */
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
+ &rtree_ctx_fallback);
+ rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, true, &local_ctx.szind,
+ &local_ctx.slab);
+ assert(local_ctx.szind == sz_size2index(size));
+ alloc_ctx = &local_ctx;
+ }
+ slab = alloc_ctx->slab;
+ szind = alloc_ctx->szind;
+ } else {
+ /*
+ * There is no risk of being confused by a promoted sampled
+ * object, so base szind and slab on the given size.
+ */
+ szind = sz_size2index(size);
+ slab = (szind < NBINS);
+ }
+
+ if (config_debug) {
+ rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
+ rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, true, &szind, &slab);
+ extent_t *extent = rtree_extent_read(tsdn,
+ &extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
+ assert(szind == extent_szind_get(extent));
+ assert(slab == extent_slab_get(extent));
+ }
+
+ if (likely(slab)) {
+ /* Small allocation. */
+ tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
+ slow_path);
+ } else {
+ if (szind < nhbins) {
+ if (config_prof && unlikely(szind < NBINS)) {
+ arena_dalloc_promoted(tsdn, ptr, tcache,
+ slow_path);
+ } else {
+ tcache_dalloc_large(tsdn_tsd(tsdn),
+ tcache, ptr, szind, slow_path);
+ }
+ } else {
+ extent_t *extent = iealloc(tsdn, ptr);
+ large_dalloc(tsdn, extent);
+ }
+ }
+}
+
+#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_B_H */
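
The routing decision inside arena_dalloc()/arena_sdalloc() above reduces to: slab-backed pointers take the small path, non-slab sizes that still fit a thread-cache bin take the cached large path, and everything else is freed directly as a large extent. A toy, self-contained sketch of that decision follows; NBINS_TOY and NHBINS_TOY are stand-ins for jemalloc's NBINS and nhbins, not their real values.

#include <stdbool.h>
#include <stdio.h>

#define NBINS_TOY 36
#define NHBINS_TOY 44

typedef enum {
	DALLOC_SMALL_TCACHE,	/* Return region to a small tcache bin. */
	DALLOC_LARGE_TCACHE,	/* Large, but small enough to cache per thread. */
	DALLOC_LARGE_DIRECT	/* Too big for the tcache; free the extent. */
} dalloc_path_t;

static dalloc_path_t
route_dalloc(bool slab, unsigned szind) {
	if (slab) {
		return DALLOC_SMALL_TCACHE;
	}
	if (szind < NHBINS_TOY) {
		return DALLOC_LARGE_TCACHE;
	}
	return DALLOC_LARGE_DIRECT;
}

int
main(void) {
	printf("%d %d %d\n", route_dalloc(true, 3), route_dalloc(false, 40),
	    route_dalloc(false, 100));	/* Prints 0 1 2. */
	return 0;
}
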
diff --git a/contrib/jemalloc/include/jemalloc/internal/arena_structs_a.h b/contrib/jemalloc/include/jemalloc/internal/arena_structs_a.h
new file mode 100644
index 000000000000..46aa77c884b7
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/arena_structs_a.h
@@ -0,0 +1,11 @@
+#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H
+#define JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H
+
+#include "jemalloc/internal/bitmap.h"
+
+struct arena_slab_data_s {
+ /* Per region allocated/deallocated bitmap. */
+ bitmap_t bitmap[BITMAP_GROUPS_MAX];
+};
+
+#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H */
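
A minimal sketch of the "one bit per region" bookkeeping that arena_slab_data_s holds: a fixed array of 64-bit words where a set bit marks a free region. This is a simplification; jemalloc's bitmap_t is sized from bitmap_info_t and supports hierarchical groups, and the TOY_* names here are invented.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_NREGS 128
#define TOY_GROUPS ((TOY_NREGS + 63) / 64)

typedef struct {
	uint64_t bits[TOY_GROUPS];
} toy_slab_bitmap_t;

static void
toy_bitmap_set_free(toy_slab_bitmap_t *b, unsigned regind) {
	b->bits[regind / 64] |= (uint64_t)1 << (regind % 64);
}

static bool
toy_bitmap_is_free(const toy_slab_bitmap_t *b, unsigned regind) {
	return (b->bits[regind / 64] >> (regind % 64)) & 1;
}

int
main(void) {
	toy_slab_bitmap_t b = {{0}};
	toy_bitmap_set_free(&b, 70);
	printf("%d %d\n", toy_bitmap_is_free(&b, 70),
	    toy_bitmap_is_free(&b, 7));	/* Prints 1 0. */
	return 0;
}
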
diff --git a/contrib/jemalloc/include/jemalloc/internal/arena_structs_b.h b/contrib/jemalloc/include/jemalloc/internal/arena_structs_b.h
new file mode 100644
index 000000000000..d1fffec1936e
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/arena_structs_b.h
@@ -0,0 +1,284 @@
+#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
+#define JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
+
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/bitmap.h"
+#include "jemalloc/internal/extent_dss.h"
+#include "jemalloc/internal/jemalloc_internal_types.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/nstime.h"
+#include "jemalloc/internal/ql.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/smoothstep.h"
+#include "jemalloc/internal/stats.h"
+#include "jemalloc/internal/ticker.h"
+
+/*
+ * Read-only information associated with each element of arena_t's bins array
+ * is stored separately, partly to reduce memory usage (only one copy, rather
+ * than one per arena), but mainly to avoid false cacheline sharing.
+ *
+ * Each slab has the following layout:
+ *
+ * /--------------------\
+ * | region 0 |
+ * |--------------------|
+ * | region 1 |
+ * |--------------------|
+ * | ... |
+ * | ... |
+ * | ... |
+ * |--------------------|
+ * | region nregs-1 |
+ * \--------------------/
+ */
+struct arena_bin_info_s {
+ /* Size of regions in a slab for this bin's size class. */
+ size_t reg_size;
+
+ /* Total size of a slab for this bin's size class. */
+ size_t slab_size;
+
+ /* Total number of regions in a slab for this bin's size class. */
+ uint32_t nregs;
+
+ /*
+ * Metadata used to manipulate bitmaps for slabs associated with this
+ * bin.
+ */
+ bitmap_info_t bitmap_info;
+};
+
+struct arena_decay_s {
+ /* Synchronizes all non-atomic fields. */
+ malloc_mutex_t mtx;
+ /*
+ * True if a thread is currently purging the extents associated with
+ * this decay structure.
+ */
+ bool purging;
+ /*
+ * Approximate time in milliseconds from the creation of a set of unused
+ * dirty pages until an equivalent set of unused dirty pages is purged
+ * and/or reused.
+ */
+ atomic_zd_t time_ms;
+ /* time / SMOOTHSTEP_NSTEPS. */
+ nstime_t interval;
+ /*
+ * Time at which the current decay interval logically started. We do
+ * not actually advance to a new epoch until sometime after it starts
+ * because of scheduling and computation delays, and it is even possible
+ * to completely skip epochs. In all cases, during epoch advancement we
+ * merge all relevant activity into the most recently recorded epoch.
+ */
+ nstime_t epoch;
+ /* Deadline randomness generator. */
+ uint64_t jitter_state;
+ /*
+ * Deadline for current epoch. This is the sum of interval and per
+ * epoch jitter which is a uniform random variable in [0..interval).
+ * Epochs always advance by precise multiples of interval, but we
+ * randomize the deadline to reduce the likelihood of arenas purging in
+ * lockstep.
+ */
+ nstime_t deadline;
+ /*
+ * Number of unpurged pages at beginning of current epoch. During epoch
+ * advancement we use the delta between arena->decay_*.nunpurged and
+ * extents_npages_get(&arena->extents_*) to determine how many dirty
+ * pages, if any, were generated.
+ */
+ size_t nunpurged;
+ /*
+ * Trailing log of how many unused dirty pages were generated during
+ * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
+ * element is the most recent epoch. Corresponding epoch times are
+ * relative to epoch.
+ */
+ size_t backlog[SMOOTHSTEP_NSTEPS];
+
+ /*
+ * Pointer to associated stats. These stats are embedded directly in
+ * the arena's stats due to how stats structures are shared between the
+ * arena and ctl code.
+ *
+	 * Synchronization: Same as associated arena's stats field.
+	 */
+	decay_stats_t *stats;
+ /* Peak number of pages in associated extents. Used for debug only. */
+ uint64_t ceil_npages;
+};
+
+struct arena_bin_s {
+ /* All operations on arena_bin_t fields require lock ownership. */
+ malloc_mutex_t lock;
+
+ /*
+ * Current slab being used to service allocations of this bin's size
+ * class. slabcur is independent of slabs_{nonfull,full}; whenever
+ * slabcur is reassigned, the previous slab must be deallocated or
+ * inserted into slabs_{nonfull,full}.
+ */
+ extent_t *slabcur;
+
+ /*
+	 * Heap of non-full slabs.  This heap is used to ensure that new
+ * allocations come from the non-full slab that is oldest/lowest in
+ * memory.
+ */
+ extent_heap_t slabs_nonfull;
+
+ /* List used to track full slabs. */
+ extent_list_t slabs_full;
+
+ /* Bin statistics. */
+ malloc_bin_stats_t stats;
+};
+
+struct arena_s {
+ /*
+ * Number of threads currently assigned to this arena. Each thread has
+ * two distinct assignments, one for application-serving allocation, and
+ * the other for internal metadata allocation. Internal metadata must
+ * not be allocated from arenas explicitly created via the arenas.create
+ * mallctl, because the arena.<i>.reset mallctl indiscriminately
+ * discards all allocations for the affected arena.
+ *
+ * 0: Application allocation.
+ * 1: Internal metadata allocation.
+ *
+ * Synchronization: atomic.
+ */
+ atomic_u_t nthreads[2];
+
+ /*
+ * When percpu_arena is enabled, to amortize the cost of reading /
+ * updating the current CPU id, track the most recent thread accessing
+ * this arena, and only read CPU if there is a mismatch.
+ */
+ tsdn_t *last_thd;
+
+ /* Synchronization: internal. */
+ arena_stats_t stats;
+
+ /*
+ * List of tcaches for extant threads associated with this arena.
+ * Stats from these are merged incrementally, and at exit if
+ * opt_stats_print is enabled.
+ *
+ * Synchronization: tcache_ql_mtx.
+ */
+ ql_head(tcache_t) tcache_ql;
+ malloc_mutex_t tcache_ql_mtx;
+
+ /* Synchronization: internal. */
+ prof_accum_t prof_accum;
+ uint64_t prof_accumbytes;
+
+ /*
+ * PRNG state for cache index randomization of large allocation base
+ * pointers.
+ *
+ * Synchronization: atomic.
+ */
+ atomic_zu_t offset_state;
+
+ /*
+ * Extent serial number generator state.
+ *
+ * Synchronization: atomic.
+ */
+ atomic_zu_t extent_sn_next;
+
+ /*
+ * Represents a dss_prec_t, but atomically.
+ *
+ * Synchronization: atomic.
+ */
+ atomic_u_t dss_prec;
+
+ /*
+ * Number of pages in active extents.
+ *
+ * Synchronization: atomic.
+ */
+ atomic_zu_t nactive;
+
+ /*
+ * Extant large allocations.
+ *
+ * Synchronization: large_mtx.
+ */
+ extent_list_t large;
+ /* Synchronizes all large allocation/update/deallocation. */
+ malloc_mutex_t large_mtx;
+
+ /*
+ * Collections of extents that were previously allocated. These are
+ * used when allocating extents, in an attempt to re-use address space.
+ *
+ * Synchronization: internal.
+ */
+ extents_t extents_dirty;
+ extents_t extents_muzzy;
+ extents_t extents_retained;
+
+ /*
+ * Decay-based purging state, responsible for scheduling extent state
+ * transitions.
+ *
+ * Synchronization: internal.
+ */
+ arena_decay_t decay_dirty; /* dirty --> muzzy */
+ arena_decay_t decay_muzzy; /* muzzy --> retained */
+
+ /*
+ * Next extent size class in a growing series to use when satisfying a
+ * request via the extent hooks (only if opt_retain). This limits the
+ * number of disjoint virtual memory ranges so that extent merging can
+ * be effective even if multiple arenas' extent allocation requests are
+ * highly interleaved.
+ *
+ * Synchronization: extent_grow_mtx
+ */
+ pszind_t extent_grow_next;
+ malloc_mutex_t extent_grow_mtx;
+
+ /*
+ * Available extent structures that were allocated via
+ * base_alloc_extent().
+ *
+ * Synchronization: extent_avail_mtx.
+ */
+ extent_tree_t extent_avail;
+ malloc_mutex_t extent_avail_mtx;
+
+ /*
+ * bins is used to store heaps of free regions.
+ *
+ * Synchronization: internal.
+ */
+ arena_bin_t bins[NBINS];
+
+ /*
+ * Base allocator, from which arena metadata are allocated.
+ *
+ * Synchronization: internal.
+ */
+ base_t *base;
+ /* Used to determine uptime. Read-only after initialization. */
+ nstime_t create_time;
+};
+
+/* Used in conjunction with tsd for fast arena-related context lookup. */
+struct arena_tdata_s {
+ ticker_t decay_ticker;
+};
+
+/* Used to pass rtree lookup context down the path. */
+struct alloc_ctx_s {
+ szind_t szind;
+ bool slab;
+};
+
+#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H */
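
The backlog field of arena_decay_t above is a trailing log of dirty-page generation per decay epoch. A self-contained sketch of how such a log can be advanced when nadvance epochs elapse is shown below: old entries shift out, vacated slots are zeroed, and everything generated since the last update is credited to the newest epoch. SMOOTHSTEP_NSTEPS_TOY is a stand-in, and this is a simplification of the real update logic, not a copy of it.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define SMOOTHSTEP_NSTEPS_TOY 200

static void
backlog_advance(size_t backlog[SMOOTHSTEP_NSTEPS_TOY], size_t nadvance,
    size_t new_dirty_pages) {
	if (nadvance >= SMOOTHSTEP_NSTEPS_TOY) {
		/* Every recorded epoch has expired. */
		memset(backlog, 0,
		    (SMOOTHSTEP_NSTEPS_TOY - 1) * sizeof(size_t));
	} else {
		/* Shift out the oldest nadvance epochs, zero the vacated tail. */
		memmove(backlog, &backlog[nadvance],
		    (SMOOTHSTEP_NSTEPS_TOY - nadvance) * sizeof(size_t));
		memset(&backlog[SMOOTHSTEP_NSTEPS_TOY - nadvance], 0,
		    (nadvance - 1) * sizeof(size_t));
	}
	/* Credit all newly generated dirty pages to the most recent epoch. */
	backlog[SMOOTHSTEP_NSTEPS_TOY - 1] = new_dirty_pages;
}

int
main(void) {
	size_t backlog[SMOOTHSTEP_NSTEPS_TOY] = {0};
	backlog_advance(backlog, 1, 512);
	backlog_advance(backlog, 3, 128);
	printf("%zu %zu\n", backlog[SMOOTHSTEP_NSTEPS_TOY - 1],
	    backlog[SMOOTHSTEP_NSTEPS_TOY - 4]);	/* Prints 128 512. */
	return 0;
}
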
diff --git a/contrib/jemalloc/include/jemalloc/internal/arena_types.h b/contrib/jemalloc/include/jemalloc/internal/arena_types.h
new file mode 100644
index 000000000000..a691bd811e03
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/arena_types.h
@@ -0,0 +1,45 @@
+#ifndef JEMALLOC_INTERNAL_ARENA_TYPES_H
+#define JEMALLOC_INTERNAL_ARENA_TYPES_H
+
+/* Maximum number of regions in one slab. */
+#define LG_SLAB_MAXREGS (LG_PAGE - LG_TINY_MIN)
+#define SLAB_MAXREGS (1U << LG_SLAB_MAXREGS)
+
+/* Default decay times in milliseconds. */
+#define DIRTY_DECAY_MS_DEFAULT ZD(10 * 1000)
+#define MUZZY_DECAY_MS_DEFAULT ZD(10 * 1000)
+/* Number of event ticks between time checks. */
+#define DECAY_NTICKS_PER_UPDATE 1000
+
+typedef struct arena_slab_data_s arena_slab_data_t;
+typedef struct arena_bin_info_s arena_bin_info_t;
+typedef struct arena_decay_s arena_decay_t;
+typedef struct arena_bin_s arena_bin_t;
+typedef struct arena_s arena_t;
+typedef struct arena_tdata_s arena_tdata_t;
+typedef struct alloc_ctx_s alloc_ctx_t;
+
+typedef enum {
+ percpu_arena_mode_names_base = 0, /* Used for options processing. */
+
+ /*
+	 * *_uninit are used only during bootstrapping, and must correspond
+	 * to the initialized variant plus percpu_arena_mode_enabled_base.
+ */
+ percpu_arena_uninit = 0,
+ per_phycpu_arena_uninit = 1,
+
+ /* All non-disabled modes must come after percpu_arena_disabled. */
+ percpu_arena_disabled = 2,
+
+ percpu_arena_mode_names_limit = 3, /* Used for options processing. */
+ percpu_arena_mode_enabled_base = 3,
+
+ percpu_arena = 3,
+ per_phycpu_arena = 4 /* Hyper threads share arena. */
+} percpu_arena_mode_t;
+
+#define PERCPU_ARENA_ENABLED(m) ((m) >= percpu_arena_mode_enabled_base)
+#define PERCPU_ARENA_DEFAULT percpu_arena_disabled
+
+#endif /* JEMALLOC_INTERNAL_ARENA_TYPES_H */
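
A small standalone check of the invariants the enum comment above describes: each *_uninit value plus percpu_arena_mode_enabled_base yields its initialized counterpart, and only modes at or above that base count as enabled. The toy_* copies below merely mirror the values for illustration.

#include <assert.h>
#include <stdio.h>

enum {
	toy_percpu_arena_uninit = 0,
	toy_per_phycpu_arena_uninit = 1,
	toy_percpu_arena_disabled = 2,
	toy_percpu_arena_mode_enabled_base = 3,
	toy_percpu_arena = 3,
	toy_per_phycpu_arena = 4
};

#define TOY_PERCPU_ARENA_ENABLED(m) ((m) >= toy_percpu_arena_mode_enabled_base)

int
main(void) {
	assert(toy_percpu_arena_uninit +
	    toy_percpu_arena_mode_enabled_base == toy_percpu_arena);
	assert(toy_per_phycpu_arena_uninit +
	    toy_percpu_arena_mode_enabled_base == toy_per_phycpu_arena);
	printf("%d %d\n",
	    TOY_PERCPU_ARENA_ENABLED(toy_percpu_arena_disabled),
	    TOY_PERCPU_ARENA_ENABLED(toy_per_phycpu_arena));	/* Prints 0 1. */
	return 0;
}
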
diff --git a/contrib/jemalloc/include/jemalloc/internal/assert.h b/contrib/jemalloc/include/jemalloc/internal/assert.h
index 6f8f7eb93194..be4d45b32130 100644
--- a/contrib/jemalloc/include/jemalloc/internal/assert.h
+++ b/contrib/jemalloc/include/jemalloc/internal/assert.h
@@ -1,9 +1,12 @@
+#include "jemalloc/internal/malloc_io.h"
+#include "jemalloc/internal/util.h"
+
/*
* Define a custom assert() in order to reduce the chances of deadlock during
* assertion failure.
*/
#ifndef assert
-#define assert(e) do { \
+#define assert(e) do { \
if (unlikely(config_debug && !(e))) { \
malloc_printf( \
"<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
@@ -14,7 +17,7 @@
#endif
#ifndef not_reached
-#define not_reached() do { \
+#define not_reached() do { \
if (config_debug) { \
malloc_printf( \
"<jemalloc>: %s:%d: Unreachable code reached\n", \
@@ -26,7 +29,7 @@
#endif
#ifndef not_implemented
-#define not_implemented() do { \
+#define not_implemented() do { \
if (config_debug) { \
malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
__FILE__, __LINE__); \
@@ -36,10 +39,18 @@
#endif
#ifndef assert_not_implemented
-#define assert_not_implemented(e) do { \
- if (unlikely(config_debug && !(e))) \
+#define assert_not_implemented(e) do { \
+ if (unlikely(config_debug && !(e))) { \
not_implemented(); \
+ } \
} while (0)
#endif
-
+/* Use to assert a particular configuration, e.g., cassert(config_debug). */
+#ifndef cassert
+#define cassert(c) do { \
+ if (unlikely(!(c))) { \
+ not_reached(); \
+ } \
+} while (0)
+#endif
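
The cassert() macro added above asserts a build-time configuration: unlike assert(), which only fires when config_debug is on, cassert() trips whenever a code path that the configuration claims cannot be reached is reached. A standalone illustration of that pattern, using invented toy_* names and a fake config flag:

#include <stdio.h>
#include <stdlib.h>

static const int toy_config_prof = 0;	/* Pretend profiling was compiled out. */

#define toy_not_reached() do {						\
	fprintf(stderr, "%s:%d: unreachable code reached\n",		\
	    __FILE__, __LINE__);					\
	abort();							\
} while (0)

#define toy_cassert(c) do {						\
	if (!(c)) {							\
		toy_not_reached();					\
	}								\
} while (0)

static void
prof_only_entry_point(void) {
	/* Would abort if called, because this "build" lacks profiling. */
	toy_cassert(toy_config_prof);
}

int
main(void) {
	if (toy_config_prof) {
		prof_only_entry_point();
	}
	printf("profiling disabled; prof-only path never entered\n");
	return 0;
}
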
diff --git a/contrib/jemalloc/include/jemalloc/internal/atomic.h b/contrib/jemalloc/include/jemalloc/internal/atomic.h
index 3f15ea1499ca..adadb1a3acb8 100644
--- a/contrib/jemalloc/include/jemalloc/internal/atomic.h
+++ b/contrib/jemalloc/include/jemalloc/internal/atomic.h
@@ -1,651 +1,77 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-#define atomic_read_uint64(p) atomic_add_uint64(p, 0)
-#define atomic_read_uint32(p) atomic_add_uint32(p, 0)
-#define atomic_read_p(p) atomic_add_p(p, NULL)
-#define atomic_read_z(p) atomic_add_z(p, 0)
-#define atomic_read_u(p) atomic_add_u(p, 0)
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
+#ifndef JEMALLOC_INTERNAL_ATOMIC_H
+#define JEMALLOC_INTERNAL_ATOMIC_H
+
+#define ATOMIC_INLINE static inline
+
+#if defined(JEMALLOC_GCC_ATOMIC_ATOMICS)
+# include "jemalloc/internal/atomic_gcc_atomic.h"
+#elif defined(JEMALLOC_GCC_SYNC_ATOMICS)
+# include "jemalloc/internal/atomic_gcc_sync.h"
+#elif defined(_MSC_VER)
+# include "jemalloc/internal/atomic_msvc.h"
+#elif defined(JEMALLOC_C11_ATOMICS)
+# include "jemalloc/internal/atomic_c11.h"
+#else
+# error "Don't have atomics implemented on this platform."
+#endif
/*
- * All arithmetic functions return the arithmetic result of the atomic
- * operation. Some atomic operation APIs return the value prior to mutation, in
- * which case the following functions must redundantly compute the result so
- * that it can be returned. These functions are normally inlined, so the extra
- * operations can be optimized away if the return values aren't used by the
- * callers.
+ * This header gives more or less a backport of C11 atomics. The user can write
+ * JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_sizeof_type); to generate
+ * counterparts of the C11 atomic functions for type, as so:
+ * JEMALLOC_GENERATE_ATOMICS(int *, pi, 3);
+ * and then write things like:
+ * int *some_ptr;
+ * atomic_pi_t atomic_ptr_to_int;
+ * atomic_store_pi(&atomic_ptr_to_int, some_ptr, ATOMIC_RELAXED);
+ * int *prev_value = atomic_exchange_pi(&ptr_to_int, NULL, ATOMIC_ACQ_REL);
+ * assert(some_ptr == prev_value);
+ * and expect things to work in the obvious way.
*
- * <t> atomic_read_<t>(<t> *p) { return (*p); }
- * <t> atomic_add_<t>(<t> *p, <t> x) { return (*p += x); }
- * <t> atomic_sub_<t>(<t> *p, <t> x) { return (*p -= x); }
- * bool atomic_cas_<t>(<t> *p, <t> c, <t> s)
- * {
- * if (*p != c)
- * return (true);
- * *p = s;
- * return (false);
- * }
- * void atomic_write_<t>(<t> *p, <t> x) { *p = x; }
+ * Also included (with naming differences to avoid conflicts with the standard
+ * library):
+ * atomic_fence(atomic_memory_order_t) (mimics C11's atomic_thread_fence).
+ * ATOMIC_INIT (mimics C11's ATOMIC_VAR_INIT).
*/
-#ifndef JEMALLOC_ENABLE_INLINE
-uint64_t atomic_add_uint64(uint64_t *p, uint64_t x);
-uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x);
-bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s);
-void atomic_write_uint64(uint64_t *p, uint64_t x);
-uint32_t atomic_add_uint32(uint32_t *p, uint32_t x);
-uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x);
-bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s);
-void atomic_write_uint32(uint32_t *p, uint32_t x);
-void *atomic_add_p(void **p, void *x);
-void *atomic_sub_p(void **p, void *x);
-bool atomic_cas_p(void **p, void *c, void *s);
-void atomic_write_p(void **p, const void *x);
-size_t atomic_add_z(size_t *p, size_t x);
-size_t atomic_sub_z(size_t *p, size_t x);
-bool atomic_cas_z(size_t *p, size_t c, size_t s);
-void atomic_write_z(size_t *p, size_t x);
-unsigned atomic_add_u(unsigned *p, unsigned x);
-unsigned atomic_sub_u(unsigned *p, unsigned x);
-bool atomic_cas_u(unsigned *p, unsigned c, unsigned s);
-void atomic_write_u(unsigned *p, unsigned x);
-#endif
+/*
+ * Pure convenience, so that we don't have to type "atomic_memory_order_"
+ * quite so often.
+ */
+#define ATOMIC_RELAXED atomic_memory_order_relaxed
+#define ATOMIC_ACQUIRE atomic_memory_order_acquire
+#define ATOMIC_RELEASE atomic_memory_order_release
+#define ATOMIC_ACQ_REL atomic_memory_order_acq_rel
+#define ATOMIC_SEQ_CST atomic_memory_order_seq_cst
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_))
-/******************************************************************************/
-/* 64-bit operations. */
+/*
+ * Not all platforms have 64-bit atomics. If we do, this #define exposes that
+ * fact.
+ */
#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
-# if (defined(__amd64__) || defined(__x86_64__))
-JEMALLOC_INLINE uint64_t
-atomic_add_uint64(uint64_t *p, uint64_t x)
-{
- uint64_t t = x;
-
- asm volatile (
- "lock; xaddq %0, %1;"
- : "+r" (t), "=m" (*p) /* Outputs. */
- : "m" (*p) /* Inputs. */
- );
-
- return (t + x);
-}
-
-JEMALLOC_INLINE uint64_t
-atomic_sub_uint64(uint64_t *p, uint64_t x)
-{
- uint64_t t;
-
- x = (uint64_t)(-(int64_t)x);
- t = x;
- asm volatile (
- "lock; xaddq %0, %1;"
- : "+r" (t), "=m" (*p) /* Outputs. */
- : "m" (*p) /* Inputs. */
- );
-
- return (t + x);
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
-{
- uint8_t success;
-
- asm volatile (
- "lock; cmpxchgq %4, %0;"
- "sete %1;"
- : "=m" (*p), "=a" (success) /* Outputs. */
- : "m" (*p), "a" (c), "r" (s) /* Inputs. */
- : "memory" /* Clobbers. */
- );
-
- return (!(bool)success);
-}
-
-JEMALLOC_INLINE void
-atomic_write_uint64(uint64_t *p, uint64_t x)
-{
-
- asm volatile (
- "xchgq %1, %0;" /* Lock is implied by xchgq. */
- : "=m" (*p), "+r" (x) /* Outputs. */
- : "m" (*p) /* Inputs. */
- : "memory" /* Clobbers. */
- );
-}
-# elif (defined(JEMALLOC_C11ATOMICS))
-JEMALLOC_INLINE uint64_t
-atomic_add_uint64(uint64_t *p, uint64_t x)
-{
- volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
- return (atomic_fetch_add(a, x) + x);
-}
-
-JEMALLOC_INLINE uint64_t
-atomic_sub_uint64(uint64_t *p, uint64_t x)
-{
- volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
- return (atomic_fetch_sub(a, x) - x);
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
-{
- volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
- return (!atomic_compare_exchange_strong(a, &c, s));
-}
-
-JEMALLOC_INLINE void
-atomic_write_uint64(uint64_t *p, uint64_t x)
-{
- volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
- atomic_store(a, x);
-}
-# elif (defined(JEMALLOC_ATOMIC9))
-JEMALLOC_INLINE uint64_t
-atomic_add_uint64(uint64_t *p, uint64_t x)
-{
-
- /*
- * atomic_fetchadd_64() doesn't exist, but we only ever use this
- * function on LP64 systems, so atomic_fetchadd_long() will do.
- */
- assert(sizeof(uint64_t) == sizeof(unsigned long));
-
- return (atomic_fetchadd_long(p, (unsigned long)x) + x);
-}
-
-JEMALLOC_INLINE uint64_t
-atomic_sub_uint64(uint64_t *p, uint64_t x)
-{
-
- assert(sizeof(uint64_t) == sizeof(unsigned long));
-
- return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x);
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
-{
-
- assert(sizeof(uint64_t) == sizeof(unsigned long));
-
- return (!atomic_cmpset_long(p, (unsigned long)c, (unsigned long)s));
-}
-
-JEMALLOC_INLINE void
-atomic_write_uint64(uint64_t *p, uint64_t x)
-{
-
- assert(sizeof(uint64_t) == sizeof(unsigned long));
-
- atomic_store_rel_long(p, x);
-}
-# elif (defined(JEMALLOC_OSATOMIC))
-JEMALLOC_INLINE uint64_t
-atomic_add_uint64(uint64_t *p, uint64_t x)
-{
-
- return (OSAtomicAdd64((int64_t)x, (int64_t *)p));
-}
-
-JEMALLOC_INLINE uint64_t
-atomic_sub_uint64(uint64_t *p, uint64_t x)
-{
-
- return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p));
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
-{
-
- return (!OSAtomicCompareAndSwap64(c, s, (int64_t *)p));
-}
-
-JEMALLOC_INLINE void
-atomic_write_uint64(uint64_t *p, uint64_t x)
-{
- uint64_t o;
-
- /*The documented OSAtomic*() API does not expose an atomic exchange. */
- do {
- o = atomic_read_uint64(p);
- } while (atomic_cas_uint64(p, o, x));
-}
-# elif (defined(_MSC_VER))
-JEMALLOC_INLINE uint64_t
-atomic_add_uint64(uint64_t *p, uint64_t x)
-{
-
- return (InterlockedExchangeAdd64(p, x) + x);
-}
-
-JEMALLOC_INLINE uint64_t
-atomic_sub_uint64(uint64_t *p, uint64_t x)
-{
-
- return (InterlockedExchangeAdd64(p, -((int64_t)x)) - x);
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
-{
- uint64_t o;
-
- o = InterlockedCompareExchange64(p, s, c);
- return (o != c);
-}
-
-JEMALLOC_INLINE void
-atomic_write_uint64(uint64_t *p, uint64_t x)
-{
-
- InterlockedExchange64(p, x);
-}
-# elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) || \
- defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
-JEMALLOC_INLINE uint64_t
-atomic_add_uint64(uint64_t *p, uint64_t x)
-{
-
- return (__sync_add_and_fetch(p, x));
-}
-
-JEMALLOC_INLINE uint64_t
-atomic_sub_uint64(uint64_t *p, uint64_t x)
-{
-
- return (__sync_sub_and_fetch(p, x));
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
-{
-
- return (!__sync_bool_compare_and_swap(p, c, s));
-}
-
-JEMALLOC_INLINE void
-atomic_write_uint64(uint64_t *p, uint64_t x)
-{
-
- __sync_lock_test_and_set(p, x);
-}
-# else
-# error "Missing implementation for 64-bit atomic operations"
-# endif
+# define JEMALLOC_ATOMIC_U64
#endif
-/******************************************************************************/
-/* 32-bit operations. */
-#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
-JEMALLOC_INLINE uint32_t
-atomic_add_uint32(uint32_t *p, uint32_t x)
-{
- uint32_t t = x;
-
- asm volatile (
- "lock; xaddl %0, %1;"
- : "+r" (t), "=m" (*p) /* Outputs. */
- : "m" (*p) /* Inputs. */
- );
-
- return (t + x);
-}
-
-JEMALLOC_INLINE uint32_t
-atomic_sub_uint32(uint32_t *p, uint32_t x)
-{
- uint32_t t;
-
- x = (uint32_t)(-(int32_t)x);
- t = x;
- asm volatile (
- "lock; xaddl %0, %1;"
- : "+r" (t), "=m" (*p) /* Outputs. */
- : "m" (*p) /* Inputs. */
- );
-
- return (t + x);
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
-{
- uint8_t success;
-
- asm volatile (
- "lock; cmpxchgl %4, %0;"
- "sete %1;"
- : "=m" (*p), "=a" (success) /* Outputs. */
- : "m" (*p), "a" (c), "r" (s) /* Inputs. */
- : "memory"
- );
-
- return (!(bool)success);
-}
-
-JEMALLOC_INLINE void
-atomic_write_uint32(uint32_t *p, uint32_t x)
-{
-
- asm volatile (
- "xchgl %1, %0;" /* Lock is implied by xchgl. */
- : "=m" (*p), "+r" (x) /* Outputs. */
- : "m" (*p) /* Inputs. */
- : "memory" /* Clobbers. */
- );
-}
-# elif (defined(JEMALLOC_C11ATOMICS))
-JEMALLOC_INLINE uint32_t
-atomic_add_uint32(uint32_t *p, uint32_t x)
-{
- volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
- return (atomic_fetch_add(a, x) + x);
-}
+JEMALLOC_GENERATE_ATOMICS(void *, p, LG_SIZEOF_PTR)
-JEMALLOC_INLINE uint32_t
-atomic_sub_uint32(uint32_t *p, uint32_t x)
-{
- volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
- return (atomic_fetch_sub(a, x) - x);
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
-{
- volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
- return (!atomic_compare_exchange_strong(a, &c, s));
-}
-
-JEMALLOC_INLINE void
-atomic_write_uint32(uint32_t *p, uint32_t x)
-{
- volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
- atomic_store(a, x);
-}
-#elif (defined(JEMALLOC_ATOMIC9))
-JEMALLOC_INLINE uint32_t
-atomic_add_uint32(uint32_t *p, uint32_t x)
-{
-
- return (atomic_fetchadd_32(p, x) + x);
-}
-
-JEMALLOC_INLINE uint32_t
-atomic_sub_uint32(uint32_t *p, uint32_t x)
-{
-
- return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x);
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
-{
-
- return (!atomic_cmpset_32(p, c, s));
-}
-
-JEMALLOC_INLINE void
-atomic_write_uint32(uint32_t *p, uint32_t x)
-{
-
- atomic_store_rel_32(p, x);
-}
-#elif (defined(JEMALLOC_OSATOMIC))
-JEMALLOC_INLINE uint32_t
-atomic_add_uint32(uint32_t *p, uint32_t x)
-{
-
- return (OSAtomicAdd32((int32_t)x, (int32_t *)p));
-}
-
-JEMALLOC_INLINE uint32_t
-atomic_sub_uint32(uint32_t *p, uint32_t x)
-{
-
- return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p));
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
-{
-
- return (!OSAtomicCompareAndSwap32(c, s, (int32_t *)p));
-}
-
-JEMALLOC_INLINE void
-atomic_write_uint32(uint32_t *p, uint32_t x)
-{
- uint32_t o;
-
- /*The documented OSAtomic*() API does not expose an atomic exchange. */
- do {
- o = atomic_read_uint32(p);
- } while (atomic_cas_uint32(p, o, x));
-}
-#elif (defined(_MSC_VER))
-JEMALLOC_INLINE uint32_t
-atomic_add_uint32(uint32_t *p, uint32_t x)
-{
-
- return (InterlockedExchangeAdd(p, x) + x);
-}
-
-JEMALLOC_INLINE uint32_t
-atomic_sub_uint32(uint32_t *p, uint32_t x)
-{
-
- return (InterlockedExchangeAdd(p, -((int32_t)x)) - x);
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
-{
- uint32_t o;
-
- o = InterlockedCompareExchange(p, s, c);
- return (o != c);
-}
-
-JEMALLOC_INLINE void
-atomic_write_uint32(uint32_t *p, uint32_t x)
-{
-
- InterlockedExchange(p, x);
-}
-#elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || \
- defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
-JEMALLOC_INLINE uint32_t
-atomic_add_uint32(uint32_t *p, uint32_t x)
-{
-
- return (__sync_add_and_fetch(p, x));
-}
-
-JEMALLOC_INLINE uint32_t
-atomic_sub_uint32(uint32_t *p, uint32_t x)
-{
-
- return (__sync_sub_and_fetch(p, x));
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
-{
-
- return (!__sync_bool_compare_and_swap(p, c, s));
-}
-
-JEMALLOC_INLINE void
-atomic_write_uint32(uint32_t *p, uint32_t x)
-{
-
- __sync_lock_test_and_set(p, x);
-}
-#else
-# error "Missing implementation for 32-bit atomic operations"
-#endif
-
-/******************************************************************************/
-/* Pointer operations. */
-JEMALLOC_INLINE void *
-atomic_add_p(void **p, void *x)
-{
-
-#if (LG_SIZEOF_PTR == 3)
- return ((void *)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
-#elif (LG_SIZEOF_PTR == 2)
- return ((void *)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
-#endif
-}
-
-JEMALLOC_INLINE void *
-atomic_sub_p(void **p, void *x)
-{
-
-#if (LG_SIZEOF_PTR == 3)
- return ((void *)atomic_add_uint64((uint64_t *)p,
- (uint64_t)-((int64_t)x)));
-#elif (LG_SIZEOF_PTR == 2)
- return ((void *)atomic_add_uint32((uint32_t *)p,
- (uint32_t)-((int32_t)x)));
-#endif
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_p(void **p, void *c, void *s)
-{
-
-#if (LG_SIZEOF_PTR == 3)
- return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
-#elif (LG_SIZEOF_PTR == 2)
- return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
-#endif
-}
-
-JEMALLOC_INLINE void
-atomic_write_p(void **p, const void *x)
-{
-
-#if (LG_SIZEOF_PTR == 3)
- atomic_write_uint64((uint64_t *)p, (uint64_t)x);
-#elif (LG_SIZEOF_PTR == 2)
- atomic_write_uint32((uint32_t *)p, (uint32_t)x);
-#endif
-}
-
-/******************************************************************************/
-/* size_t operations. */
-JEMALLOC_INLINE size_t
-atomic_add_z(size_t *p, size_t x)
-{
-
-#if (LG_SIZEOF_PTR == 3)
- return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
-#elif (LG_SIZEOF_PTR == 2)
- return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
-#endif
-}
-
-JEMALLOC_INLINE size_t
-atomic_sub_z(size_t *p, size_t x)
-{
-
-#if (LG_SIZEOF_PTR == 3)
- return ((size_t)atomic_add_uint64((uint64_t *)p,
- (uint64_t)-((int64_t)x)));
-#elif (LG_SIZEOF_PTR == 2)
- return ((size_t)atomic_add_uint32((uint32_t *)p,
- (uint32_t)-((int32_t)x)));
-#endif
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_z(size_t *p, size_t c, size_t s)
-{
-
-#if (LG_SIZEOF_PTR == 3)
- return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
-#elif (LG_SIZEOF_PTR == 2)
- return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
-#endif
-}
-
-JEMALLOC_INLINE void
-atomic_write_z(size_t *p, size_t x)
-{
-
-#if (LG_SIZEOF_PTR == 3)
- atomic_write_uint64((uint64_t *)p, (uint64_t)x);
-#elif (LG_SIZEOF_PTR == 2)
- atomic_write_uint32((uint32_t *)p, (uint32_t)x);
-#endif
-}
-
-/******************************************************************************/
-/* unsigned operations. */
-JEMALLOC_INLINE unsigned
-atomic_add_u(unsigned *p, unsigned x)
-{
-
-#if (LG_SIZEOF_INT == 3)
- return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
-#elif (LG_SIZEOF_INT == 2)
- return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
-#endif
-}
-
-JEMALLOC_INLINE unsigned
-atomic_sub_u(unsigned *p, unsigned x)
-{
+/*
+ * There's no actual guarantee that sizeof(bool) == 1, but it's true on the only
+ * platform that actually needs to know the size, MSVC.
+ */
+JEMALLOC_GENERATE_ATOMICS(bool, b, 0)
-#if (LG_SIZEOF_INT == 3)
- return ((unsigned)atomic_add_uint64((uint64_t *)p,
- (uint64_t)-((int64_t)x)));
-#elif (LG_SIZEOF_INT == 2)
- return ((unsigned)atomic_add_uint32((uint32_t *)p,
- (uint32_t)-((int32_t)x)));
-#endif
-}
+JEMALLOC_GENERATE_INT_ATOMICS(unsigned, u, LG_SIZEOF_INT)
-JEMALLOC_INLINE bool
-atomic_cas_u(unsigned *p, unsigned c, unsigned s)
-{
+JEMALLOC_GENERATE_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR)
-#if (LG_SIZEOF_INT == 3)
- return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
-#elif (LG_SIZEOF_INT == 2)
- return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
-#endif
-}
+JEMALLOC_GENERATE_INT_ATOMICS(ssize_t, zd, LG_SIZEOF_PTR)
-JEMALLOC_INLINE void
-atomic_write_u(unsigned *p, unsigned x)
-{
+JEMALLOC_GENERATE_INT_ATOMICS(uint32_t, u32, 2)
-#if (LG_SIZEOF_INT == 3)
- atomic_write_uint64((uint64_t *)p, (uint64_t)x);
-#elif (LG_SIZEOF_INT == 2)
- atomic_write_uint32((uint32_t *)p, (uint32_t)x);
+#ifdef JEMALLOC_ATOMIC_U64
+JEMALLOC_GENERATE_INT_ATOMICS(uint64_t, u64, 3)
#endif
-}
-/******************************************************************************/
-#endif
+#undef ATOMIC_INLINE
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
+#endif /* JEMALLOC_INTERNAL_ATOMIC_H */
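
The comment in the new atomic.h documents the generated API by example. The same sequence expressed with standard C11 atomics, which the generated wrappers mirror (e.g. atomic_store_zu(..., ATOMIC_RELAXED) corresponds to atomic_store_explicit(..., memory_order_relaxed)), compiles stand-alone:

#include <assert.h>
#include <stdatomic.h>
#include <stddef.h>

int
main(void) {
	int value = 42;
	int *some_ptr = &value;
	_Atomic(int *) atomic_ptr_to_int;

	/* Publish the pointer with relaxed ordering. */
	atomic_store_explicit(&atomic_ptr_to_int, some_ptr,
	    memory_order_relaxed);
	/* Swap it out and observe the previous value. */
	int *prev_value = atomic_exchange_explicit(&atomic_ptr_to_int, NULL,
	    memory_order_acq_rel);
	assert(some_ptr == prev_value);
	return 0;
}
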
diff --git a/contrib/jemalloc/include/jemalloc/internal/atomic_c11.h b/contrib/jemalloc/include/jemalloc/internal/atomic_c11.h
new file mode 100644
index 000000000000..a5f9313a619d
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/atomic_c11.h
@@ -0,0 +1,97 @@
+#ifndef JEMALLOC_INTERNAL_ATOMIC_C11_H
+#define JEMALLOC_INTERNAL_ATOMIC_C11_H
+
+#include <stdatomic.h>
+
+#define ATOMIC_INIT(...) ATOMIC_VAR_INIT(__VA_ARGS__)
+
+#define atomic_memory_order_t memory_order
+#define atomic_memory_order_relaxed memory_order_relaxed
+#define atomic_memory_order_acquire memory_order_acquire
+#define atomic_memory_order_release memory_order_release
+#define atomic_memory_order_acq_rel memory_order_acq_rel
+#define atomic_memory_order_seq_cst memory_order_seq_cst
+
+#define atomic_fence atomic_thread_fence
+
+#define JEMALLOC_GENERATE_ATOMICS(type, short_type, \
+ /* unused */ lg_size) \
+typedef _Atomic(type) atomic_##short_type##_t; \
+ \
+ATOMIC_INLINE type \
+atomic_load_##short_type(const atomic_##short_type##_t *a, \
+ atomic_memory_order_t mo) { \
+ /* \
+ * A strict interpretation of the C standard prevents \
+ * atomic_load from taking a const argument, but it's \
+ * convenient for our purposes. This cast is a workaround. \
+ */ \
+ atomic_##short_type##_t* a_nonconst = \
+ (atomic_##short_type##_t*)a; \
+ return atomic_load_explicit(a_nonconst, mo); \
+} \
+ \
+ATOMIC_INLINE void \
+atomic_store_##short_type(atomic_##short_type##_t *a, \
+ type val, atomic_memory_order_t mo) { \
+ atomic_store_explicit(a, val, mo); \
+} \
+ \
+ATOMIC_INLINE type \
+atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \
+ atomic_memory_order_t mo) { \
+ return atomic_exchange_explicit(a, val, mo); \
+} \
+ \
+ATOMIC_INLINE bool \
+atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \
+ type *expected, type desired, atomic_memory_order_t success_mo, \
+ atomic_memory_order_t failure_mo) { \
+ return atomic_compare_exchange_weak_explicit(a, expected, \
+ desired, success_mo, failure_mo); \
+} \
+ \
+ATOMIC_INLINE bool \
+atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
+ type *expected, type desired, atomic_memory_order_t success_mo, \
+ atomic_memory_order_t failure_mo) { \
+ return atomic_compare_exchange_strong_explicit(a, expected, \
+ desired, success_mo, failure_mo); \
+}
+
+/*
+ * Integral types have some special operations available that non-integral ones
+ * lack.
+ */
+#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \
+ /* unused */ lg_size) \
+JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \
+ \
+ATOMIC_INLINE type \
+atomic_fetch_add_##short_type(atomic_##short_type##_t *a, \
+ type val, atomic_memory_order_t mo) { \
+ return atomic_fetch_add_explicit(a, val, mo); \
+} \
+ \
+ATOMIC_INLINE type \
+atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, \
+ type val, atomic_memory_order_t mo) { \
+ return atomic_fetch_sub_explicit(a, val, mo); \
+} \
+ATOMIC_INLINE type \
+atomic_fetch_and_##short_type(atomic_##short_type##_t *a, \
+ type val, atomic_memory_order_t mo) { \
+ return atomic_fetch_and_explicit(a, val, mo); \
+} \
+ATOMIC_INLINE type \
+atomic_fetch_or_##short_type(atomic_##short_type##_t *a, \
+ type val, atomic_memory_order_t mo) { \
+ return atomic_fetch_or_explicit(a, val, mo); \
+} \
+ATOMIC_INLINE type \
+atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, \
+ type val, atomic_memory_order_t mo) { \
+ return atomic_fetch_xor_explicit(a, val, mo); \
+}
+
+#endif /* JEMALLOC_INTERNAL_ATOMIC_C11_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/atomic_gcc_atomic.h b/contrib/jemalloc/include/jemalloc/internal/atomic_gcc_atomic.h
new file mode 100644
index 000000000000..6b73a14f81de
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/atomic_gcc_atomic.h
@@ -0,0 +1,127 @@
+#ifndef JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H
+#define JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H
+
+#include "jemalloc/internal/assert.h"
+
+#define ATOMIC_INIT(...) {__VA_ARGS__}
+
+typedef enum {
+ atomic_memory_order_relaxed,
+ atomic_memory_order_acquire,
+ atomic_memory_order_release,
+ atomic_memory_order_acq_rel,
+ atomic_memory_order_seq_cst
+} atomic_memory_order_t;
+
+ATOMIC_INLINE int
+atomic_enum_to_builtin(atomic_memory_order_t mo) {
+ switch (mo) {
+ case atomic_memory_order_relaxed:
+ return __ATOMIC_RELAXED;
+ case atomic_memory_order_acquire:
+ return __ATOMIC_ACQUIRE;
+ case atomic_memory_order_release:
+ return __ATOMIC_RELEASE;
+ case atomic_memory_order_acq_rel:
+ return __ATOMIC_ACQ_REL;
+ case atomic_memory_order_seq_cst:
+ return __ATOMIC_SEQ_CST;
+ }
+ /* Can't happen; the switch is exhaustive. */
+ not_reached();
+}
+
+ATOMIC_INLINE void
+atomic_fence(atomic_memory_order_t mo) {
+ __atomic_thread_fence(atomic_enum_to_builtin(mo));
+}
+
+#define JEMALLOC_GENERATE_ATOMICS(type, short_type, \
+ /* unused */ lg_size) \
+typedef struct { \
+ type repr; \
+} atomic_##short_type##_t; \
+ \
+ATOMIC_INLINE type \
+atomic_load_##short_type(const atomic_##short_type##_t *a, \
+ atomic_memory_order_t mo) { \
+ type result; \
+ __atomic_load(&a->repr, &result, atomic_enum_to_builtin(mo)); \
+ return result; \
+} \
+ \
+ATOMIC_INLINE void \
+atomic_store_##short_type(atomic_##short_type##_t *a, type val, \
+ atomic_memory_order_t mo) { \
+ __atomic_store(&a->repr, &val, atomic_enum_to_builtin(mo)); \
+} \
+ \
+ATOMIC_INLINE type \
+atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \
+ atomic_memory_order_t mo) { \
+ type result; \
+ __atomic_exchange(&a->repr, &val, &result, \
+ atomic_enum_to_builtin(mo)); \
+ return result; \
+} \
+ \
+ATOMIC_INLINE bool \
+atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \
+ type *expected, type desired, atomic_memory_order_t success_mo, \
+ atomic_memory_order_t failure_mo) { \
+ return __atomic_compare_exchange(&a->repr, expected, &desired, \
+ true, atomic_enum_to_builtin(success_mo), \
+ atomic_enum_to_builtin(failure_mo)); \
+} \
+ \
+ATOMIC_INLINE bool \
+atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
+ type *expected, type desired, atomic_memory_order_t success_mo, \
+ atomic_memory_order_t failure_mo) { \
+ return __atomic_compare_exchange(&a->repr, expected, &desired, \
+ false, \
+ atomic_enum_to_builtin(success_mo), \
+ atomic_enum_to_builtin(failure_mo)); \
+}
+
+
+#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \
+ /* unused */ lg_size) \
+JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \
+ \
+ATOMIC_INLINE type \
+atomic_fetch_add_##short_type(atomic_##short_type##_t *a, type val, \
+ atomic_memory_order_t mo) { \
+ return __atomic_fetch_add(&a->repr, val, \
+ atomic_enum_to_builtin(mo)); \
+} \
+ \
+ATOMIC_INLINE type \
+atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, type val, \
+ atomic_memory_order_t mo) { \
+ return __atomic_fetch_sub(&a->repr, val, \
+ atomic_enum_to_builtin(mo)); \
+} \
+ \
+ATOMIC_INLINE type \
+atomic_fetch_and_##short_type(atomic_##short_type##_t *a, type val, \
+ atomic_memory_order_t mo) { \
+ return __atomic_fetch_and(&a->repr, val, \
+ atomic_enum_to_builtin(mo)); \
+} \
+ \
+ATOMIC_INLINE type \
+atomic_fetch_or_##short_type(atomic_##short_type##_t *a, type val, \
+ atomic_memory_order_t mo) { \
+ return __atomic_fetch_or(&a->repr, val, \
+ atomic_enum_to_builtin(mo)); \
+} \
+ \
+ATOMIC_INLINE type \
+atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, type val, \
+ atomic_memory_order_t mo) { \
+ return __atomic_fetch_xor(&a->repr, val, \
+ atomic_enum_to_builtin(mo)); \
+}
+
+#endif /* JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/atomic_gcc_sync.h b/contrib/jemalloc/include/jemalloc/internal/atomic_gcc_sync.h
new file mode 100644
index 000000000000..30846e4d27bd
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/atomic_gcc_sync.h
@@ -0,0 +1,191 @@
+#ifndef JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H
+#define JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H
+
+#define ATOMIC_INIT(...) {__VA_ARGS__}
+
+typedef enum {
+ atomic_memory_order_relaxed,
+ atomic_memory_order_acquire,
+ atomic_memory_order_release,
+ atomic_memory_order_acq_rel,
+ atomic_memory_order_seq_cst
+} atomic_memory_order_t;
+
+ATOMIC_INLINE void
+atomic_fence(atomic_memory_order_t mo) {
+ /* Easy cases first: no barrier, and full barrier. */
+ if (mo == atomic_memory_order_relaxed) {
+ asm volatile("" ::: "memory");
+ return;
+ }
+ if (mo == atomic_memory_order_seq_cst) {
+ asm volatile("" ::: "memory");
+ __sync_synchronize();
+ asm volatile("" ::: "memory");
+ return;
+ }
+ asm volatile("" ::: "memory");
+# if defined(__i386__) || defined(__x86_64__)
+ /* This is implicit on x86. */
+# elif defined(__ppc__)
+ asm volatile("lwsync");
+# elif defined(__sparc__) && defined(__arch64__)
+ if (mo == atomic_memory_order_acquire) {
+ asm volatile("membar #LoadLoad | #LoadStore");
+ } else if (mo == atomic_memory_order_release) {
+ asm volatile("membar #LoadStore | #StoreStore");
+ } else {
+ asm volatile("membar #LoadLoad | #LoadStore | #StoreStore");
+ }
+# else
+ __sync_synchronize();
+# endif
+ asm volatile("" ::: "memory");
+}
+
+/*
+ * A correct implementation of seq_cst loads and stores on weakly ordered
+ * architectures could do either of the following:
+ * 1. store() is weak-fence -> store -> strong fence, load() is load ->
+ * strong-fence.
+ * 2. store() is strong-fence -> store, load() is strong-fence -> load ->
+ * weak-fence.
+ * The tricky thing is, load() and store() above can be the load or store
+ * portions of a gcc __sync builtin, so we have to follow GCC's lead, which
+ * means going with strategy 2.
+ * On strongly ordered architectures, the natural strategy is to stick a strong
+ * fence after seq_cst stores, and have naked loads. So we want the strong
+ * fences in different places on different architectures.
+ * atomic_pre_sc_load_fence and atomic_post_sc_store_fence allow us to
+ * accomplish this.
+ */
+
+ATOMIC_INLINE void
+atomic_pre_sc_load_fence() {
+# if defined(__i386__) || defined(__x86_64__) || \
+ (defined(__sparc__) && defined(__arch64__))
+ atomic_fence(atomic_memory_order_relaxed);
+# else
+ atomic_fence(atomic_memory_order_seq_cst);
+# endif
+}
+
+ATOMIC_INLINE void
+atomic_post_sc_store_fence() {
+# if defined(__i386__) || defined(__x86_64__) || \
+ (defined(__sparc__) && defined(__arch64__))
+ atomic_fence(atomic_memory_order_seq_cst);
+# else
+ atomic_fence(atomic_memory_order_relaxed);
+# endif
+
+}
+
+#define JEMALLOC_GENERATE_ATOMICS(type, short_type, \
+ /* unused */ lg_size) \
+typedef struct { \
+ type volatile repr; \
+} atomic_##short_type##_t; \
+ \
+ATOMIC_INLINE type \
+atomic_load_##short_type(const atomic_##short_type##_t *a, \
+ atomic_memory_order_t mo) { \
+ if (mo == atomic_memory_order_seq_cst) { \
+ atomic_pre_sc_load_fence(); \
+ } \
+ type result = a->repr; \
+ if (mo != atomic_memory_order_relaxed) { \
+ atomic_fence(atomic_memory_order_acquire); \
+ } \
+ return result; \
+} \
+ \
+ATOMIC_INLINE void \
+atomic_store_##short_type(atomic_##short_type##_t *a, \
+ type val, atomic_memory_order_t mo) { \
+ if (mo != atomic_memory_order_relaxed) { \
+ atomic_fence(atomic_memory_order_release); \
+ } \
+ a->repr = val; \
+ if (mo == atomic_memory_order_seq_cst) { \
+ atomic_post_sc_store_fence(); \
+ } \
+} \
+ \
+ATOMIC_INLINE type \
+atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \
+ atomic_memory_order_t mo) { \
+ /* \
+ * Because of FreeBSD, we care about gcc 4.2, which doesn't have\
+ * an atomic exchange builtin. We fake it with a CAS loop. \
+ */ \
+ while (true) { \
+ type old = a->repr; \
+ if (__sync_bool_compare_and_swap(&a->repr, old, val)) { \
+ return old; \
+ } \
+ } \
+} \
+ \
+ATOMIC_INLINE bool \
+atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \
+ type *expected, type desired, atomic_memory_order_t success_mo, \
+ atomic_memory_order_t failure_mo) { \
+ type prev = __sync_val_compare_and_swap(&a->repr, *expected, \
+ desired); \
+ if (prev == *expected) { \
+ return true; \
+ } else { \
+ *expected = prev; \
+ return false; \
+ } \
+} \
+ATOMIC_INLINE bool \
+atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
+ type *expected, type desired, atomic_memory_order_t success_mo, \
+ atomic_memory_order_t failure_mo) { \
+ type prev = __sync_val_compare_and_swap(&a->repr, *expected, \
+ desired); \
+ if (prev == *expected) { \
+ return true; \
+ } else { \
+ *expected = prev; \
+ return false; \
+ } \
+}
+
+#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \
+ /* unused */ lg_size) \
+JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \
+ \
+ATOMIC_INLINE type \
+atomic_fetch_add_##short_type(atomic_##short_type##_t *a, type val, \
+ atomic_memory_order_t mo) { \
+ return __sync_fetch_and_add(&a->repr, val); \
+} \
+ \
+ATOMIC_INLINE type \
+atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, type val, \
+ atomic_memory_order_t mo) { \
+ return __sync_fetch_and_sub(&a->repr, val); \
+} \
+ \
+ATOMIC_INLINE type \
+atomic_fetch_and_##short_type(atomic_##short_type##_t *a, type val, \
+ atomic_memory_order_t mo) { \
+ return __sync_fetch_and_and(&a->repr, val); \
+} \
+ \
+ATOMIC_INLINE type \
+atomic_fetch_or_##short_type(atomic_##short_type##_t *a, type val, \
+ atomic_memory_order_t mo) { \
+ return __sync_fetch_and_or(&a->repr, val); \
+} \
+ \
+ATOMIC_INLINE type \
+atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, type val, \
+ atomic_memory_order_t mo) { \
+ return __sync_fetch_and_xor(&a->repr, val); \
+}
+
+#endif /* JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H */
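Editorial note: the JEMALLOC_GENERATE_ATOMICS / JEMALLOC_GENERATE_INT_ATOMICS macros above stamp out one wrapper set per type. The following minimal sketch (not part of the patch) assumes atomic.h instantiates JEMALLOC_GENERATE_INT_ATOMICS(uint32_t, u32, 2) and defines the ATOMIC_RELAXED/ACQUIRE/RELEASE ordering constants used elsewhere in this diff; note that with this __sync-based fallback the success/failure orderings passed to compare-exchange are effectively ignored, since the builtins are full barriers.

static atomic_u32_t refcount;	/* struct { uint32_t volatile repr; } */

static void
refcount_sketch(void) {
	atomic_store_u32(&refcount, 1, ATOMIC_RELAXED);
	uint32_t cur = atomic_load_u32(&refcount, ATOMIC_ACQUIRE);

	/* Maps directly onto __sync_fetch_and_add(); returns the old value. */
	uint32_t old = atomic_fetch_add_u32(&refcount, 1, ATOMIC_RELAXED);

	/* On CAS failure, *expected is refreshed with the observed value. */
	uint32_t expected = old + 1;
	while (!atomic_compare_exchange_weak_u32(&refcount, &expected,
	    expected + 1, ATOMIC_RELEASE, ATOMIC_RELAXED)) {
		/* Retry with the updated expected value. */
	}
	(void)cur;
}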
diff --git a/contrib/jemalloc/include/jemalloc/internal/background_thread_externs.h b/contrib/jemalloc/include/jemalloc/internal/background_thread_externs.h
new file mode 100644
index 000000000000..8b4b8471a951
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/background_thread_externs.h
@@ -0,0 +1,31 @@
+#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H
+#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H
+
+extern bool opt_background_thread;
+extern malloc_mutex_t background_thread_lock;
+extern atomic_b_t background_thread_enabled_state;
+extern size_t n_background_threads;
+extern background_thread_info_t *background_thread_info;
+extern bool can_enable_background_thread;
+
+bool background_thread_create(tsd_t *tsd, unsigned arena_ind);
+bool background_threads_enable(tsd_t *tsd);
+bool background_threads_disable(tsd_t *tsd);
+void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
+ arena_decay_t *decay, size_t npages_new);
+void background_thread_prefork0(tsdn_t *tsdn);
+void background_thread_prefork1(tsdn_t *tsdn);
+void background_thread_postfork_parent(tsdn_t *tsdn);
+void background_thread_postfork_child(tsdn_t *tsdn);
+bool background_thread_stats_read(tsdn_t *tsdn,
+ background_thread_stats_t *stats);
+void background_thread_ctl_init(tsdn_t *tsdn);
+
+#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
+extern int pthread_create_wrapper(pthread_t *__restrict, const pthread_attr_t *,
+ void *(*)(void *), void *__restrict);
+#endif
+bool background_thread_boot0(void);
+bool background_thread_boot1(tsdn_t *tsdn);
+
+#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/background_thread_inlines.h b/contrib/jemalloc/include/jemalloc/internal/background_thread_inlines.h
new file mode 100644
index 000000000000..fd5095f253f1
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/background_thread_inlines.h
@@ -0,0 +1,56 @@
+#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H
+#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H
+
+JEMALLOC_ALWAYS_INLINE bool
+background_thread_enabled(void) {
+ return atomic_load_b(&background_thread_enabled_state, ATOMIC_RELAXED);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+background_thread_enabled_set(tsdn_t *tsdn, bool state) {
+ malloc_mutex_assert_owner(tsdn, &background_thread_lock);
+ atomic_store_b(&background_thread_enabled_state, state, ATOMIC_RELAXED);
+}
+
+JEMALLOC_ALWAYS_INLINE background_thread_info_t *
+arena_background_thread_info_get(arena_t *arena) {
+ unsigned arena_ind = arena_ind_get(arena);
+ return &background_thread_info[arena_ind % ncpus];
+}
+
+JEMALLOC_ALWAYS_INLINE uint64_t
+background_thread_wakeup_time_get(background_thread_info_t *info) {
+ uint64_t next_wakeup = nstime_ns(&info->next_wakeup);
+ assert(atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE) ==
+ (next_wakeup == BACKGROUND_THREAD_INDEFINITE_SLEEP));
+ return next_wakeup;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+background_thread_wakeup_time_set(tsdn_t *tsdn, background_thread_info_t *info,
+ uint64_t wakeup_time) {
+ malloc_mutex_assert_owner(tsdn, &info->mtx);
+ atomic_store_b(&info->indefinite_sleep,
+ wakeup_time == BACKGROUND_THREAD_INDEFINITE_SLEEP, ATOMIC_RELEASE);
+ nstime_init(&info->next_wakeup, wakeup_time);
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+background_thread_indefinite_sleep(background_thread_info_t *info) {
+ return atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_background_thread_inactivity_check(tsdn_t *tsdn, arena_t *arena) {
+ if (!background_thread_enabled()) {
+ return;
+ }
+ background_thread_info_t *info =
+ arena_background_thread_info_get(arena);
+ if (background_thread_indefinite_sleep(info)) {
+ background_thread_interval_check(tsdn, arena,
+ &arena->decay_dirty, 0);
+ }
+}
+
+#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H */
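Editorial note: these inlines maintain the invariant asserted in background_thread_wakeup_time_get(): next_wakeup equals BACKGROUND_THREAD_INDEFINITE_SLEEP exactly when indefinite_sleep is set, published with release/acquire so arena_background_thread_inactivity_check() can decide whether to poke a parked thread. The writer-side sketch below (not from the patch) assumes the usual malloc_mutex_lock/unlock helpers and a hypothetical one_second_from_now_ns() helper, and only compiles inside jemalloc's internal build.

static void
background_thread_schedule_sketch(tsdn_t *tsdn,
    background_thread_info_t *info, bool park) {
	malloc_mutex_lock(tsdn, &info->mtx);
	if (park) {
		/* No wakeup scheduled: flips indefinite_sleep (release). */
		background_thread_wakeup_time_set(tsdn, info,
		    BACKGROUND_THREAD_INDEFINITE_SLEEP);
	} else {
		/* one_second_from_now_ns() is hypothetical, for illustration. */
		background_thread_wakeup_time_set(tsdn, info,
		    one_second_from_now_ns());
	}
	malloc_mutex_unlock(tsdn, &info->mtx);
}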
diff --git a/contrib/jemalloc/include/jemalloc/internal/background_thread_structs.h b/contrib/jemalloc/include/jemalloc/internal/background_thread_structs.h
new file mode 100644
index 000000000000..e69a7d022b41
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/background_thread_structs.h
@@ -0,0 +1,52 @@
+#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H
+#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H
+
+/* This file really combines "structs" and "types", but only transitionally. */
+
+#if defined(JEMALLOC_BACKGROUND_THREAD) || defined(JEMALLOC_LAZY_LOCK)
+# define JEMALLOC_PTHREAD_CREATE_WRAPPER
+#endif
+
+#define BACKGROUND_THREAD_INDEFINITE_SLEEP UINT64_MAX
+
+typedef enum {
+ background_thread_stopped,
+ background_thread_started,
+ /* Thread waits on the global lock when paused (for arena_reset). */
+ background_thread_paused,
+} background_thread_state_t;
+
+struct background_thread_info_s {
+#ifdef JEMALLOC_BACKGROUND_THREAD
+ /* Background thread is pthread specific. */
+ pthread_t thread;
+ pthread_cond_t cond;
+#endif
+ malloc_mutex_t mtx;
+ background_thread_state_t state;
+ /* When true, it means no wakeup scheduled. */
+ atomic_b_t indefinite_sleep;
+ /* Next scheduled wakeup time (absolute time in ns). */
+ nstime_t next_wakeup;
+ /*
+	 * Number of pages added since the last background thread run that
+	 * still need to be purged by the next wakeup.  This is adjusted on
+ * epoch advance, and is used to determine whether we should signal the
+ * background thread to wake up earlier.
+ */
+ size_t npages_to_purge_new;
+ /* Stats: total number of runs since started. */
+ uint64_t tot_n_runs;
+ /* Stats: total sleep time since started. */
+ nstime_t tot_sleep_time;
+};
+typedef struct background_thread_info_s background_thread_info_t;
+
+struct background_thread_stats_s {
+ size_t num_threads;
+ uint64_t num_runs;
+ nstime_t run_interval;
+};
+typedef struct background_thread_stats_s background_thread_stats_t;
+
+#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/base.h b/contrib/jemalloc/include/jemalloc/internal/base.h
deleted file mode 100644
index d6b81e162e00..000000000000
--- a/contrib/jemalloc/include/jemalloc/internal/base.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-void *base_alloc(tsdn_t *tsdn, size_t size);
-void base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident,
- size_t *mapped);
-bool base_boot(void);
-void base_prefork(tsdn_t *tsdn);
-void base_postfork_parent(tsdn_t *tsdn);
-void base_postfork_child(tsdn_t *tsdn);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
diff --git a/contrib/jemalloc/include/jemalloc/internal/base_externs.h b/contrib/jemalloc/include/jemalloc/internal/base_externs.h
new file mode 100644
index 000000000000..0a1114f4ac13
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/base_externs.h
@@ -0,0 +1,19 @@
+#ifndef JEMALLOC_INTERNAL_BASE_EXTERNS_H
+#define JEMALLOC_INTERNAL_BASE_EXTERNS_H
+
+base_t *b0get(void);
+base_t *base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
+void base_delete(base_t *base);
+extent_hooks_t *base_extent_hooks_get(base_t *base);
+extent_hooks_t *base_extent_hooks_set(base_t *base,
+ extent_hooks_t *extent_hooks);
+void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment);
+extent_t *base_alloc_extent(tsdn_t *tsdn, base_t *base);
+void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated,
+ size_t *resident, size_t *mapped);
+void base_prefork(tsdn_t *tsdn, base_t *base);
+void base_postfork_parent(tsdn_t *tsdn, base_t *base);
+void base_postfork_child(tsdn_t *tsdn, base_t *base);
+bool base_boot(tsdn_t *tsdn);
+
+#endif /* JEMALLOC_INTERNAL_BASE_EXTERNS_H */
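Editorial note: base_t is jemalloc's internal metadata allocator; allocations are bump-allocated out of increasingly large blocks and never individually freed, only released wholesale by base_delete(). A lifecycle sketch (in-tree only, not part of the patch); the arena index 42, the 64-byte alignment, and the use of extent_hooks_default are illustrative choices.

static void
base_lifecycle_sketch(tsdn_t *tsdn) {
	base_t *base = base_new(tsdn, 42,
	    (extent_hooks_t *)&extent_hooks_default);
	if (base == NULL) {
		return;
	}

	/* Metadata is carved out of the current block and never freed back. */
	void *md = base_alloc(tsdn, base, 256, 64);
	(void)md;

	size_t allocated, resident, mapped;
	base_stats_get(tsdn, base, &allocated, &resident, &mapped);

	/* Unmaps every block owned by this base at once. */
	base_delete(base);
}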
diff --git a/contrib/jemalloc/include/jemalloc/internal/base_inlines.h b/contrib/jemalloc/include/jemalloc/internal/base_inlines.h
new file mode 100644
index 000000000000..931560bfaeab
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/base_inlines.h
@@ -0,0 +1,9 @@
+#ifndef JEMALLOC_INTERNAL_BASE_INLINES_H
+#define JEMALLOC_INTERNAL_BASE_INLINES_H
+
+static inline unsigned
+base_ind_get(const base_t *base) {
+ return base->ind;
+}
+
+#endif /* JEMALLOC_INTERNAL_BASE_INLINES_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/base_structs.h b/contrib/jemalloc/include/jemalloc/internal/base_structs.h
new file mode 100644
index 000000000000..18e227bd5a62
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/base_structs.h
@@ -0,0 +1,55 @@
+#ifndef JEMALLOC_INTERNAL_BASE_STRUCTS_H
+#define JEMALLOC_INTERNAL_BASE_STRUCTS_H
+
+#include "jemalloc/internal/jemalloc_internal_types.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/size_classes.h"
+
+/* Embedded at the beginning of every block of base-managed virtual memory. */
+struct base_block_s {
+ /* Total size of block's virtual memory mapping. */
+ size_t size;
+
+ /* Next block in list of base's blocks. */
+ base_block_t *next;
+
+ /* Tracks unused trailing space. */
+ extent_t extent;
+};
+
+struct base_s {
+ /* Associated arena's index within the arenas array. */
+ unsigned ind;
+
+ /*
+ * User-configurable extent hook functions. Points to an
+ * extent_hooks_t.
+ */
+ atomic_p_t extent_hooks;
+
+ /* Protects base_alloc() and base_stats_get() operations. */
+ malloc_mutex_t mtx;
+
+ /*
+ * Most recent size class in the series of increasingly large base
+ * extents. Logarithmic spacing between subsequent allocations ensures
+ * that the total number of distinct mappings remains small.
+ */
+ pszind_t pind_last;
+
+ /* Serial number generation state. */
+ size_t extent_sn_next;
+
+ /* Chain of all blocks associated with base. */
+ base_block_t *blocks;
+
+ /* Heap of extents that track unused trailing space within blocks. */
+ extent_heap_t avail[NSIZES];
+
+ /* Stats, only maintained if config_stats. */
+ size_t allocated;
+ size_t resident;
+ size_t mapped;
+};
+
+#endif /* JEMALLOC_INTERNAL_BASE_STRUCTS_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/base_types.h b/contrib/jemalloc/include/jemalloc/internal/base_types.h
new file mode 100644
index 000000000000..be7ee82589fb
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/base_types.h
@@ -0,0 +1,7 @@
+#ifndef JEMALLOC_INTERNAL_BASE_TYPES_H
+#define JEMALLOC_INTERNAL_BASE_TYPES_H
+
+typedef struct base_block_s base_block_t;
+typedef struct base_s base_t;
+
+#endif /* JEMALLOC_INTERNAL_BASE_TYPES_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/bit_util.h b/contrib/jemalloc/include/jemalloc/internal/bit_util.h
new file mode 100644
index 000000000000..8d078a8a35ef
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/bit_util.h
@@ -0,0 +1,165 @@
+#ifndef JEMALLOC_INTERNAL_BIT_UTIL_H
+#define JEMALLOC_INTERNAL_BIT_UTIL_H
+
+#include "jemalloc/internal/assert.h"
+
+#define BIT_UTIL_INLINE static inline
+
+/* Sanity check. */
+#if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \
+ || !defined(JEMALLOC_INTERNAL_FFS)
+# error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure
+#endif
+
+
+BIT_UTIL_INLINE unsigned
+ffs_llu(unsigned long long bitmap) {
+ return JEMALLOC_INTERNAL_FFSLL(bitmap);
+}
+
+BIT_UTIL_INLINE unsigned
+ffs_lu(unsigned long bitmap) {
+ return JEMALLOC_INTERNAL_FFSL(bitmap);
+}
+
+BIT_UTIL_INLINE unsigned
+ffs_u(unsigned bitmap) {
+ return JEMALLOC_INTERNAL_FFS(bitmap);
+}
+
+BIT_UTIL_INLINE unsigned
+ffs_zu(size_t bitmap) {
+#if LG_SIZEOF_PTR == LG_SIZEOF_INT
+ return ffs_u(bitmap);
+#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
+ return ffs_lu(bitmap);
+#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG
+ return ffs_llu(bitmap);
+#else
+#error No implementation for size_t ffs()
+#endif
+}
+
+BIT_UTIL_INLINE unsigned
+ffs_u64(uint64_t bitmap) {
+#if LG_SIZEOF_LONG == 3
+ return ffs_lu(bitmap);
+#elif LG_SIZEOF_LONG_LONG == 3
+ return ffs_llu(bitmap);
+#else
+#error No implementation for 64-bit ffs()
+#endif
+}
+
+BIT_UTIL_INLINE unsigned
+ffs_u32(uint32_t bitmap) {
+#if LG_SIZEOF_INT == 2
+ return ffs_u(bitmap);
+#else
+#error No implementation for 32-bit ffs()
+#endif
+}
+
+BIT_UTIL_INLINE uint64_t
+pow2_ceil_u64(uint64_t x) {
+ x--;
+ x |= x >> 1;
+ x |= x >> 2;
+ x |= x >> 4;
+ x |= x >> 8;
+ x |= x >> 16;
+ x |= x >> 32;
+ x++;
+ return x;
+}
+
+BIT_UTIL_INLINE uint32_t
+pow2_ceil_u32(uint32_t x) {
+ x--;
+ x |= x >> 1;
+ x |= x >> 2;
+ x |= x >> 4;
+ x |= x >> 8;
+ x |= x >> 16;
+ x++;
+ return x;
+}
+
+/* Compute the smallest power of 2 that is >= x. */
+BIT_UTIL_INLINE size_t
+pow2_ceil_zu(size_t x) {
+#if (LG_SIZEOF_PTR == 3)
+ return pow2_ceil_u64(x);
+#else
+ return pow2_ceil_u32(x);
+#endif
+}
+
+#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
+BIT_UTIL_INLINE unsigned
+lg_floor(size_t x) {
+ size_t ret;
+ assert(x != 0);
+
+ asm ("bsr %1, %0"
+ : "=r"(ret) // Outputs.
+ : "r"(x) // Inputs.
+ );
+ assert(ret < UINT_MAX);
+ return (unsigned)ret;
+}
+#elif (defined(_MSC_VER))
+BIT_UTIL_INLINE unsigned
+lg_floor(size_t x) {
+ unsigned long ret;
+
+ assert(x != 0);
+
+#if (LG_SIZEOF_PTR == 3)
+ _BitScanReverse64(&ret, x);
+#elif (LG_SIZEOF_PTR == 2)
+ _BitScanReverse(&ret, x);
+#else
+# error "Unsupported type size for lg_floor()"
+#endif
+ assert(ret < UINT_MAX);
+ return (unsigned)ret;
+}
+#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
+BIT_UTIL_INLINE unsigned
+lg_floor(size_t x) {
+ assert(x != 0);
+
+#if (LG_SIZEOF_PTR == LG_SIZEOF_INT)
+ return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x);
+#elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG)
+ return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x);
+#else
+# error "Unsupported type size for lg_floor()"
+#endif
+}
+#else
+BIT_UTIL_INLINE unsigned
+lg_floor(size_t x) {
+ assert(x != 0);
+
+ x |= (x >> 1);
+ x |= (x >> 2);
+ x |= (x >> 4);
+ x |= (x >> 8);
+ x |= (x >> 16);
+#if (LG_SIZEOF_PTR == 3)
+ x |= (x >> 32);
+#endif
+ if (x == SIZE_T_MAX) {
+ return (8 << LG_SIZEOF_PTR) - 1;
+ }
+ x++;
+ return ffs_zu(x) - 2;
+}
+#endif
+
+#undef BIT_UTIL_INLINE
+
+#endif /* JEMALLOC_INTERNAL_BIT_UTIL_H */
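Editorial note: pow2_ceil_u32/u64 use the classic fill-the-low-bits-then-increment trick, and lg_floor() picks the cheapest available bit-scan primitive. The standalone demonstration below reimplements the same arithmetic (rather than calling the in-tree functions) so it builds without jemalloc's configure machinery.

#include <assert.h>
#include <stdint.h>

static uint32_t
demo_pow2_ceil_u32(uint32_t x) {
	x--;
	x |= x >> 1;  x |= x >> 2;  x |= x >> 4;
	x |= x >> 8;  x |= x >> 16;
	return x + 1;
}

int
main(void) {
	assert(demo_pow2_ceil_u32(1) == 1);
	assert(demo_pow2_ceil_u32(37) == 64);	/* smallest power of 2 >= 37 */
	assert(demo_pow2_ceil_u32(64) == 64);	/* powers of 2 map to themselves */
	/* lg_floor(37) would be 5, since 2^5 = 32 <= 37 < 64 = 2^6. */
	return 0;
}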
diff --git a/contrib/jemalloc/include/jemalloc/internal/bitmap.h b/contrib/jemalloc/include/jemalloc/internal/bitmap.h
index 36f38b59c3ae..ac990290a5b2 100644
--- a/contrib/jemalloc/include/jemalloc/internal/bitmap.h
+++ b/contrib/jemalloc/include/jemalloc/internal/bitmap.h
@@ -1,19 +1,27 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
+#ifndef JEMALLOC_INTERNAL_BITMAP_H
+#define JEMALLOC_INTERNAL_BITMAP_H
-/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
-#define LG_BITMAP_MAXBITS LG_RUN_MAXREGS
-#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS)
+#include "jemalloc/internal/arena_types.h"
+#include "jemalloc/internal/bit_util.h"
+#include "jemalloc/internal/size_classes.h"
-typedef struct bitmap_level_s bitmap_level_t;
-typedef struct bitmap_info_s bitmap_info_t;
typedef unsigned long bitmap_t;
-#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG
+#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG
+
+/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
+#if LG_SLAB_MAXREGS > LG_CEIL_NSIZES
+/* Maximum bitmap bit count is determined by maximum regions per slab. */
+# define LG_BITMAP_MAXBITS LG_SLAB_MAXREGS
+#else
+/* Maximum bitmap bit count is determined by number of extent size classes. */
+# define LG_BITMAP_MAXBITS LG_CEIL_NSIZES
+#endif
+#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS)
/* Number of bits per group. */
-#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3)
-#define BITMAP_GROUP_NBITS (ZU(1) << LG_BITMAP_GROUP_NBITS)
-#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1)
+#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3)
+#define BITMAP_GROUP_NBITS (1U << LG_BITMAP_GROUP_NBITS)
+#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1)
/*
* Do some analysis on how big the bitmap is before we use a tree. For a brute
@@ -21,81 +29,131 @@ typedef unsigned long bitmap_t;
* use a tree instead.
*/
#if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3
-# define USE_TREE
+# define BITMAP_USE_TREE
#endif
/* Number of groups required to store a given number of bits. */
-#define BITMAP_BITS2GROUPS(nbits) \
- ((nbits + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS)
+#define BITMAP_BITS2GROUPS(nbits) \
+ (((nbits) + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS)
/*
* Number of groups required at a particular level for a given number of bits.
*/
-#define BITMAP_GROUPS_L0(nbits) \
+#define BITMAP_GROUPS_L0(nbits) \
BITMAP_BITS2GROUPS(nbits)
-#define BITMAP_GROUPS_L1(nbits) \
+#define BITMAP_GROUPS_L1(nbits) \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits))
-#define BITMAP_GROUPS_L2(nbits) \
+#define BITMAP_GROUPS_L2(nbits) \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))
-#define BITMAP_GROUPS_L3(nbits) \
+#define BITMAP_GROUPS_L3(nbits) \
BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
BITMAP_BITS2GROUPS((nbits)))))
+#define BITMAP_GROUPS_L4(nbits) \
+ BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
+ BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))))
/*
* Assuming the number of levels, number of groups required for a given number
* of bits.
*/
-#define BITMAP_GROUPS_1_LEVEL(nbits) \
+#define BITMAP_GROUPS_1_LEVEL(nbits) \
BITMAP_GROUPS_L0(nbits)
-#define BITMAP_GROUPS_2_LEVEL(nbits) \
+#define BITMAP_GROUPS_2_LEVEL(nbits) \
(BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits))
-#define BITMAP_GROUPS_3_LEVEL(nbits) \
+#define BITMAP_GROUPS_3_LEVEL(nbits) \
(BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits))
-#define BITMAP_GROUPS_4_LEVEL(nbits) \
+#define BITMAP_GROUPS_4_LEVEL(nbits) \
(BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits))
+#define BITMAP_GROUPS_5_LEVEL(nbits) \
+ (BITMAP_GROUPS_4_LEVEL(nbits) + BITMAP_GROUPS_L4(nbits))
/*
* Maximum number of groups required to support LG_BITMAP_MAXBITS.
*/
-#ifdef USE_TREE
+#ifdef BITMAP_USE_TREE
#if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS
+# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_1_LEVEL(nbits)
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2
+# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_2_LEVEL(nbits)
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3
+# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_3_LEVEL(nbits)
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4
+# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_4_LEVEL(nbits)
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS)
+#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 5
+# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_5_LEVEL(nbits)
+# define BITMAP_GROUPS_MAX BITMAP_GROUPS_5_LEVEL(BITMAP_MAXBITS)
#else
# error "Unsupported bitmap size"
#endif
-/* Maximum number of levels possible. */
-#define BITMAP_MAX_LEVELS \
- (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \
- + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP)
+/*
+ * Maximum number of levels possible. This could be statically computed based
+ * on LG_BITMAP_MAXBITS:
+ *
+ * #define BITMAP_MAX_LEVELS \
+ * (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \
+ * + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP)
+ *
+ * However, that would not allow the generic BITMAP_INFO_INITIALIZER() macro, so
+ * instead hardcode BITMAP_MAX_LEVELS to the largest number supported by the
+ * various cascading macros. The only additional cost this incurs is some
+ * unused trailing entries in bitmap_info_t structures; the bitmaps themselves
+ * are not impacted.
+ */
+#define BITMAP_MAX_LEVELS 5
+
+#define BITMAP_INFO_INITIALIZER(nbits) { \
+ /* nbits. */ \
+ nbits, \
+ /* nlevels. */ \
+ (BITMAP_GROUPS_L0(nbits) > BITMAP_GROUPS_L1(nbits)) + \
+ (BITMAP_GROUPS_L1(nbits) > BITMAP_GROUPS_L2(nbits)) + \
+ (BITMAP_GROUPS_L2(nbits) > BITMAP_GROUPS_L3(nbits)) + \
+ (BITMAP_GROUPS_L3(nbits) > BITMAP_GROUPS_L4(nbits)) + 1, \
+ /* levels. */ \
+ { \
+ {0}, \
+ {BITMAP_GROUPS_L0(nbits)}, \
+ {BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \
+ {BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) + \
+ BITMAP_GROUPS_L0(nbits)}, \
+ {BITMAP_GROUPS_L3(nbits) + BITMAP_GROUPS_L2(nbits) + \
+ BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \
+ {BITMAP_GROUPS_L4(nbits) + BITMAP_GROUPS_L3(nbits) + \
+ BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) \
+ + BITMAP_GROUPS_L0(nbits)} \
+ } \
+}
-#else /* USE_TREE */
+#else /* BITMAP_USE_TREE */
-#define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS)
+#define BITMAP_GROUPS(nbits) BITMAP_BITS2GROUPS(nbits)
+#define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS)
-#endif /* USE_TREE */
+#define BITMAP_INFO_INITIALIZER(nbits) { \
+ /* nbits. */ \
+ nbits, \
+ /* ngroups. */ \
+ BITMAP_BITS2GROUPS(nbits) \
+}
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
+#endif /* BITMAP_USE_TREE */
-struct bitmap_level_s {
+typedef struct bitmap_level_s {
/* Offset of this level's groups within the array of groups. */
size_t group_offset;
-};
+} bitmap_level_t;
-struct bitmap_info_s {
+typedef struct bitmap_info_s {
/* Logical number of bits in bitmap (stored at bottom level). */
size_t nbits;
-#ifdef USE_TREE
+#ifdef BITMAP_USE_TREE
/* Number of levels necessary for nbits. */
unsigned nlevels;
@@ -104,37 +162,19 @@ struct bitmap_info_s {
* bottom to top (e.g. the bottom level is stored in levels[0]).
*/
bitmap_level_t levels[BITMAP_MAX_LEVELS+1];
-#else /* USE_TREE */
+#else /* BITMAP_USE_TREE */
/* Number of groups necessary for nbits. */
size_t ngroups;
-#endif /* USE_TREE */
-};
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-void bitmap_info_init(bitmap_info_t *binfo, size_t nbits);
-void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo);
-size_t bitmap_size(const bitmap_info_t *binfo);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo);
-bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
-void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
-size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo);
-void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
-#endif
+#endif /* BITMAP_USE_TREE */
+} bitmap_info_t;
+
+void bitmap_info_init(bitmap_info_t *binfo, size_t nbits);
+void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill);
+size_t bitmap_size(const bitmap_info_t *binfo);
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BITMAP_C_))
-JEMALLOC_INLINE bool
-bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo)
-{
-#ifdef USE_TREE
+static inline bool
+bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) {
+#ifdef BITMAP_USE_TREE
size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
bitmap_t rg = bitmap[rgoff];
/* The bitmap is full iff the root group is 0. */
@@ -143,28 +183,27 @@ bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo)
size_t i;
for (i = 0; i < binfo->ngroups; i++) {
- if (bitmap[i] != 0)
- return (false);
+ if (bitmap[i] != 0) {
+ return false;
+ }
}
- return (true);
+ return true;
#endif
}
-JEMALLOC_INLINE bool
-bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
-{
+static inline bool
+bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
size_t goff;
bitmap_t g;
assert(bit < binfo->nbits);
goff = bit >> LG_BITMAP_GROUP_NBITS;
g = bitmap[goff];
- return (!(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))));
+ return !(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
}
-JEMALLOC_INLINE void
-bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
-{
+static inline void
+bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
size_t goff;
bitmap_t *gp;
bitmap_t g;
@@ -178,7 +217,7 @@ bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
assert(bitmap_get(bitmap, binfo, bit));
-#ifdef USE_TREE
+#ifdef BITMAP_USE_TREE
/* Propagate group state transitions up the tree. */
if (g == 0) {
unsigned i;
@@ -190,24 +229,83 @@ bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
- if (g != 0)
+ if (g != 0) {
break;
+ }
}
}
#endif
}
+/* ffu: find first unset >= bit. */
+static inline size_t
+bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) {
+ assert(min_bit < binfo->nbits);
+
+#ifdef BITMAP_USE_TREE
+ size_t bit = 0;
+ for (unsigned level = binfo->nlevels; level--;) {
+ size_t lg_bits_per_group = (LG_BITMAP_GROUP_NBITS * (level +
+ 1));
+ bitmap_t group = bitmap[binfo->levels[level].group_offset + (bit
+ >> lg_bits_per_group)];
+ unsigned group_nmask = (unsigned)(((min_bit > bit) ? (min_bit -
+ bit) : 0) >> (lg_bits_per_group - LG_BITMAP_GROUP_NBITS));
+ assert(group_nmask <= BITMAP_GROUP_NBITS);
+ bitmap_t group_mask = ~((1LU << group_nmask) - 1);
+ bitmap_t group_masked = group & group_mask;
+ if (group_masked == 0LU) {
+ if (group == 0LU) {
+ return binfo->nbits;
+ }
+ /*
+ * min_bit was preceded by one or more unset bits in
+ * this group, but there are no other unset bits in this
+ * group. Try again starting at the first bit of the
+ * next sibling. This will recurse at most once per
+ * non-root level.
+ */
+ size_t sib_base = bit + (ZU(1) << lg_bits_per_group);
+ assert(sib_base > min_bit);
+ assert(sib_base > bit);
+ if (sib_base >= binfo->nbits) {
+ return binfo->nbits;
+ }
+ return bitmap_ffu(bitmap, binfo, sib_base);
+ }
+ bit += ((size_t)(ffs_lu(group_masked) - 1)) <<
+ (lg_bits_per_group - LG_BITMAP_GROUP_NBITS);
+ }
+ assert(bit >= min_bit);
+ assert(bit < binfo->nbits);
+ return bit;
+#else
+ size_t i = min_bit >> LG_BITMAP_GROUP_NBITS;
+ bitmap_t g = bitmap[i] & ~((1LU << (min_bit & BITMAP_GROUP_NBITS_MASK))
+ - 1);
+ size_t bit;
+ do {
+ bit = ffs_lu(g);
+ if (bit != 0) {
+ return (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
+ }
+ i++;
+ g = bitmap[i];
+ } while (i < binfo->ngroups);
+ return binfo->nbits;
+#endif
+}
+
/* sfu: set first unset. */
-JEMALLOC_INLINE size_t
-bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
-{
+static inline size_t
+bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) {
size_t bit;
bitmap_t g;
unsigned i;
assert(!bitmap_full(bitmap, binfo));
-#ifdef USE_TREE
+#ifdef BITMAP_USE_TREE
i = binfo->nlevels - 1;
g = bitmap[binfo->levels[i].group_offset];
bit = ffs_lu(g) - 1;
@@ -226,12 +324,11 @@ bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
#endif
bitmap_set(bitmap, binfo, bit);
- return (bit);
+ return bit;
}
-JEMALLOC_INLINE void
-bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
-{
+static inline void
+bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
size_t goff;
bitmap_t *gp;
bitmap_t g;
@@ -247,7 +344,7 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
assert(!bitmap_get(bitmap, binfo, bit));
-#ifdef USE_TREE
+#ifdef BITMAP_USE_TREE
/* Propagate group state transitions up the tree. */
if (propagate) {
unsigned i;
@@ -261,14 +358,12 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
== 0);
g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
- if (!propagate)
+ if (!propagate) {
break;
+ }
}
}
-#endif /* USE_TREE */
+#endif /* BITMAP_USE_TREE */
}
-#endif
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
+#endif /* JEMALLOC_INTERNAL_BITMAP_H */
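Editorial note on putting the bitmap API together: bitmap_info_init() describes the geometry, bitmap_init() fills the groups, bitmap_sfu() claims the lowest logically-unset bit, bitmap_ffu() searches from a minimum index, and bitmap_unset() releases a bit. A usage sketch (in-tree only, not part of the patch); the 512-bit size is illustrative.

static void
bitmap_usage_sketch(void) {
	bitmap_info_t binfo;
	bitmap_info_init(&binfo, 512);

	/*
	 * Sized for the worst case; bitmap_size(&binfo) gives the exact byte
	 * count for a dynamic allocation.
	 */
	bitmap_t groups[BITMAP_GROUPS_MAX];
	bitmap_init(groups, &binfo, false);	/* false: every bit starts unset */

	size_t slot = bitmap_sfu(groups, &binfo);	/* claim lowest free slot */
	size_t next = bitmap_ffu(groups, &binfo, 100);	/* first free slot >= 100 */
	bitmap_unset(groups, &binfo, slot);		/* release it again */
	(void)next;
}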
diff --git a/contrib/jemalloc/include/jemalloc/internal/chunk.h b/contrib/jemalloc/include/jemalloc/internal/chunk.h
deleted file mode 100644
index 55df9acc7bb4..000000000000
--- a/contrib/jemalloc/include/jemalloc/internal/chunk.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-/*
- * Size and alignment of memory chunks that are allocated by the OS's virtual
- * memory system.
- */
-#define LG_CHUNK_DEFAULT 21
-
-/* Return the chunk address for allocation address a. */
-#define CHUNK_ADDR2BASE(a) \
- ((void *)((uintptr_t)(a) & ~chunksize_mask))
-
-/* Return the chunk offset of address a. */
-#define CHUNK_ADDR2OFFSET(a) \
- ((size_t)((uintptr_t)(a) & chunksize_mask))
-
-/* Return the smallest chunk multiple that is >= s. */
-#define CHUNK_CEILING(s) \
- (((s) + chunksize_mask) & ~chunksize_mask)
-
-#define CHUNK_HOOKS_INITIALIZER { \
- NULL, \
- NULL, \
- NULL, \
- NULL, \
- NULL, \
- NULL, \
- NULL \
-}
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-extern size_t opt_lg_chunk;
-extern const char *opt_dss;
-
-extern rtree_t chunks_rtree;
-
-extern size_t chunksize;
-extern size_t chunksize_mask; /* (chunksize - 1). */
-extern size_t chunk_npages;
-
-extern const chunk_hooks_t chunk_hooks_default;
-
-chunk_hooks_t chunk_hooks_get(tsdn_t *tsdn, arena_t *arena);
-chunk_hooks_t chunk_hooks_set(tsdn_t *tsdn, arena_t *arena,
- const chunk_hooks_t *chunk_hooks);
-
-bool chunk_register(const void *chunk, const extent_node_t *node,
- bool *gdump);
-void chunk_deregister(const void *chunk, const extent_node_t *node);
-void *chunk_alloc_base(size_t size);
-void *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
- size_t *sn, bool *zero, bool *commit, bool dalloc_node);
-void *chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
- size_t *sn, bool *zero, bool *commit);
-void chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn,
- bool committed);
-void chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn,
- bool zeroed, bool committed);
-bool chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset,
- size_t length);
-bool chunk_boot(void);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-extent_node_t *chunk_lookup(const void *chunk, bool dependent);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_CHUNK_C_))
-JEMALLOC_INLINE extent_node_t *
-chunk_lookup(const void *ptr, bool dependent)
-{
-
- return (rtree_get(&chunks_rtree, (uintptr_t)ptr, dependent));
-}
-#endif
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
-
-#include "jemalloc/internal/chunk_dss.h"
-#include "jemalloc/internal/chunk_mmap.h"
diff --git a/contrib/jemalloc/include/jemalloc/internal/chunk_dss.h b/contrib/jemalloc/include/jemalloc/internal/chunk_dss.h
deleted file mode 100644
index da8511ba06b0..000000000000
--- a/contrib/jemalloc/include/jemalloc/internal/chunk_dss.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-typedef enum {
- dss_prec_disabled = 0,
- dss_prec_primary = 1,
- dss_prec_secondary = 2,
-
- dss_prec_limit = 3
-} dss_prec_t;
-#define DSS_PREC_DEFAULT dss_prec_secondary
-#define DSS_DEFAULT "secondary"
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-extern const char *dss_prec_names[];
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-dss_prec_t chunk_dss_prec_get(void);
-bool chunk_dss_prec_set(dss_prec_t dss_prec);
-void *chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
- size_t size, size_t alignment, bool *zero, bool *commit);
-bool chunk_in_dss(void *chunk);
-bool chunk_dss_mergeable(void *chunk_a, void *chunk_b);
-void chunk_dss_boot(void);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
diff --git a/contrib/jemalloc/include/jemalloc/internal/chunk_mmap.h b/contrib/jemalloc/include/jemalloc/internal/chunk_mmap.h
deleted file mode 100644
index 6f2d0ac2ed37..000000000000
--- a/contrib/jemalloc/include/jemalloc/internal/chunk_mmap.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-void *chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment,
- bool *zero, bool *commit);
-bool chunk_dalloc_mmap(void *chunk, size_t size);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
diff --git a/contrib/jemalloc/include/jemalloc/internal/ckh.h b/contrib/jemalloc/include/jemalloc/internal/ckh.h
index f75ad90b73cc..7b3850bc168e 100644
--- a/contrib/jemalloc/include/jemalloc/internal/ckh.h
+++ b/contrib/jemalloc/include/jemalloc/internal/ckh.h
@@ -1,86 +1,101 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
+#ifndef JEMALLOC_INTERNAL_CKH_H
+#define JEMALLOC_INTERNAL_CKH_H
-typedef struct ckh_s ckh_t;
-typedef struct ckhc_s ckhc_t;
+#include "jemalloc/internal/tsd.h"
-/* Typedefs to allow easy function pointer passing. */
-typedef void ckh_hash_t (const void *, size_t[2]);
-typedef bool ckh_keycomp_t (const void *, const void *);
+/* Cuckoo hashing implementation. Skip to the end for the interface. */
+
+/******************************************************************************/
+/* INTERNAL DEFINITIONS -- IGNORE */
+/******************************************************************************/
/* Maintain counters used to get an idea of performance. */
-/* #define CKH_COUNT */
+/* #define CKH_COUNT */
/* Print counter values in ckh_delete() (requires CKH_COUNT). */
-/* #define CKH_VERBOSE */
+/* #define CKH_VERBOSE */
/*
* There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit
* one bucket per L1 cache line.
*/
-#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
+#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
+/* Typedefs to allow easy function pointer passing. */
+typedef void ckh_hash_t (const void *, size_t[2]);
+typedef bool ckh_keycomp_t (const void *, const void *);
/* Hash table cell. */
-struct ckhc_s {
- const void *key;
- const void *data;
-};
+typedef struct {
+ const void *key;
+ const void *data;
+} ckhc_t;
-struct ckh_s {
+/* The hash table itself. */
+typedef struct {
#ifdef CKH_COUNT
/* Counters used to get an idea of performance. */
- uint64_t ngrows;
- uint64_t nshrinks;
- uint64_t nshrinkfails;
- uint64_t ninserts;
- uint64_t nrelocs;
+ uint64_t ngrows;
+ uint64_t nshrinks;
+ uint64_t nshrinkfails;
+ uint64_t ninserts;
+ uint64_t nrelocs;
#endif
/* Used for pseudo-random number generation. */
- uint64_t prng_state;
+ uint64_t prng_state;
/* Total number of items. */
- size_t count;
+ size_t count;
/*
* Minimum and current number of hash table buckets. There are
* 2^LG_CKH_BUCKET_CELLS cells per bucket.
*/
- unsigned lg_minbuckets;
- unsigned lg_curbuckets;
+ unsigned lg_minbuckets;
+ unsigned lg_curbuckets;
/* Hash and comparison functions. */
- ckh_hash_t *hash;
- ckh_keycomp_t *keycomp;
+ ckh_hash_t *hash;
+ ckh_keycomp_t *keycomp;
/* Hash table with 2^lg_curbuckets buckets. */
- ckhc_t *tab;
-};
+ ckhc_t *tab;
+} ckh_t;
-#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
+/* BEGIN PUBLIC API */
+/******************************************************************************/
-bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
+/* Lifetime management. Minitems is the initial capacity. */
+bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
ckh_keycomp_t *keycomp);
-void ckh_delete(tsd_t *tsd, ckh_t *ckh);
-size_t ckh_count(ckh_t *ckh);
-bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
-bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data);
-bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
+void ckh_delete(tsd_t *tsd, ckh_t *ckh);
+
+/* Get the number of elements in the set. */
+size_t ckh_count(ckh_t *ckh);
+
+/*
+ * To iterate over the elements in the table, initialize *tabind to 0 and call
+ * this function until it returns true. Each call that returns false will
+ * update *key and *data to the next element in the table, assuming the pointers
+ * are non-NULL.
+ */
+bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
+
+/*
+ * Basic hash table operations -- insert, removal, lookup. For ckh_remove and
+ * ckh_search, key or data can be NULL. The hash-table only stores pointers to
+ * the key and value, and doesn't do any lifetime management.
+ */
+bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data);
+bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
void **data);
-bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);
-void ckh_string_hash(const void *key, size_t r_hash[2]);
-bool ckh_string_keycomp(const void *k1, const void *k2);
-void ckh_pointer_hash(const void *key, size_t r_hash[2]);
-bool ckh_pointer_keycomp(const void *k1, const void *k2);
+bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
+/* Some useful hash and comparison functions for strings and pointers. */
+void ckh_string_hash(const void *key, size_t r_hash[2]);
+bool ckh_string_keycomp(const void *k1, const void *k2);
+void ckh_pointer_hash(const void *key, size_t r_hash[2]);
+bool ckh_pointer_keycomp(const void *k1, const void *k2);
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
+#endif /* JEMALLOC_INTERNAL_CKH_H */
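Editorial note: the public API above composes as ckh_new() with the provided string hash/comparator, insert/search, iteration until ckh_iter() returns true, then ckh_delete(). A usage sketch (in-tree only, not part of the patch); tsd_fetch() is assumed to come from the included tsd.h.

static void
ckh_usage_sketch(void) {
	tsd_t *tsd = tsd_fetch();
	ckh_t ckh;

	if (ckh_new(tsd, &ckh, 16, ckh_string_hash, ckh_string_keycomp)) {
		return;	/* true means failure (e.g. OOM). */
	}
	/* Only the pointers are stored; true here would also mean failure. */
	ckh_insert(tsd, &ckh, "answer", (void *)(uintptr_t)42);

	void *key, *data;
	if (!ckh_search(&ckh, "answer", &key, &data)) {
		/* Found: key/data point at what was inserted. */
	}

	/* Iterate: keep calling until it returns true (end of table). */
	size_t tabind = 0;
	while (!ckh_iter(&ckh, &tabind, &key, &data)) {
		/* Visit (key, data). */
	}
	ckh_delete(tsd, &ckh);
}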
diff --git a/contrib/jemalloc/include/jemalloc/internal/ctl.h b/contrib/jemalloc/include/jemalloc/internal/ctl.h
index af0f6d7c5d7b..f159383d5ca8 100644
--- a/contrib/jemalloc/include/jemalloc/internal/ctl.h
+++ b/contrib/jemalloc/include/jemalloc/internal/ctl.h
@@ -1,88 +1,107 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-typedef struct ctl_node_s ctl_node_t;
-typedef struct ctl_named_node_s ctl_named_node_t;
-typedef struct ctl_indexed_node_s ctl_indexed_node_t;
-typedef struct ctl_arena_stats_s ctl_arena_stats_t;
-typedef struct ctl_stats_s ctl_stats_t;
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-struct ctl_node_s {
- bool named;
-};
-
-struct ctl_named_node_s {
- struct ctl_node_s node;
- const char *name;
+#ifndef JEMALLOC_INTERNAL_CTL_H
+#define JEMALLOC_INTERNAL_CTL_H
+
+#include "jemalloc/internal/jemalloc_internal_types.h"
+#include "jemalloc/internal/malloc_io.h"
+#include "jemalloc/internal/mutex_prof.h"
+#include "jemalloc/internal/ql.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/stats.h"
+
+/* Maximum ctl tree depth. */
+#define CTL_MAX_DEPTH 7
+
+typedef struct ctl_node_s {
+ bool named;
+} ctl_node_t;
+
+typedef struct ctl_named_node_s {
+ ctl_node_t node;
+ const char *name;
/* If (nchildren == 0), this is a terminal node. */
- unsigned nchildren;
- const ctl_node_t *children;
- int (*ctl)(tsd_t *, const size_t *, size_t, void *,
- size_t *, void *, size_t);
-};
-
-struct ctl_indexed_node_s {
- struct ctl_node_s node;
- const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t,
+ size_t nchildren;
+ const ctl_node_t *children;
+ int (*ctl)(tsd_t *, const size_t *, size_t, void *, size_t *, void *,
size_t);
-};
+} ctl_named_node_t;
-struct ctl_arena_stats_s {
- bool initialized;
- unsigned nthreads;
- const char *dss;
- ssize_t lg_dirty_mult;
- ssize_t decay_time;
- size_t pactive;
- size_t pdirty;
-
- /* The remainder are only populated if config_stats is true. */
+typedef struct ctl_indexed_node_s {
+ struct ctl_node_s node;
+ const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t,
+ size_t);
+} ctl_indexed_node_t;
- arena_stats_t astats;
+typedef struct ctl_arena_stats_s {
+ arena_stats_t astats;
/* Aggregate stats for small size classes, based on bin stats. */
- size_t allocated_small;
- uint64_t nmalloc_small;
- uint64_t ndalloc_small;
- uint64_t nrequests_small;
-
- malloc_bin_stats_t bstats[NBINS];
- malloc_large_stats_t *lstats; /* nlclasses elements. */
- malloc_huge_stats_t *hstats; /* nhclasses elements. */
-};
-
-struct ctl_stats_s {
- size_t allocated;
- size_t active;
- size_t metadata;
- size_t resident;
- size_t mapped;
- size_t retained;
- unsigned narenas;
- ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */
+ size_t allocated_small;
+ uint64_t nmalloc_small;
+ uint64_t ndalloc_small;
+ uint64_t nrequests_small;
+
+ malloc_bin_stats_t bstats[NBINS];
+ malloc_large_stats_t lstats[NSIZES - NBINS];
+} ctl_arena_stats_t;
+
+typedef struct ctl_stats_s {
+ size_t allocated;
+ size_t active;
+ size_t metadata;
+ size_t resident;
+ size_t mapped;
+ size_t retained;
+
+ background_thread_stats_t background_thread;
+ mutex_prof_data_t mutex_prof_data[mutex_prof_num_global_mutexes];
+} ctl_stats_t;
+
+typedef struct ctl_arena_s ctl_arena_t;
+struct ctl_arena_s {
+ unsigned arena_ind;
+ bool initialized;
+ ql_elm(ctl_arena_t) destroyed_link;
+
+ /* Basic stats, supported even if !config_stats. */
+ unsigned nthreads;
+ const char *dss;
+ ssize_t dirty_decay_ms;
+ ssize_t muzzy_decay_ms;
+ size_t pactive;
+ size_t pdirty;
+ size_t pmuzzy;
+
+ /* NULL if !config_stats. */
+ ctl_arena_stats_t *astats;
};
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
+typedef struct ctl_arenas_s {
+ uint64_t epoch;
+ unsigned narenas;
+ ql_head(ctl_arena_t) destroyed;
+
+ /*
+ * Element 0 corresponds to merged stats for extant arenas (accessed via
+ * MALLCTL_ARENAS_ALL), element 1 corresponds to merged stats for
+ * destroyed arenas (accessed via MALLCTL_ARENAS_DESTROYED), and the
+ * remaining MALLOCX_ARENA_LIMIT elements correspond to arenas.
+ */
+ ctl_arena_t *arenas[2 + MALLOCX_ARENA_LIMIT];
+} ctl_arenas_t;
+
+int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
void *newp, size_t newlen);
-int ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp,
+int ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp,
size_t *miblenp);
-int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen);
-bool ctl_boot(void);
-void ctl_prefork(tsdn_t *tsdn);
-void ctl_postfork_parent(tsdn_t *tsdn);
-void ctl_postfork_child(tsdn_t *tsdn);
+bool ctl_boot(void);
+void ctl_prefork(tsdn_t *tsdn);
+void ctl_postfork_parent(tsdn_t *tsdn);
+void ctl_postfork_child(tsdn_t *tsdn);
-#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
+#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
!= 0) { \
malloc_printf( \
@@ -92,7 +111,7 @@ void ctl_postfork_child(tsdn_t *tsdn);
} \
} while (0)
-#define xmallctlnametomib(name, mibp, miblenp) do { \
+#define xmallctlnametomib(name, mibp, miblenp) do { \
if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \
malloc_printf("<jemalloc>: Failure in " \
"xmallctlnametomib(\"%s\", ...)\n", name); \
@@ -100,7 +119,7 @@ void ctl_postfork_child(tsdn_t *tsdn);
} \
} while (0)
-#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \
+#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \
if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \
newlen) != 0) { \
malloc_write( \
@@ -109,10 +128,4 @@ void ctl_postfork_child(tsdn_t *tsdn);
} \
} while (0)
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
-
+#endif /* JEMALLOC_INTERNAL_CTL_H */
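Editorial note: this ctl tree is what backs the public mallctl*() entry points; the xmallctl*() macros are simply abort-on-failure wrappers around them for internal use. The standalone example below goes through the public interface instead (on FreeBSD the declarations live in <malloc_np.h>; a standalone jemalloc build would use <jemalloc/jemalloc.h>), refreshing the stats epoch before reading stats.allocated.

#include <stdint.h>
#include <stdio.h>
#include <malloc_np.h>

int
main(void) {
	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);
	/* Writing "epoch" refreshes the cached stats snapshot. */
	mallctl("epoch", &epoch, &sz, &epoch, sz);

	size_t allocated;
	sz = sizeof(allocated);
	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0) {
		printf("allocated: %zu bytes\n", allocated);
	}
	return 0;
}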
diff --git a/contrib/jemalloc/include/jemalloc/internal/extent.h b/contrib/jemalloc/include/jemalloc/internal/extent.h
deleted file mode 100644
index fc77f9f55f3c..000000000000
--- a/contrib/jemalloc/include/jemalloc/internal/extent.h
+++ /dev/null
@@ -1,275 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-typedef struct extent_node_s extent_node_t;
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-/* Tree of extents. Use accessor functions for en_* fields. */
-struct extent_node_s {
- /* Arena from which this extent came, if any. */
- arena_t *en_arena;
-
- /* Pointer to the extent that this tree node is responsible for. */
- void *en_addr;
-
- /* Total region size. */
- size_t en_size;
-
- /*
- * Serial number (potentially non-unique).
- *
- * In principle serial numbers can wrap around on 32-bit systems if
- * JEMALLOC_MUNMAP is defined, but as long as comparison functions fall
- * back on address comparison for equal serial numbers, stable (if
- * imperfect) ordering is maintained.
- *
- * Serial numbers may not be unique even in the absence of wrap-around,
- * e.g. when splitting an extent and assigning the same serial number to
- * both resulting adjacent extents.
- */
- size_t en_sn;
-
- /*
- * The zeroed flag is used by chunk recycling code to track whether
- * memory is zero-filled.
- */
- bool en_zeroed;
-
- /*
- * True if physical memory is committed to the extent, whether
- * explicitly or implicitly as on a system that overcommits and
- * satisfies physical memory needs on demand via soft page faults.
- */
- bool en_committed;
-
- /*
- * The achunk flag is used to validate that huge allocation lookups
- * don't return arena chunks.
- */
- bool en_achunk;
-
- /* Profile counters, used for huge objects. */
- prof_tctx_t *en_prof_tctx;
-
- /* Linkage for arena's runs_dirty and chunks_cache rings. */
- arena_runs_dirty_link_t rd;
- qr(extent_node_t) cc_link;
-
- union {
- /* Linkage for the size/sn/address-ordered tree. */
- rb_node(extent_node_t) szsnad_link;
-
- /* Linkage for arena's achunks, huge, and node_cache lists. */
- ql_elm(extent_node_t) ql_link;
- };
-
- /* Linkage for the address-ordered tree. */
- rb_node(extent_node_t) ad_link;
-};
-typedef rb_tree(extent_node_t) extent_tree_t;
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-#ifdef JEMALLOC_JET
-size_t extent_size_quantize_floor(size_t size);
-#endif
-size_t extent_size_quantize_ceil(size_t size);
-
-rb_proto(, extent_tree_szsnad_, extent_tree_t, extent_node_t)
-
-rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-arena_t *extent_node_arena_get(const extent_node_t *node);
-void *extent_node_addr_get(const extent_node_t *node);
-size_t extent_node_size_get(const extent_node_t *node);
-size_t extent_node_sn_get(const extent_node_t *node);
-bool extent_node_zeroed_get(const extent_node_t *node);
-bool extent_node_committed_get(const extent_node_t *node);
-bool extent_node_achunk_get(const extent_node_t *node);
-prof_tctx_t *extent_node_prof_tctx_get(const extent_node_t *node);
-void extent_node_arena_set(extent_node_t *node, arena_t *arena);
-void extent_node_addr_set(extent_node_t *node, void *addr);
-void extent_node_size_set(extent_node_t *node, size_t size);
-void extent_node_sn_set(extent_node_t *node, size_t sn);
-void extent_node_zeroed_set(extent_node_t *node, bool zeroed);
-void extent_node_committed_set(extent_node_t *node, bool committed);
-void extent_node_achunk_set(extent_node_t *node, bool achunk);
-void extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx);
-void extent_node_init(extent_node_t *node, arena_t *arena, void *addr,
- size_t size, size_t sn, bool zeroed, bool committed);
-void extent_node_dirty_linkage_init(extent_node_t *node);
-void extent_node_dirty_insert(extent_node_t *node,
- arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty);
-void extent_node_dirty_remove(extent_node_t *node);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_))
-JEMALLOC_INLINE arena_t *
-extent_node_arena_get(const extent_node_t *node)
-{
-
- return (node->en_arena);
-}
-
-JEMALLOC_INLINE void *
-extent_node_addr_get(const extent_node_t *node)
-{
-
- return (node->en_addr);
-}
-
-JEMALLOC_INLINE size_t
-extent_node_size_get(const extent_node_t *node)
-{
-
- return (node->en_size);
-}
-
-JEMALLOC_INLINE size_t
-extent_node_sn_get(const extent_node_t *node)
-{
-
- return (node->en_sn);
-}
-
-JEMALLOC_INLINE bool
-extent_node_zeroed_get(const extent_node_t *node)
-{
-
- return (node->en_zeroed);
-}
-
-JEMALLOC_INLINE bool
-extent_node_committed_get(const extent_node_t *node)
-{
-
- assert(!node->en_achunk);
- return (node->en_committed);
-}
-
-JEMALLOC_INLINE bool
-extent_node_achunk_get(const extent_node_t *node)
-{
-
- return (node->en_achunk);
-}
-
-JEMALLOC_INLINE prof_tctx_t *
-extent_node_prof_tctx_get(const extent_node_t *node)
-{
-
- return (node->en_prof_tctx);
-}
-
-JEMALLOC_INLINE void
-extent_node_arena_set(extent_node_t *node, arena_t *arena)
-{
-
- node->en_arena = arena;
-}
-
-JEMALLOC_INLINE void
-extent_node_addr_set(extent_node_t *node, void *addr)
-{
-
- node->en_addr = addr;
-}
-
-JEMALLOC_INLINE void
-extent_node_size_set(extent_node_t *node, size_t size)
-{
-
- node->en_size = size;
-}
-
-JEMALLOC_INLINE void
-extent_node_sn_set(extent_node_t *node, size_t sn)
-{
-
- node->en_sn = sn;
-}
-
-JEMALLOC_INLINE void
-extent_node_zeroed_set(extent_node_t *node, bool zeroed)
-{
-
- node->en_zeroed = zeroed;
-}
-
-JEMALLOC_INLINE void
-extent_node_committed_set(extent_node_t *node, bool committed)
-{
-
- node->en_committed = committed;
-}
-
-JEMALLOC_INLINE void
-extent_node_achunk_set(extent_node_t *node, bool achunk)
-{
-
- node->en_achunk = achunk;
-}
-
-JEMALLOC_INLINE void
-extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx)
-{
-
- node->en_prof_tctx = tctx;
-}
-
-JEMALLOC_INLINE void
-extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size,
- size_t sn, bool zeroed, bool committed)
-{
-
- extent_node_arena_set(node, arena);
- extent_node_addr_set(node, addr);
- extent_node_size_set(node, size);
- extent_node_sn_set(node, sn);
- extent_node_zeroed_set(node, zeroed);
- extent_node_committed_set(node, committed);
- extent_node_achunk_set(node, false);
- if (config_prof)
- extent_node_prof_tctx_set(node, NULL);
-}
-
-JEMALLOC_INLINE void
-extent_node_dirty_linkage_init(extent_node_t *node)
-{
-
- qr_new(&node->rd, rd_link);
- qr_new(node, cc_link);
-}
-
-JEMALLOC_INLINE void
-extent_node_dirty_insert(extent_node_t *node,
- arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty)
-{
-
- qr_meld(runs_dirty, &node->rd, rd_link);
- qr_meld(chunks_dirty, node, cc_link);
-}
-
-JEMALLOC_INLINE void
-extent_node_dirty_remove(extent_node_t *node)
-{
-
- qr_remove(&node->rd, rd_link);
- qr_remove(node, cc_link);
-}
-
-#endif
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
-
diff --git a/contrib/jemalloc/include/jemalloc/internal/extent_dss.h b/contrib/jemalloc/include/jemalloc/internal/extent_dss.h
new file mode 100644
index 000000000000..e8f02ce2ad27
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/extent_dss.h
@@ -0,0 +1,26 @@
+#ifndef JEMALLOC_INTERNAL_EXTENT_DSS_H
+#define JEMALLOC_INTERNAL_EXTENT_DSS_H
+
+typedef enum {
+ dss_prec_disabled = 0,
+ dss_prec_primary = 1,
+ dss_prec_secondary = 2,
+
+ dss_prec_limit = 3
+} dss_prec_t;
+#define DSS_PREC_DEFAULT dss_prec_secondary
+#define DSS_DEFAULT "secondary"
+
+extern const char *dss_prec_names[];
+
+extern const char *opt_dss;
+
+dss_prec_t extent_dss_prec_get(void);
+bool extent_dss_prec_set(dss_prec_t dss_prec);
+void *extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
+ size_t size, size_t alignment, bool *zero, bool *commit);
+bool extent_in_dss(void *addr);
+bool extent_dss_mergeable(void *addr_a, void *addr_b);
+void extent_dss_boot(void);
+
+#endif /* JEMALLOC_INTERNAL_EXTENT_DSS_H */
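Editorial note: dss_prec_t controls whether extent allocation prefers sbrk() over mmap(); dss_prec_names[] holds the string forms used by opt_dss and the "arena.<i>.dss" mallctl. A minimal in-tree sketch (not part of the patch) of switching the global preference:

static void
dss_prec_sketch(void) {
	/* Returns true on failure, e.g. when dss support is unavailable. */
	if (!extent_dss_prec_set(dss_prec_primary)) {
		assert(extent_dss_prec_get() == dss_prec_primary);
	}
}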
diff --git a/contrib/jemalloc/include/jemalloc/internal/extent_externs.h b/contrib/jemalloc/include/jemalloc/internal/extent_externs.h
new file mode 100644
index 000000000000..489a813c80d9
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/extent_externs.h
@@ -0,0 +1,72 @@
+#ifndef JEMALLOC_INTERNAL_EXTENT_EXTERNS_H
+#define JEMALLOC_INTERNAL_EXTENT_EXTERNS_H
+
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/mutex_pool.h"
+#include "jemalloc/internal/ph.h"
+#include "jemalloc/internal/rb.h"
+#include "jemalloc/internal/rtree.h"
+
+extern rtree_t extents_rtree;
+extern const extent_hooks_t extent_hooks_default;
+extern mutex_pool_t extent_mutex_pool;
+
+extent_t *extent_alloc(tsdn_t *tsdn, arena_t *arena);
+void extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
+
+extent_hooks_t *extent_hooks_get(arena_t *arena);
+extent_hooks_t *extent_hooks_set(tsd_t *tsd, arena_t *arena,
+ extent_hooks_t *extent_hooks);
+
+#ifdef JEMALLOC_JET
+size_t extent_size_quantize_floor(size_t size);
+size_t extent_size_quantize_ceil(size_t size);
+#endif
+
+rb_proto(, extent_avail_, extent_tree_t, extent_t)
+ph_proto(, extent_heap_, extent_heap_t, extent_t)
+
+bool extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
+ bool delay_coalesce);
+extent_state_t extents_state_get(const extents_t *extents);
+size_t extents_npages_get(extents_t *extents);
+extent_t *extents_alloc(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
+ size_t size, size_t pad, size_t alignment, bool slab, szind_t szind,
+ bool *zero, bool *commit);
+void extents_dalloc(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent);
+extent_t *extents_evict(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_min);
+void extents_prefork(tsdn_t *tsdn, extents_t *extents);
+void extents_postfork_parent(tsdn_t *tsdn, extents_t *extents);
+void extents_postfork_child(tsdn_t *tsdn, extents_t *extents);
+extent_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
+ size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit);
+void extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
+void extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent);
+void extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent);
+bool extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length);
+bool extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length);
+bool extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length);
+bool extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length);
+extent_t *extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
+ szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b);
+bool extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b);
+
+bool extent_boot(void);
+
+#endif /* JEMALLOC_INTERNAL_EXTENT_EXTERNS_H */
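Most of the prototypes above take an extent_hooks_t **r_extent_hooks: the caller passes the address of a pointer initialized to EXTENT_HOOKS_INITIALIZER (NULL, per extent_types.h below), and the extent code appears to fill it in from the arena on first use so the hooks are looked up at most once per call chain. A minimal standalone sketch of that convention (not part of the patch), with simplified hypothetical types hooks_t/arena_t standing in for the real ones:

#include <stdio.h>

typedef struct { const char *name; } hooks_t;  /* hypothetical stand-in */
typedef struct { hooks_t hooks; } arena_t;     /* hypothetical stand-in */

#define HOOKS_INITIALIZER NULL  /* mirrors EXTENT_HOOKS_INITIALIZER */

static hooks_t *
hooks_get(arena_t *arena) {
    printf("fetching hooks from the arena\n");  /* happens at most once */
    return &arena->hooks;
}

/* Fill in *r_hooks from the arena only if the caller left it NULL. */
static void
hooks_assure_initialized(arena_t *arena, hooks_t **r_hooks) {
    if (*r_hooks == HOOKS_INITIALIZER)
        *r_hooks = hooks_get(arena);
}

static void
extent_op(arena_t *arena, hooks_t **r_hooks) {
    hooks_assure_initialized(arena, r_hooks);
    printf("using hooks \"%s\"\n", (*r_hooks)->name);
}

int
main(void) {
    arena_t a = { { "default" } };
    hooks_t *r_hooks = HOOKS_INITIALIZER;

    extent_op(&a, &r_hooks);  /* fetches from the arena */
    extent_op(&a, &r_hooks);  /* reuses the cached pointer */
    return 0;
}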
diff --git a/contrib/jemalloc/include/jemalloc/internal/extent_inlines.h b/contrib/jemalloc/include/jemalloc/internal/extent_inlines.h
new file mode 100644
index 000000000000..bb2bd699ed2c
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/extent_inlines.h
@@ -0,0 +1,407 @@
+#ifndef JEMALLOC_INTERNAL_EXTENT_INLINES_H
+#define JEMALLOC_INTERNAL_EXTENT_INLINES_H
+
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/mutex_pool.h"
+#include "jemalloc/internal/pages.h"
+#include "jemalloc/internal/prng.h"
+#include "jemalloc/internal/ql.h"
+#include "jemalloc/internal/sz.h"
+
+static inline void
+extent_lock(tsdn_t *tsdn, extent_t *extent) {
+ assert(extent != NULL);
+ mutex_pool_lock(tsdn, &extent_mutex_pool, (uintptr_t)extent);
+}
+
+static inline void
+extent_unlock(tsdn_t *tsdn, extent_t *extent) {
+ assert(extent != NULL);
+ mutex_pool_unlock(tsdn, &extent_mutex_pool, (uintptr_t)extent);
+}
+
+static inline void
+extent_lock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) {
+ assert(extent1 != NULL && extent2 != NULL);
+ mutex_pool_lock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1,
+ (uintptr_t)extent2);
+}
+
+static inline void
+extent_unlock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) {
+ assert(extent1 != NULL && extent2 != NULL);
+ mutex_pool_unlock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1,
+ (uintptr_t)extent2);
+}
+
+static inline arena_t *
+extent_arena_get(const extent_t *extent) {
+ unsigned arena_ind = (unsigned)((extent->e_bits &
+ EXTENT_BITS_ARENA_MASK) >> EXTENT_BITS_ARENA_SHIFT);
+ /*
+ * The following check is omitted because we should never actually read
+ * a NULL arena pointer.
+ */
+ if (false && arena_ind >= MALLOCX_ARENA_LIMIT) {
+ return NULL;
+ }
+ assert(arena_ind < MALLOCX_ARENA_LIMIT);
+ return (arena_t *)atomic_load_p(&arenas[arena_ind], ATOMIC_ACQUIRE);
+}
+
+static inline szind_t
+extent_szind_get_maybe_invalid(const extent_t *extent) {
+ szind_t szind = (szind_t)((extent->e_bits & EXTENT_BITS_SZIND_MASK) >>
+ EXTENT_BITS_SZIND_SHIFT);
+ assert(szind <= NSIZES);
+ return szind;
+}
+
+static inline szind_t
+extent_szind_get(const extent_t *extent) {
+ szind_t szind = extent_szind_get_maybe_invalid(extent);
+ assert(szind < NSIZES); /* Never call when "invalid". */
+ return szind;
+}
+
+static inline size_t
+extent_usize_get(const extent_t *extent) {
+ return sz_index2size(extent_szind_get(extent));
+}
+
+static inline size_t
+extent_sn_get(const extent_t *extent) {
+ return (size_t)((extent->e_bits & EXTENT_BITS_SN_MASK) >>
+ EXTENT_BITS_SN_SHIFT);
+}
+
+static inline extent_state_t
+extent_state_get(const extent_t *extent) {
+ return (extent_state_t)((extent->e_bits & EXTENT_BITS_STATE_MASK) >>
+ EXTENT_BITS_STATE_SHIFT);
+}
+
+static inline bool
+extent_zeroed_get(const extent_t *extent) {
+ return (bool)((extent->e_bits & EXTENT_BITS_ZEROED_MASK) >>
+ EXTENT_BITS_ZEROED_SHIFT);
+}
+
+static inline bool
+extent_committed_get(const extent_t *extent) {
+ return (bool)((extent->e_bits & EXTENT_BITS_COMMITTED_MASK) >>
+ EXTENT_BITS_COMMITTED_SHIFT);
+}
+
+static inline bool
+extent_slab_get(const extent_t *extent) {
+ return (bool)((extent->e_bits & EXTENT_BITS_SLAB_MASK) >>
+ EXTENT_BITS_SLAB_SHIFT);
+}
+
+static inline unsigned
+extent_nfree_get(const extent_t *extent) {
+ assert(extent_slab_get(extent));
+ return (unsigned)((extent->e_bits & EXTENT_BITS_NFREE_MASK) >>
+ EXTENT_BITS_NFREE_SHIFT);
+}
+
+static inline void *
+extent_base_get(const extent_t *extent) {
+ assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
+ !extent_slab_get(extent));
+ return PAGE_ADDR2BASE(extent->e_addr);
+}
+
+static inline void *
+extent_addr_get(const extent_t *extent) {
+ assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
+ !extent_slab_get(extent));
+ return extent->e_addr;
+}
+
+static inline size_t
+extent_size_get(const extent_t *extent) {
+ return (extent->e_size_esn & EXTENT_SIZE_MASK);
+}
+
+static inline size_t
+extent_esn_get(const extent_t *extent) {
+ return (extent->e_size_esn & EXTENT_ESN_MASK);
+}
+
+static inline size_t
+extent_bsize_get(const extent_t *extent) {
+ return extent->e_bsize;
+}
+
+static inline void *
+extent_before_get(const extent_t *extent) {
+ return (void *)((uintptr_t)extent_base_get(extent) - PAGE);
+}
+
+static inline void *
+extent_last_get(const extent_t *extent) {
+ return (void *)((uintptr_t)extent_base_get(extent) +
+ extent_size_get(extent) - PAGE);
+}
+
+static inline void *
+extent_past_get(const extent_t *extent) {
+ return (void *)((uintptr_t)extent_base_get(extent) +
+ extent_size_get(extent));
+}
+
+static inline arena_slab_data_t *
+extent_slab_data_get(extent_t *extent) {
+ assert(extent_slab_get(extent));
+ return &extent->e_slab_data;
+}
+
+static inline const arena_slab_data_t *
+extent_slab_data_get_const(const extent_t *extent) {
+ assert(extent_slab_get(extent));
+ return &extent->e_slab_data;
+}
+
+static inline prof_tctx_t *
+extent_prof_tctx_get(const extent_t *extent) {
+ return (prof_tctx_t *)atomic_load_p(&extent->e_prof_tctx,
+ ATOMIC_ACQUIRE);
+}
+
+static inline void
+extent_arena_set(extent_t *extent, arena_t *arena) {
+ unsigned arena_ind = (arena != NULL) ? arena_ind_get(arena) : ((1U <<
+ MALLOCX_ARENA_BITS) - 1);
+ extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ARENA_MASK) |
+ ((uint64_t)arena_ind << EXTENT_BITS_ARENA_SHIFT);
+}
+
+static inline void
+extent_addr_set(extent_t *extent, void *addr) {
+ extent->e_addr = addr;
+}
+
+static inline void
+extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment) {
+ assert(extent_base_get(extent) == extent_addr_get(extent));
+
+ if (alignment < PAGE) {
+ unsigned lg_range = LG_PAGE -
+ lg_floor(CACHELINE_CEILING(alignment));
+ size_t r =
+ prng_lg_range_zu(&extent_arena_get(extent)->offset_state,
+ lg_range, true);
+ uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE -
+ lg_range);
+ extent->e_addr = (void *)((uintptr_t)extent->e_addr +
+ random_offset);
+ assert(ALIGNMENT_ADDR2BASE(extent->e_addr, alignment) ==
+ extent->e_addr);
+ }
+}
+
+static inline void
+extent_size_set(extent_t *extent, size_t size) {
+ assert((size & ~EXTENT_SIZE_MASK) == 0);
+ extent->e_size_esn = size | (extent->e_size_esn & ~EXTENT_SIZE_MASK);
+}
+
+static inline void
+extent_esn_set(extent_t *extent, size_t esn) {
+ extent->e_size_esn = (extent->e_size_esn & ~EXTENT_ESN_MASK) | (esn &
+ EXTENT_ESN_MASK);
+}
+
+static inline void
+extent_bsize_set(extent_t *extent, size_t bsize) {
+ extent->e_bsize = bsize;
+}
+
+static inline void
+extent_szind_set(extent_t *extent, szind_t szind) {
+ assert(szind <= NSIZES); /* NSIZES means "invalid". */
+ extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SZIND_MASK) |
+ ((uint64_t)szind << EXTENT_BITS_SZIND_SHIFT);
+}
+
+static inline void
+extent_nfree_set(extent_t *extent, unsigned nfree) {
+ assert(extent_slab_get(extent));
+ extent->e_bits = (extent->e_bits & ~EXTENT_BITS_NFREE_MASK) |
+ ((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT);
+}
+
+static inline void
+extent_nfree_inc(extent_t *extent) {
+ assert(extent_slab_get(extent));
+ extent->e_bits += ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT);
+}
+
+static inline void
+extent_nfree_dec(extent_t *extent) {
+ assert(extent_slab_get(extent));
+ extent->e_bits -= ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT);
+}
+
+static inline void
+extent_sn_set(extent_t *extent, size_t sn) {
+ extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SN_MASK) |
+ ((uint64_t)sn << EXTENT_BITS_SN_SHIFT);
+}
+
+static inline void
+extent_state_set(extent_t *extent, extent_state_t state) {
+ extent->e_bits = (extent->e_bits & ~EXTENT_BITS_STATE_MASK) |
+ ((uint64_t)state << EXTENT_BITS_STATE_SHIFT);
+}
+
+static inline void
+extent_zeroed_set(extent_t *extent, bool zeroed) {
+ extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ZEROED_MASK) |
+ ((uint64_t)zeroed << EXTENT_BITS_ZEROED_SHIFT);
+}
+
+static inline void
+extent_committed_set(extent_t *extent, bool committed) {
+ extent->e_bits = (extent->e_bits & ~EXTENT_BITS_COMMITTED_MASK) |
+ ((uint64_t)committed << EXTENT_BITS_COMMITTED_SHIFT);
+}
+
+static inline void
+extent_slab_set(extent_t *extent, bool slab) {
+ extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SLAB_MASK) |
+ ((uint64_t)slab << EXTENT_BITS_SLAB_SHIFT);
+}
+
+static inline void
+extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) {
+ atomic_store_p(&extent->e_prof_tctx, tctx, ATOMIC_RELEASE);
+}
+
+static inline void
+extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
+ bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed,
+ bool committed) {
+ assert(addr == PAGE_ADDR2BASE(addr) || !slab);
+
+ extent_arena_set(extent, arena);
+ extent_addr_set(extent, addr);
+ extent_size_set(extent, size);
+ extent_slab_set(extent, slab);
+ extent_szind_set(extent, szind);
+ extent_sn_set(extent, sn);
+ extent_state_set(extent, state);
+ extent_zeroed_set(extent, zeroed);
+ extent_committed_set(extent, committed);
+ ql_elm_new(extent, ql_link);
+ if (config_prof) {
+ extent_prof_tctx_set(extent, NULL);
+ }
+}
+
+static inline void
+extent_binit(extent_t *extent, void *addr, size_t bsize, size_t sn) {
+ extent_arena_set(extent, NULL);
+ extent_addr_set(extent, addr);
+ extent_bsize_set(extent, bsize);
+ extent_slab_set(extent, false);
+ extent_szind_set(extent, NSIZES);
+ extent_sn_set(extent, sn);
+ extent_state_set(extent, extent_state_active);
+ extent_zeroed_set(extent, true);
+ extent_committed_set(extent, true);
+}
+
+static inline void
+extent_list_init(extent_list_t *list) {
+ ql_new(list);
+}
+
+static inline extent_t *
+extent_list_first(const extent_list_t *list) {
+ return ql_first(list);
+}
+
+static inline extent_t *
+extent_list_last(const extent_list_t *list) {
+ return ql_last(list, ql_link);
+}
+
+static inline void
+extent_list_append(extent_list_t *list, extent_t *extent) {
+ ql_tail_insert(list, extent, ql_link);
+}
+
+static inline void
+extent_list_replace(extent_list_t *list, extent_t *to_remove,
+ extent_t *to_insert) {
+ ql_after_insert(to_remove, to_insert, ql_link);
+ ql_remove(list, to_remove, ql_link);
+}
+
+static inline void
+extent_list_remove(extent_list_t *list, extent_t *extent) {
+ ql_remove(list, extent, ql_link);
+}
+
+static inline int
+extent_sn_comp(const extent_t *a, const extent_t *b) {
+ size_t a_sn = extent_sn_get(a);
+ size_t b_sn = extent_sn_get(b);
+
+ return (a_sn > b_sn) - (a_sn < b_sn);
+}
+
+static inline int
+extent_esn_comp(const extent_t *a, const extent_t *b) {
+ size_t a_esn = extent_esn_get(a);
+ size_t b_esn = extent_esn_get(b);
+
+ return (a_esn > b_esn) - (a_esn < b_esn);
+}
+
+static inline int
+extent_ad_comp(const extent_t *a, const extent_t *b) {
+ uintptr_t a_addr = (uintptr_t)extent_addr_get(a);
+ uintptr_t b_addr = (uintptr_t)extent_addr_get(b);
+
+ return (a_addr > b_addr) - (a_addr < b_addr);
+}
+
+static inline int
+extent_ead_comp(const extent_t *a, const extent_t *b) {
+ uintptr_t a_eaddr = (uintptr_t)a;
+ uintptr_t b_eaddr = (uintptr_t)b;
+
+ return (a_eaddr > b_eaddr) - (a_eaddr < b_eaddr);
+}
+
+static inline int
+extent_snad_comp(const extent_t *a, const extent_t *b) {
+ int ret;
+
+ ret = extent_sn_comp(a, b);
+ if (ret != 0) {
+ return ret;
+ }
+
+ ret = extent_ad_comp(a, b);
+ return ret;
+}
+
+static inline int
+extent_esnead_comp(const extent_t *a, const extent_t *b) {
+ int ret;
+
+ ret = extent_esn_comp(a, b);
+ if (ret != 0) {
+ return ret;
+ }
+
+ ret = extent_ead_comp(a, b);
+ return ret;
+}
+
+#endif /* JEMALLOC_INTERNAL_EXTENT_INLINES_H */
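The comparators at the end of the file order extents by serial number and break ties by address, using the branch-free (a > b) - (a < b) idiom, which is what keeps the ordering stable even when serial numbers repeat. A self-contained C sketch of the same ordering driven through qsort(), not part of the patch; ext_t here is a reduced hypothetical struct with only the two fields the comparison needs.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

typedef struct {
    size_t sn;       /* serial number (may repeat) */
    uintptr_t addr;  /* tie-breaker */
} ext_t;

/* (a > b) - (a < b) yields -1, 0, or 1 without branches or overflow. */
static int
snad_comp(const void *pa, const void *pb) {
    const ext_t *a = pa, *b = pb;
    int ret = (a->sn > b->sn) - (a->sn < b->sn);
    if (ret != 0)
        return ret;
    return (a->addr > b->addr) - (a->addr < b->addr);
}

int
main(void) {
    ext_t v[] = { {2, 0x3000}, {1, 0x2000}, {2, 0x1000}, {1, 0x4000} };

    qsort(v, 4, sizeof(ext_t), snad_comp);
    for (int i = 0; i < 4; i++)
        printf("sn=%zu addr=0x%lx\n", v[i].sn, (unsigned long)v[i].addr);
    return 0;
}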
diff --git a/contrib/jemalloc/include/jemalloc/internal/extent_mmap.h b/contrib/jemalloc/include/jemalloc/internal/extent_mmap.h
new file mode 100644
index 000000000000..55f17ee4876a
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/extent_mmap.h
@@ -0,0 +1,10 @@
+#ifndef JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H
+#define JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H
+
+extern bool opt_retain;
+
+void *extent_alloc_mmap(void *new_addr, size_t size, size_t alignment,
+ bool *zero, bool *commit);
+bool extent_dalloc_mmap(void *addr, size_t size);
+
+#endif /* JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H */
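A common way to get alignment stronger than the page size out of mmap() is to over-allocate by the alignment and then munmap() the leading and trailing slop. The sketch below (not part of the patch) illustrates that generic technique rather than transcribing extent_alloc_mmap(); it assumes alignment is a power of two no smaller than the page size and that size is a page multiple.

#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>

/* Map 'size' bytes (page multiple) aligned to 'alignment' (power of two). */
static void *
map_aligned(size_t size, size_t alignment) {
    size_t alloc_size = size + alignment;  /* over-allocate */
    char *base = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANON, -1, 0);
    if (base == MAP_FAILED)
        return NULL;

    uintptr_t addr = (uintptr_t)base;
    uintptr_t aligned = (addr + alignment - 1) & ~(alignment - 1);
    size_t lead = aligned - addr;
    size_t trail = alloc_size - lead - size;

    if (lead != 0)
        munmap(base, lead);                     /* trim leading slop */
    if (trail != 0)
        munmap((char *)aligned + size, trail);  /* trim trailing slop */
    return (void *)aligned;
}

int
main(void) {
    void *p = map_aligned(1 << 20, 1 << 21);  /* 1 MiB, 2 MiB aligned */
    printf("%p\n", p);
    return 0;
}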
diff --git a/contrib/jemalloc/include/jemalloc/internal/extent_structs.h b/contrib/jemalloc/include/jemalloc/internal/extent_structs.h
new file mode 100644
index 000000000000..d2979503458d
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/extent_structs.h
@@ -0,0 +1,199 @@
+#ifndef JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
+#define JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
+
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/bitmap.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/ql.h"
+#include "jemalloc/internal/rb.h"
+#include "jemalloc/internal/ph.h"
+#include "jemalloc/internal/size_classes.h"
+
+typedef enum {
+ extent_state_active = 0,
+ extent_state_dirty = 1,
+ extent_state_muzzy = 2,
+ extent_state_retained = 3
+} extent_state_t;
+
+/* Extent (span of pages). Use accessor functions for e_* fields. */
+struct extent_s {
+ /*
+ * Bitfield containing several fields:
+ *
+ * a: arena_ind
+ * b: slab
+ * c: committed
+ * z: zeroed
+ * t: state
+ * i: szind
+ * f: nfree
+ * n: sn
+ *
+ * nnnnnnnn ... nnnnnfff fffffffi iiiiiiit tzcbaaaa aaaaaaaa
+ *
+ * arena_ind: Arena from which this extent came, or all 1 bits if
+ * unassociated.
+ *
+ * slab: The slab flag indicates whether the extent is used for a slab
+ * of small regions. This helps differentiate small size classes,
+ * and it indicates whether interior pointers can be looked up via
+ * iealloc().
+ *
+ * committed: The committed flag indicates whether physical memory is
+ * committed to the extent, whether explicitly or implicitly
+ * as on a system that overcommits and satisfies physical
+ * memory needs on demand via soft page faults.
+ *
+ * zeroed: The zeroed flag is used by extent recycling code to track
+ * whether memory is zero-filled.
+ *
+ * state: The state flag is an extent_state_t.
+ *
+ * szind: The szind flag indicates usable size class index for
+ * allocations residing in this extent, regardless of whether the
+ * extent is a slab. Extent size and usable size often differ
+ * even for non-slabs, either due to sz_large_pad or promotion of
+ * sampled small regions.
+ *
+ * nfree: Number of free regions in slab.
+ *
+ * sn: Serial number (potentially non-unique).
+ *
+ * Serial numbers may wrap around if !opt_retain, but as long as
+ * comparison functions fall back on address comparison for equal
+ * serial numbers, stable (if imperfect) ordering is maintained.
+ *
+ * Serial numbers may not be unique even in the absence of
+ * wrap-around, e.g. when splitting an extent and assigning the same
+ * serial number to both resulting adjacent extents.
+ */
+ uint64_t e_bits;
+#define EXTENT_BITS_ARENA_SHIFT 0
+#define EXTENT_BITS_ARENA_MASK \
+ (((uint64_t)(1U << MALLOCX_ARENA_BITS) - 1) << EXTENT_BITS_ARENA_SHIFT)
+
+#define EXTENT_BITS_SLAB_SHIFT MALLOCX_ARENA_BITS
+#define EXTENT_BITS_SLAB_MASK \
+ ((uint64_t)0x1U << EXTENT_BITS_SLAB_SHIFT)
+
+#define EXTENT_BITS_COMMITTED_SHIFT (MALLOCX_ARENA_BITS + 1)
+#define EXTENT_BITS_COMMITTED_MASK \
+ ((uint64_t)0x1U << EXTENT_BITS_COMMITTED_SHIFT)
+
+#define EXTENT_BITS_ZEROED_SHIFT (MALLOCX_ARENA_BITS + 2)
+#define EXTENT_BITS_ZEROED_MASK \
+ ((uint64_t)0x1U << EXTENT_BITS_ZEROED_SHIFT)
+
+#define EXTENT_BITS_STATE_SHIFT (MALLOCX_ARENA_BITS + 3)
+#define EXTENT_BITS_STATE_MASK \
+ ((uint64_t)0x3U << EXTENT_BITS_STATE_SHIFT)
+
+#define EXTENT_BITS_SZIND_SHIFT (MALLOCX_ARENA_BITS + 5)
+#define EXTENT_BITS_SZIND_MASK \
+ (((uint64_t)(1U << LG_CEIL_NSIZES) - 1) << EXTENT_BITS_SZIND_SHIFT)
+
+#define EXTENT_BITS_NFREE_SHIFT \
+ (MALLOCX_ARENA_BITS + 5 + LG_CEIL_NSIZES)
+#define EXTENT_BITS_NFREE_MASK \
+ ((uint64_t)((1U << (LG_SLAB_MAXREGS + 1)) - 1) << EXTENT_BITS_NFREE_SHIFT)
+
+#define EXTENT_BITS_SN_SHIFT \
+ (MALLOCX_ARENA_BITS + 5 + LG_CEIL_NSIZES + (LG_SLAB_MAXREGS + 1))
+#define EXTENT_BITS_SN_MASK (UINT64_MAX << EXTENT_BITS_SN_SHIFT)
+
+ /* Pointer to the extent that this structure is responsible for. */
+ void *e_addr;
+
+ union {
+ /*
+ * Extent size and serial number associated with the extent
+ * structure (different than the serial number for the extent at
+ * e_addr).
+ *
+ * ssssssss [...] ssssssss ssssnnnn nnnnnnnn
+ */
+ size_t e_size_esn;
+ #define EXTENT_SIZE_MASK ((size_t)~(PAGE-1))
+ #define EXTENT_ESN_MASK ((size_t)PAGE-1)
+ /* Base extent size, which may not be a multiple of PAGE. */
+ size_t e_bsize;
+ };
+
+ union {
+ /*
+ * List linkage, used by a variety of lists:
+ * - arena_bin_t's slabs_full
+ * - extents_t's LRU
+ * - stashed dirty extents
+ * - arena's large allocations
+ */
+ ql_elm(extent_t) ql_link;
+ /* Red-black tree linkage, used by arena's extent_avail. */
+ rb_node(extent_t) rb_link;
+ };
+
+ /* Linkage for per size class sn/address-ordered heaps. */
+ phn(extent_t) ph_link;
+
+ union {
+ /* Small region slab metadata. */
+ arena_slab_data_t e_slab_data;
+
+ /*
+ * Profile counters, used for large objects. Points to a
+ * prof_tctx_t.
+ */
+ atomic_p_t e_prof_tctx;
+ };
+};
+typedef ql_head(extent_t) extent_list_t;
+typedef rb_tree(extent_t) extent_tree_t;
+typedef ph(extent_t) extent_heap_t;
+
+/* Quantized collection of extents, with built-in LRU queue. */
+struct extents_s {
+ malloc_mutex_t mtx;
+
+ /*
+ * Quantized per size class heaps of extents.
+ *
+ * Synchronization: mtx.
+ */
+ extent_heap_t heaps[NPSIZES+1];
+
+ /*
+ * Bitmap for which set bits correspond to non-empty heaps.
+ *
+ * Synchronization: mtx.
+ */
+ bitmap_t bitmap[BITMAP_GROUPS(NPSIZES+1)];
+
+ /*
+ * LRU of all extents in heaps.
+ *
+ * Synchronization: mtx.
+ */
+ extent_list_t lru;
+
+ /*
+ * Page sum for all extents in heaps.
+ *
+ * The synchronization here is a little tricky. Modifications to npages
+ * must hold mtx, but reads need not (though, a reader who sees npages
+ * without holding the mutex can't assume anything about the rest of the
+ * state of the extents_t).
+ */
+ atomic_zu_t npages;
+
+ /* All stored extents must be in the same state. */
+ extent_state_t state;
+
+ /*
+ * If true, delay coalescing until eviction; otherwise coalesce during
+ * deallocation.
+ */
+ bool delay_coalesce;
+};
+
+#endif /* JEMALLOC_INTERNAL_EXTENT_STRUCTS_H */
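The e_bits word defined above packs the arena index, the slab/committed/zeroed flags, the state, the size-class index, the free-region count, and the serial number into a single uint64_t, each field accessed through a SHIFT/MASK pair. A reduced standalone sketch of the same pattern (not part of the patch) with two hypothetical fields, showing how a setter clears its field before or-ing in the new value so neighboring fields are preserved:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout: 12-bit "arena" at bit 0, 2-bit "state" above it. */
#define F_ARENA_SHIFT 0
#define F_ARENA_MASK (((uint64_t)(1U << 12) - 1) << F_ARENA_SHIFT)
#define F_STATE_SHIFT 12
#define F_STATE_MASK ((uint64_t)0x3U << F_STATE_SHIFT)

static unsigned
arena_get(uint64_t bits) {
    return (unsigned)((bits & F_ARENA_MASK) >> F_ARENA_SHIFT);
}

static void
arena_set(uint64_t *bits, unsigned ind) {
    *bits = (*bits & ~F_ARENA_MASK) | ((uint64_t)ind << F_ARENA_SHIFT);
}

static unsigned
state_get(uint64_t bits) {
    return (unsigned)((bits & F_STATE_MASK) >> F_STATE_SHIFT);
}

static void
state_set(uint64_t *bits, unsigned state) {
    *bits = (*bits & ~F_STATE_MASK) | ((uint64_t)state << F_STATE_SHIFT);
}

int
main(void) {
    uint64_t bits = 0;

    arena_set(&bits, 7);
    state_set(&bits, 2);  /* e.g. extent_state_muzzy in the enum above */
    arena_set(&bits, 9);  /* overwrite the arena; the state is preserved */
    printf("arena=%u state=%u\n", arena_get(bits), state_get(bits));
    return 0;
}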
diff --git a/contrib/jemalloc/include/jemalloc/internal/extent_types.h b/contrib/jemalloc/include/jemalloc/internal/extent_types.h
new file mode 100644
index 000000000000..b6905ce10555
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/extent_types.h
@@ -0,0 +1,9 @@
+#ifndef JEMALLOC_INTERNAL_EXTENT_TYPES_H
+#define JEMALLOC_INTERNAL_EXTENT_TYPES_H
+
+typedef struct extent_s extent_t;
+typedef struct extents_s extents_t;
+
+#define EXTENT_HOOKS_INITIALIZER NULL
+
+#endif /* JEMALLOC_INTERNAL_EXTENT_TYPES_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/hash.h b/contrib/jemalloc/include/jemalloc/internal/hash.h
index 1ff2d9a05f59..188296cf0e28 100644
--- a/contrib/jemalloc/include/jemalloc/internal/hash.h
+++ b/contrib/jemalloc/include/jemalloc/internal/hash.h
@@ -1,109 +1,76 @@
+#ifndef JEMALLOC_INTERNAL_HASH_H
+#define JEMALLOC_INTERNAL_HASH_H
+
+#include "jemalloc/internal/assert.h"
+
/*
* The following hash function is based on MurmurHash3, placed into the public
* domain by Austin Appleby. See https://github.com/aappleby/smhasher for
* details.
*/
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-uint32_t hash_x86_32(const void *key, int len, uint32_t seed);
-void hash_x86_128(const void *key, const int len, uint32_t seed,
- uint64_t r_out[2]);
-void hash_x64_128(const void *key, const int len, const uint32_t seed,
- uint64_t r_out[2]);
-void hash(const void *key, size_t len, const uint32_t seed,
- size_t r_hash[2]);
-#endif
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_))
/******************************************************************************/
/* Internal implementation. */
-JEMALLOC_INLINE uint32_t
-hash_rotl_32(uint32_t x, int8_t r)
-{
-
+static inline uint32_t
+hash_rotl_32(uint32_t x, int8_t r) {
return ((x << r) | (x >> (32 - r)));
}
-JEMALLOC_INLINE uint64_t
-hash_rotl_64(uint64_t x, int8_t r)
-{
-
+static inline uint64_t
+hash_rotl_64(uint64_t x, int8_t r) {
return ((x << r) | (x >> (64 - r)));
}
-JEMALLOC_INLINE uint32_t
-hash_get_block_32(const uint32_t *p, int i)
-{
-
+static inline uint32_t
+hash_get_block_32(const uint32_t *p, int i) {
/* Handle unaligned read. */
if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) {
uint32_t ret;
memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t));
- return (ret);
+ return ret;
}
- return (p[i]);
+ return p[i];
}
-JEMALLOC_INLINE uint64_t
-hash_get_block_64(const uint64_t *p, int i)
-{
-
+static inline uint64_t
+hash_get_block_64(const uint64_t *p, int i) {
/* Handle unaligned read. */
if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) {
uint64_t ret;
memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t));
- return (ret);
+ return ret;
}
- return (p[i]);
+ return p[i];
}
-JEMALLOC_INLINE uint32_t
-hash_fmix_32(uint32_t h)
-{
-
+static inline uint32_t
+hash_fmix_32(uint32_t h) {
h ^= h >> 16;
h *= 0x85ebca6b;
h ^= h >> 13;
h *= 0xc2b2ae35;
h ^= h >> 16;
- return (h);
+ return h;
}
-JEMALLOC_INLINE uint64_t
-hash_fmix_64(uint64_t k)
-{
-
+static inline uint64_t
+hash_fmix_64(uint64_t k) {
k ^= k >> 33;
k *= KQU(0xff51afd7ed558ccd);
k ^= k >> 33;
k *= KQU(0xc4ceb9fe1a85ec53);
k ^= k >> 33;
- return (k);
+ return k;
}
-JEMALLOC_INLINE uint32_t
-hash_x86_32(const void *key, int len, uint32_t seed)
-{
+static inline uint32_t
+hash_x86_32(const void *key, int len, uint32_t seed) {
const uint8_t *data = (const uint8_t *) key;
const int nblocks = len / 4;
@@ -149,13 +116,12 @@ hash_x86_32(const void *key, int len, uint32_t seed)
h1 = hash_fmix_32(h1);
- return (h1);
+ return h1;
}
-UNUSED JEMALLOC_INLINE void
+UNUSED static inline void
hash_x86_128(const void *key, const int len, uint32_t seed,
- uint64_t r_out[2])
-{
+ uint64_t r_out[2]) {
const uint8_t * data = (const uint8_t *) key;
const int nblocks = len / 16;
@@ -254,10 +220,9 @@ hash_x86_128(const void *key, const int len, uint32_t seed,
r_out[1] = (((uint64_t) h4) << 32) | h3;
}
-UNUSED JEMALLOC_INLINE void
+UNUSED static inline void
hash_x64_128(const void *key, const int len, const uint32_t seed,
- uint64_t r_out[2])
-{
+ uint64_t r_out[2]) {
const uint8_t *data = (const uint8_t *) key;
const int nblocks = len / 16;
@@ -334,10 +299,8 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
/******************************************************************************/
/* API. */
-JEMALLOC_INLINE void
-hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2])
-{
-
+static inline void
+hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) {
assert(len <= INT_MAX); /* Unfortunate implementation limitation. */
#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
@@ -351,7 +314,5 @@ hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2])
}
#endif
}
-#endif
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
+#endif /* JEMALLOC_INTERNAL_HASH_H */
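hash_fmix_64() above is MurmurHash3's 64-bit finalizer: alternating xor-shift and multiply rounds that avalanche the input bits, so nearby keys land far apart. It is usable on its own as a cheap mixer for pointer-sized keys; a standalone sketch follows (not part of the patch), with the constants copied from the diff and KQU() replaced by plain ULL literals.

#include <stdint.h>
#include <stdio.h>

static uint64_t
fmix64(uint64_t k) {
    k ^= k >> 33;
    k *= 0xff51afd7ed558ccdULL;
    k ^= k >> 33;
    k *= 0xc4ceb9fe1a85ec53ULL;
    k ^= k >> 33;
    return k;
}

int
main(void) {
    /* Nearby inputs produce widely separated outputs. */
    for (uint64_t i = 0; i < 4; i++)
        printf("fmix64(%llu) = %016llx\n",
            (unsigned long long)i, (unsigned long long)fmix64(i));
    return 0;
}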
diff --git a/contrib/jemalloc/include/jemalloc/internal/hooks.h b/contrib/jemalloc/include/jemalloc/internal/hooks.h
new file mode 100644
index 000000000000..85e2a9914e39
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/hooks.h
@@ -0,0 +1,12 @@
+#ifndef JEMALLOC_INTERNAL_HOOKS_H
+#define JEMALLOC_INTERNAL_HOOKS_H
+
+extern JEMALLOC_EXPORT void (*hooks_arena_new_hook)();
+extern JEMALLOC_EXPORT void (*hooks_libc_hook)();
+
+#define JEMALLOC_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn)
+
+/* Note that this is undef'd and re-define'd in src/prof.c. */
+#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, hooks_libc_hook)
+
+#endif /* JEMALLOC_INTERNAL_HOOKS_H */
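The JEMALLOC_HOOK macro above relies on the comma operator: it calls the hook for its side effect when the pointer is non-NULL and then evaluates to fn unchanged, so a redefined symbol such as _Unwind_Backtrace still resolves to the real function at the call site. A standalone sketch of that trick (not part of the patch), using hypothetical names real_backtrace/libc_hook:

#include <stdio.h>

static void (*libc_hook)(void);  /* hypothetical test hook, may stay NULL */

/* Call the hook (if any) for its side effect, then yield fn unchanged. */
#define HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn)

static int
real_backtrace(int depth) {
    return depth + 1;
}
/* Self-referential macro: inside the expansion the name is not re-expanded. */
#define real_backtrace HOOK(real_backtrace, libc_hook)

static void
my_hook(void) {
    printf("hook fired\n");
}

int
main(void) {
    printf("%d\n", real_backtrace(1));  /* no hook installed */
    libc_hook = my_hook;
    printf("%d\n", real_backtrace(2));  /* prints "hook fired" first */
    return 0;
}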
diff --git a/contrib/jemalloc/include/jemalloc/internal/huge.h b/contrib/jemalloc/include/jemalloc/internal/huge.h
deleted file mode 100644
index 22184d9bbd45..000000000000
--- a/contrib/jemalloc/include/jemalloc/internal/huge.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-void *huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
-void *huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
- size_t alignment, bool zero);
-bool huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize,
- size_t usize_min, size_t usize_max, bool zero);
-void *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
- size_t usize, size_t alignment, bool zero, tcache_t *tcache);
-#ifdef JEMALLOC_JET
-typedef void (huge_dalloc_junk_t)(void *, size_t);
-extern huge_dalloc_junk_t *huge_dalloc_junk;
-#endif
-void huge_dalloc(tsdn_t *tsdn, void *ptr);
-arena_t *huge_aalloc(const void *ptr);
-size_t huge_salloc(tsdn_t *tsdn, const void *ptr);
-prof_tctx_t *huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr);
-void huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx);
-void huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h
deleted file mode 100644
index 7e72c0714658..000000000000
--- a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h
+++ /dev/null
@@ -1,1291 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_H
-#define JEMALLOC_INTERNAL_H
-
-#include "jemalloc_internal_defs.h"
-#include "jemalloc/internal/jemalloc_internal_decls.h"
-
-#ifdef JEMALLOC_UTRACE
-#include <sys/ktrace.h>
-#endif
-
-#include "un-namespace.h"
-#include "libc_private.h"
-
-#define JEMALLOC_NO_DEMANGLE
-#ifdef JEMALLOC_JET
-# define JEMALLOC_N(n) jet_##n
-# include "jemalloc/internal/public_namespace.h"
-# define JEMALLOC_NO_RENAME
-# include "../jemalloc.h"
-# undef JEMALLOC_NO_RENAME
-#else
-# define JEMALLOC_N(n) __je_##n
-# include "../jemalloc.h"
-#endif
-#include "jemalloc/internal/private_namespace.h"
-
-static const bool config_debug =
-#ifdef JEMALLOC_DEBUG
- true
-#else
- false
-#endif
- ;
-static const bool have_dss =
-#ifdef JEMALLOC_DSS
- true
-#else
- false
-#endif
- ;
-static const bool config_fill =
-#ifdef JEMALLOC_FILL
- true
-#else
- false
-#endif
- ;
-static const bool config_lazy_lock = true;
-static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
-static const bool config_prof =
-#ifdef JEMALLOC_PROF
- true
-#else
- false
-#endif
- ;
-static const bool config_prof_libgcc =
-#ifdef JEMALLOC_PROF_LIBGCC
- true
-#else
- false
-#endif
- ;
-static const bool config_prof_libunwind =
-#ifdef JEMALLOC_PROF_LIBUNWIND
- true
-#else
- false
-#endif
- ;
-static const bool maps_coalesce =
-#ifdef JEMALLOC_MAPS_COALESCE
- true
-#else
- false
-#endif
- ;
-static const bool config_munmap =
-#ifdef JEMALLOC_MUNMAP
- true
-#else
- false
-#endif
- ;
-static const bool config_stats =
-#ifdef JEMALLOC_STATS
- true
-#else
- false
-#endif
- ;
-static const bool config_tcache =
-#ifdef JEMALLOC_TCACHE
- true
-#else
- false
-#endif
- ;
-static const bool config_thp =
-#ifdef JEMALLOC_THP
- true
-#else
- false
-#endif
- ;
-static const bool config_tls =
-#ifdef JEMALLOC_TLS
- true
-#else
- false
-#endif
- ;
-static const bool config_utrace =
-#ifdef JEMALLOC_UTRACE
- true
-#else
- false
-#endif
- ;
-static const bool config_valgrind =
-#ifdef JEMALLOC_VALGRIND
- true
-#else
- false
-#endif
- ;
-static const bool config_xmalloc =
-#ifdef JEMALLOC_XMALLOC
- true
-#else
- false
-#endif
- ;
-static const bool config_ivsalloc =
-#ifdef JEMALLOC_IVSALLOC
- true
-#else
- false
-#endif
- ;
-static const bool config_cache_oblivious =
-#ifdef JEMALLOC_CACHE_OBLIVIOUS
- true
-#else
- false
-#endif
- ;
-
-#ifdef JEMALLOC_C11ATOMICS
-#include <stdatomic.h>
-#endif
-
-#ifdef JEMALLOC_ATOMIC9
-#include <machine/atomic.h>
-#endif
-
-#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
-#include <libkern/OSAtomic.h>
-#endif
-
-#ifdef JEMALLOC_ZONE
-#include <mach/mach_error.h>
-#include <mach/mach_init.h>
-#include <mach/vm_map.h>
-#endif
-
-#include "jemalloc/internal/ph.h"
-#ifndef __PGI
-#define RB_COMPACT
-#endif
-#include "jemalloc/internal/rb.h"
-#include "jemalloc/internal/qr.h"
-#include "jemalloc/internal/ql.h"
-
-/*
- * jemalloc can conceptually be broken into components (arena, tcache, etc.),
- * but there are circular dependencies that cannot be broken without
- * substantial performance degradation. In order to reduce the effect on
- * visual code flow, read the header files in multiple passes, with one of the
- * following cpp variables defined during each pass:
- *
- * JEMALLOC_H_TYPES : Preprocessor-defined constants and psuedo-opaque data
- * types.
- * JEMALLOC_H_STRUCTS : Data structures.
- * JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
- * JEMALLOC_H_INLINES : Inline functions.
- */
-/******************************************************************************/
-#define JEMALLOC_H_TYPES
-
-#include "jemalloc/internal/jemalloc_internal_macros.h"
-
-/* Page size index type. */
-typedef unsigned pszind_t;
-
-/* Size class index type. */
-typedef unsigned szind_t;
-
-/*
- * Flags bits:
- *
- * a: arena
- * t: tcache
- * 0: unused
- * z: zero
- * n: alignment
- *
- * aaaaaaaa aaaatttt tttttttt 0znnnnnn
- */
-#define MALLOCX_ARENA_MASK ((int)~0xfffff)
-#define MALLOCX_ARENA_MAX 0xffe
-#define MALLOCX_TCACHE_MASK ((int)~0xfff000ffU)
-#define MALLOCX_TCACHE_MAX 0xffd
-#define MALLOCX_LG_ALIGN_MASK ((int)0x3f)
-/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
-#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \
- (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
-#define MALLOCX_ALIGN_GET(flags) \
- (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
-#define MALLOCX_ZERO_GET(flags) \
- ((bool)(flags & MALLOCX_ZERO))
-
-#define MALLOCX_TCACHE_GET(flags) \
- (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> 8)) - 2)
-#define MALLOCX_ARENA_GET(flags) \
- (((unsigned)(((unsigned)flags) >> 20)) - 1)
-
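The *_GET macros removed above decode the public MALLOCX_*() flag word: the low 6 bits carry lg(alignment), bit 6 is the zero flag, bits 8..19 carry the tcache index biased by 2, and the top 12 bits carry the arena index biased by 1 (so 0 means "unspecified"). A standalone round-trip sketch reconstructed from those definitions follows (not part of the patch); the encoder macros are my own restatement, not copies of the public headers.

#include <stdio.h>

/* Restatement of the flag layout: aaaaaaaa aaaatttt tttttttt 0znnnnnn */
#define LG_ALIGN(la)  ((int)(la))                       /* low 6 bits */
#define ZERO_FLAG     ((int)0x40)                       /* the 'z' bit */
#define TCACHE(tc)    ((int)(((tc) + 2) << 8))
#define ARENA(a)      ((int)(((unsigned)(a) + 1) << 20))

#define ALIGN_GET(f)  ((size_t)1 << ((f) & 0x3f))
#define ZERO_GET(f)   (((f) & ZERO_FLAG) != 0)
#define TCACHE_GET(f) ((((unsigned)(f) & 0xfff00U) >> 8) - 2)
#define ARENA_GET(f)  ((((unsigned)(f)) >> 20) - 1)

int
main(void) {
    int flags = LG_ALIGN(6) | ZERO_FLAG | TCACHE(3) | ARENA(5);

    printf("align=%zu zero=%d tcache=%u arena=%u\n",
        ALIGN_GET(flags), ZERO_GET(flags), TCACHE_GET(flags),
        ARENA_GET(flags));  /* align=64 zero=1 tcache=3 arena=5 */
    return 0;
}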
-/* Smallest size class to support. */
-#define TINY_MIN (1U << LG_TINY_MIN)
-
-/*
- * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
- * classes).
- */
-#ifndef LG_QUANTUM
-# if (defined(__i386__) || defined(_M_IX86))
-# define LG_QUANTUM 4
-# endif
-# ifdef __ia64__
-# define LG_QUANTUM 4
-# endif
-# ifdef __alpha__
-# define LG_QUANTUM 4
-# endif
-# if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__))
-# define LG_QUANTUM 4
-# endif
-# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
-# define LG_QUANTUM 4
-# endif
-# ifdef __arm__
-# define LG_QUANTUM 3
-# endif
-# ifdef __aarch64__
-# define LG_QUANTUM 4
-# endif
-# ifdef __hppa__
-# define LG_QUANTUM 4
-# endif
-# ifdef __mips__
-# define LG_QUANTUM 3
-# endif
-# ifdef __or1k__
-# define LG_QUANTUM 3
-# endif
-# ifdef __powerpc__
-# define LG_QUANTUM 4
-# endif
-# ifdef __riscv__
-# define LG_QUANTUM 4
-# endif
-# ifdef __s390__
-# define LG_QUANTUM 4
-# endif
-# ifdef __SH4__
-# define LG_QUANTUM 4
-# endif
-# ifdef __tile__
-# define LG_QUANTUM 4
-# endif
-# ifdef __le32__
-# define LG_QUANTUM 4
-# endif
-# ifndef LG_QUANTUM
-# error "Unknown minimum alignment for architecture; specify via "
- "--with-lg-quantum"
-# endif
-#endif
-
-#define QUANTUM ((size_t)(1U << LG_QUANTUM))
-#define QUANTUM_MASK (QUANTUM - 1)
-
-/* Return the smallest quantum multiple that is >= a. */
-#define QUANTUM_CEILING(a) \
- (((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
-
-#define LONG ((size_t)(1U << LG_SIZEOF_LONG))
-#define LONG_MASK (LONG - 1)
-
-/* Return the smallest long multiple that is >= a. */
-#define LONG_CEILING(a) \
- (((a) + LONG_MASK) & ~LONG_MASK)
-
-#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
-#define PTR_MASK (SIZEOF_PTR - 1)
-
-/* Return the smallest (void *) multiple that is >= a. */
-#define PTR_CEILING(a) \
- (((a) + PTR_MASK) & ~PTR_MASK)
-
-/*
- * Maximum size of L1 cache line. This is used to avoid cache line aliasing.
- * In addition, this controls the spacing of cacheline-spaced size classes.
- *
- * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
- * only handle raw constants.
- */
-#define LG_CACHELINE 6
-#define CACHELINE 64
-#define CACHELINE_MASK (CACHELINE - 1)
-
-/* Return the smallest cacheline multiple that is >= s. */
-#define CACHELINE_CEILING(s) \
- (((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
-
-/* Page size. LG_PAGE is determined by the configure script. */
-#ifdef PAGE_MASK
-# undef PAGE_MASK
-#endif
-#define PAGE ((size_t)(1U << LG_PAGE))
-#define PAGE_MASK ((size_t)(PAGE - 1))
-
-/* Return the page base address for the page containing address a. */
-#define PAGE_ADDR2BASE(a) \
- ((void *)((uintptr_t)(a) & ~PAGE_MASK))
-
-/* Return the smallest pagesize multiple that is >= s. */
-#define PAGE_CEILING(s) \
- (((s) + PAGE_MASK) & ~PAGE_MASK)
-
-/* Return the nearest aligned address at or below a. */
-#define ALIGNMENT_ADDR2BASE(a, alignment) \
- ((void *)((uintptr_t)(a) & ((~(alignment)) + 1)))
-
-/* Return the offset between a and the nearest aligned address at or below a. */
-#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
- ((size_t)((uintptr_t)(a) & (alignment - 1)))
-
-/* Return the smallest alignment multiple that is >= s. */
-#define ALIGNMENT_CEILING(s, alignment) \
- (((s) + (alignment - 1)) & ((~(alignment)) + 1))
-
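For a power-of-two alignment, ((~alignment) + 1) is simply -alignment, so the removed ALIGNMENT_CEILING rounds up by adding alignment-1 and masking, while ALIGNMENT_ADDR2BASE rounds an address down with the same mask. A tiny self-check of that arithmetic (not part of the patch):

#include <stdint.h>
#include <stdio.h>

#define ADDR2BASE(a, alignment) \
    ((void *)((uintptr_t)(a) & ((~((uintptr_t)(alignment))) + 1)))
#define CEILING(s, alignment) \
    (((s) + ((alignment) - 1)) & ((~(alignment)) + 1))

int
main(void) {
    size_t s = 4097;

    printf("%zu\n", (size_t)CEILING(s, (size_t)4096));          /* 8192 */
    printf("%p\n", ADDR2BASE((void *)(uintptr_t)4097, 4096));   /* 0x1000 */
    return 0;
}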
-/* Declare a variable-length array. */
-#if __STDC_VERSION__ < 199901L
-# ifdef _MSC_VER
-# include <malloc.h>
-# define alloca _alloca
-# else
-# ifdef JEMALLOC_HAS_ALLOCA_H
-# include <alloca.h>
-# else
-# include <stdlib.h>
-# endif
-# endif
-# define VARIABLE_ARRAY(type, name, count) \
- type *name = alloca(sizeof(type) * (count))
-#else
-# define VARIABLE_ARRAY(type, name, count) type name[(count)]
-#endif
-
-#include "jemalloc/internal/nstime.h"
-#include "jemalloc/internal/valgrind.h"
-#include "jemalloc/internal/util.h"
-#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/spin.h"
-#include "jemalloc/internal/prng.h"
-#include "jemalloc/internal/ticker.h"
-#include "jemalloc/internal/ckh.h"
-#include "jemalloc/internal/size_classes.h"
-#include "jemalloc/internal/smoothstep.h"
-#include "jemalloc/internal/stats.h"
-#include "jemalloc/internal/ctl.h"
-#include "jemalloc/internal/witness.h"
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/tsd.h"
-#include "jemalloc/internal/mb.h"
-#include "jemalloc/internal/extent.h"
-#include "jemalloc/internal/arena.h"
-#include "jemalloc/internal/bitmap.h"
-#include "jemalloc/internal/base.h"
-#include "jemalloc/internal/rtree.h"
-#include "jemalloc/internal/pages.h"
-#include "jemalloc/internal/chunk.h"
-#include "jemalloc/internal/huge.h"
-#include "jemalloc/internal/tcache.h"
-#include "jemalloc/internal/hash.h"
-#include "jemalloc/internal/quarantine.h"
-#include "jemalloc/internal/prof.h"
-
-#undef JEMALLOC_H_TYPES
-/******************************************************************************/
-#define JEMALLOC_H_STRUCTS
-
-#include "jemalloc/internal/nstime.h"
-#include "jemalloc/internal/valgrind.h"
-#include "jemalloc/internal/util.h"
-#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/spin.h"
-#include "jemalloc/internal/prng.h"
-#include "jemalloc/internal/ticker.h"
-#include "jemalloc/internal/ckh.h"
-#include "jemalloc/internal/size_classes.h"
-#include "jemalloc/internal/smoothstep.h"
-#include "jemalloc/internal/stats.h"
-#include "jemalloc/internal/ctl.h"
-#include "jemalloc/internal/witness.h"
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/mb.h"
-#include "jemalloc/internal/bitmap.h"
-#define JEMALLOC_ARENA_STRUCTS_A
-#include "jemalloc/internal/arena.h"
-#undef JEMALLOC_ARENA_STRUCTS_A
-#include "jemalloc/internal/extent.h"
-#define JEMALLOC_ARENA_STRUCTS_B
-#include "jemalloc/internal/arena.h"
-#undef JEMALLOC_ARENA_STRUCTS_B
-#include "jemalloc/internal/base.h"
-#include "jemalloc/internal/rtree.h"
-#include "jemalloc/internal/pages.h"
-#include "jemalloc/internal/chunk.h"
-#include "jemalloc/internal/huge.h"
-#include "jemalloc/internal/tcache.h"
-#include "jemalloc/internal/hash.h"
-#include "jemalloc/internal/quarantine.h"
-#include "jemalloc/internal/prof.h"
-
-#include "jemalloc/internal/tsd.h"
-
-#undef JEMALLOC_H_STRUCTS
-/******************************************************************************/
-#define JEMALLOC_H_EXTERNS
-
-extern bool opt_abort;
-extern const char *opt_junk;
-extern bool opt_junk_alloc;
-extern bool opt_junk_free;
-extern size_t opt_quarantine;
-extern bool opt_redzone;
-extern bool opt_utrace;
-extern bool opt_xmalloc;
-extern bool opt_zero;
-extern unsigned opt_narenas;
-
-extern bool in_valgrind;
-
-/* Number of CPUs. */
-extern unsigned ncpus;
-
-/* Number of arenas used for automatic multiplexing of threads and arenas. */
-extern unsigned narenas_auto;
-
-/*
- * Arenas that are used to service external requests. Not all elements of the
- * arenas array are necessarily used; arenas are created lazily as needed.
- */
-extern arena_t **arenas;
-
-/*
- * pind2sz_tab encodes the same information as could be computed by
- * pind2sz_compute().
- */
-extern size_t const pind2sz_tab[NPSIZES];
-/*
- * index2size_tab encodes the same information as could be computed (at
- * unacceptable cost in some code paths) by index2size_compute().
- */
-extern size_t const index2size_tab[NSIZES];
-/*
- * size2index_tab is a compact lookup table that rounds request sizes up to
- * size classes. In order to reduce cache footprint, the table is compressed,
- * and all accesses are via size2index().
- */
-extern uint8_t const size2index_tab[];
-
-arena_t *a0get(void);
-void *a0malloc(size_t size);
-void a0dalloc(void *ptr);
-void *bootstrap_malloc(size_t size);
-void *bootstrap_calloc(size_t num, size_t size);
-void bootstrap_free(void *ptr);
-unsigned narenas_total_get(void);
-arena_t *arena_init(tsdn_t *tsdn, unsigned ind);
-arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
-arena_t *arena_choose_hard(tsd_t *tsd, bool internal);
-void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
-void thread_allocated_cleanup(tsd_t *tsd);
-void thread_deallocated_cleanup(tsd_t *tsd);
-void iarena_cleanup(tsd_t *tsd);
-void arena_cleanup(tsd_t *tsd);
-void arenas_tdata_cleanup(tsd_t *tsd);
-void narenas_tdata_cleanup(tsd_t *tsd);
-void arenas_tdata_bypass_cleanup(tsd_t *tsd);
-void jemalloc_prefork(void);
-void jemalloc_postfork_parent(void);
-void jemalloc_postfork_child(void);
-
-#include "jemalloc/internal/nstime.h"
-#include "jemalloc/internal/valgrind.h"
-#include "jemalloc/internal/util.h"
-#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/spin.h"
-#include "jemalloc/internal/prng.h"
-#include "jemalloc/internal/ticker.h"
-#include "jemalloc/internal/ckh.h"
-#include "jemalloc/internal/size_classes.h"
-#include "jemalloc/internal/smoothstep.h"
-#include "jemalloc/internal/stats.h"
-#include "jemalloc/internal/ctl.h"
-#include "jemalloc/internal/witness.h"
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/mb.h"
-#include "jemalloc/internal/bitmap.h"
-#include "jemalloc/internal/extent.h"
-#include "jemalloc/internal/arena.h"
-#include "jemalloc/internal/base.h"
-#include "jemalloc/internal/rtree.h"
-#include "jemalloc/internal/pages.h"
-#include "jemalloc/internal/chunk.h"
-#include "jemalloc/internal/huge.h"
-#include "jemalloc/internal/tcache.h"
-#include "jemalloc/internal/hash.h"
-#include "jemalloc/internal/quarantine.h"
-#include "jemalloc/internal/prof.h"
-#include "jemalloc/internal/tsd.h"
-
-#undef JEMALLOC_H_EXTERNS
-/******************************************************************************/
-#define JEMALLOC_H_INLINES
-
-#include "jemalloc/internal/nstime.h"
-#include "jemalloc/internal/valgrind.h"
-#include "jemalloc/internal/util.h"
-#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/spin.h"
-#include "jemalloc/internal/prng.h"
-#include "jemalloc/internal/ticker.h"
-#include "jemalloc/internal/ckh.h"
-#include "jemalloc/internal/size_classes.h"
-#include "jemalloc/internal/smoothstep.h"
-#include "jemalloc/internal/stats.h"
-#include "jemalloc/internal/ctl.h"
-#include "jemalloc/internal/tsd.h"
-#include "jemalloc/internal/witness.h"
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/mb.h"
-#include "jemalloc/internal/extent.h"
-#include "jemalloc/internal/base.h"
-#include "jemalloc/internal/rtree.h"
-#include "jemalloc/internal/pages.h"
-#include "jemalloc/internal/chunk.h"
-#include "jemalloc/internal/huge.h"
-
-#ifndef JEMALLOC_ENABLE_INLINE
-pszind_t psz2ind(size_t psz);
-size_t pind2sz_compute(pszind_t pind);
-size_t pind2sz_lookup(pszind_t pind);
-size_t pind2sz(pszind_t pind);
-size_t psz2u(size_t psz);
-szind_t size2index_compute(size_t size);
-szind_t size2index_lookup(size_t size);
-szind_t size2index(size_t size);
-size_t index2size_compute(szind_t index);
-size_t index2size_lookup(szind_t index);
-size_t index2size(szind_t index);
-size_t s2u_compute(size_t size);
-size_t s2u_lookup(size_t size);
-size_t s2u(size_t size);
-size_t sa2u(size_t size, size_t alignment);
-arena_t *arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal);
-arena_t *arena_choose(tsd_t *tsd, arena_t *arena);
-arena_t *arena_ichoose(tsd_t *tsd, arena_t *arena);
-arena_tdata_t *arena_tdata_get(tsd_t *tsd, unsigned ind,
- bool refresh_if_missing);
-arena_t *arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing);
-ticker_t *decay_ticker_get(tsd_t *tsd, unsigned ind);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
-JEMALLOC_INLINE pszind_t
-psz2ind(size_t psz)
-{
-
- if (unlikely(psz > HUGE_MAXCLASS))
- return (NPSIZES);
- {
- pszind_t x = lg_floor((psz<<1)-1);
- pszind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_PAGE) ? 0 : x -
- (LG_SIZE_CLASS_GROUP + LG_PAGE);
- pszind_t grp = shift << LG_SIZE_CLASS_GROUP;
-
- pszind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
- LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;
-
- size_t delta_inverse_mask = ZI(-1) << lg_delta;
- pszind_t mod = ((((psz-1) & delta_inverse_mask) >> lg_delta)) &
- ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
-
- pszind_t ind = grp + mod;
- return (ind);
- }
-}
-
-JEMALLOC_INLINE size_t
-pind2sz_compute(pszind_t pind)
-{
-
- {
- size_t grp = pind >> LG_SIZE_CLASS_GROUP;
- size_t mod = pind & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
-
- size_t grp_size_mask = ~((!!grp)-1);
- size_t grp_size = ((ZU(1) << (LG_PAGE +
- (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;
-
- size_t shift = (grp == 0) ? 1 : grp;
- size_t lg_delta = shift + (LG_PAGE-1);
- size_t mod_size = (mod+1) << lg_delta;
-
- size_t sz = grp_size + mod_size;
- return (sz);
- }
-}
-
-JEMALLOC_INLINE size_t
-pind2sz_lookup(pszind_t pind)
-{
- size_t ret = (size_t)pind2sz_tab[pind];
- assert(ret == pind2sz_compute(pind));
- return (ret);
-}
-
-JEMALLOC_INLINE size_t
-pind2sz(pszind_t pind)
-{
-
- assert(pind < NPSIZES);
- return (pind2sz_lookup(pind));
-}
-
-JEMALLOC_INLINE size_t
-psz2u(size_t psz)
-{
-
- if (unlikely(psz > HUGE_MAXCLASS))
- return (0);
- {
- size_t x = lg_floor((psz<<1)-1);
- size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
- LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;
- size_t delta = ZU(1) << lg_delta;
- size_t delta_mask = delta - 1;
- size_t usize = (psz + delta_mask) & ~delta_mask;
- return (usize);
- }
-}
-
-JEMALLOC_INLINE szind_t
-size2index_compute(size_t size)
-{
-
- if (unlikely(size > HUGE_MAXCLASS))
- return (NSIZES);
-#if (NTBINS != 0)
- if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
- szind_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
- szind_t lg_ceil = lg_floor(pow2_ceil_zu(size));
- return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
- }
-#endif
- {
- szind_t x = lg_floor((size<<1)-1);
- szind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
- x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM);
- szind_t grp = shift << LG_SIZE_CLASS_GROUP;
-
- szind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
- ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
-
- size_t delta_inverse_mask = ZI(-1) << lg_delta;
- szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
- ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
-
- szind_t index = NTBINS + grp + mod;
- return (index);
- }
-}
-
-JEMALLOC_ALWAYS_INLINE szind_t
-size2index_lookup(size_t size)
-{
-
- assert(size <= LOOKUP_MAXCLASS);
- {
- szind_t ret = (size2index_tab[(size-1) >> LG_TINY_MIN]);
- assert(ret == size2index_compute(size));
- return (ret);
- }
-}
-
-JEMALLOC_ALWAYS_INLINE szind_t
-size2index(size_t size)
-{
-
- assert(size > 0);
- if (likely(size <= LOOKUP_MAXCLASS))
- return (size2index_lookup(size));
- return (size2index_compute(size));
-}
-
-JEMALLOC_INLINE size_t
-index2size_compute(szind_t index)
-{
-
-#if (NTBINS > 0)
- if (index < NTBINS)
- return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index));
-#endif
- {
- size_t reduced_index = index - NTBINS;
- size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP;
- size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) -
- 1);
-
- size_t grp_size_mask = ~((!!grp)-1);
- size_t grp_size = ((ZU(1) << (LG_QUANTUM +
- (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;
-
- size_t shift = (grp == 0) ? 1 : grp;
- size_t lg_delta = shift + (LG_QUANTUM-1);
- size_t mod_size = (mod+1) << lg_delta;
-
- size_t usize = grp_size + mod_size;
- return (usize);
- }
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-index2size_lookup(szind_t index)
-{
- size_t ret = (size_t)index2size_tab[index];
- assert(ret == index2size_compute(index));
- return (ret);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-index2size(szind_t index)
-{
-
- assert(index < NSIZES);
- return (index2size_lookup(index));
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-s2u_compute(size_t size)
-{
-
- if (unlikely(size > HUGE_MAXCLASS))
- return (0);
-#if (NTBINS > 0)
- if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
- size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
- size_t lg_ceil = lg_floor(pow2_ceil_zu(size));
- return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
- (ZU(1) << lg_ceil));
- }
-#endif
- {
- size_t x = lg_floor((size<<1)-1);
- size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
- ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
- size_t delta = ZU(1) << lg_delta;
- size_t delta_mask = delta - 1;
- size_t usize = (size + delta_mask) & ~delta_mask;
- return (usize);
- }
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-s2u_lookup(size_t size)
-{
- size_t ret = index2size_lookup(size2index_lookup(size));
-
- assert(ret == s2u_compute(size));
- return (ret);
-}
-
-/*
- * Compute usable size that would result from allocating an object with the
- * specified size.
- */
-JEMALLOC_ALWAYS_INLINE size_t
-s2u(size_t size)
-{
-
- assert(size > 0);
- if (likely(size <= LOOKUP_MAXCLASS))
- return (s2u_lookup(size));
- return (s2u_compute(size));
-}
-
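s2u_compute() above rounds a request up to its size class: with 2^LG_SIZE_CLASS_GROUP classes per doubling, the spacing (delta) for a size whose ceil(lg) is x is 2^(x - LG_SIZE_CLASS_GROUP - 1), floored at the quantum. A standalone sketch of just that arithmetic (not part of the patch), assuming LG_QUANTUM=4 and LG_SIZE_CLASS_GROUP=2 and omitting the tiny-class and HUGE_MAXCLASS branches; lg_floor() is approximated with the GCC/Clang builtin.

#include <stdio.h>
#include <stddef.h>

#define LG_QUANTUM          4  /* assumed: 16-byte quantum */
#define LG_SIZE_CLASS_GROUP 2  /* 4 size classes per doubling */

/* Index of the highest set bit, i.e. floor(lg(x)) for x > 0. */
static unsigned
lg_floor(size_t x) {
    return (unsigned)(8 * sizeof(size_t) - 1) -
        (unsigned)__builtin_clzll((unsigned long long)x);
}

/* Round size up to its class, mirroring the group/delta math in s2u_compute. */
static size_t
s2u_sketch(size_t size) {
    unsigned x = lg_floor((size << 1) - 1);  /* ceil(lg(size)) */
    unsigned lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1) ?
        LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
    size_t delta_mask = ((size_t)1 << lg_delta) - 1;

    return (size + delta_mask) & ~delta_mask;
}

int
main(void) {
    printf("%zu %zu %zu\n", s2u_sketch(17), s2u_sketch(4097),
        s2u_sketch(20480));  /* 32 5120 20480 */
    return 0;
}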
-/*
- * Compute usable size that would result from allocating an object with the
- * specified size and alignment.
- */
-JEMALLOC_ALWAYS_INLINE size_t
-sa2u(size_t size, size_t alignment)
-{
- size_t usize;
-
- assert(alignment != 0 && ((alignment - 1) & alignment) == 0);
-
- /* Try for a small size class. */
- if (size <= SMALL_MAXCLASS && alignment < PAGE) {
- /*
- * Round size up to the nearest multiple of alignment.
- *
- * This done, we can take advantage of the fact that for each
- * small size class, every object is aligned at the smallest
- * power of two that is non-zero in the base two representation
- * of the size. For example:
- *
- * Size | Base 2 | Minimum alignment
- * -----+----------+------------------
- * 96 | 1100000 | 32
- * 144 | 10100000 | 32
- * 192 | 11000000 | 64
- */
- usize = s2u(ALIGNMENT_CEILING(size, alignment));
- if (usize < LARGE_MINCLASS)
- return (usize);
- }
-
- /* Try for a large size class. */
- if (likely(size <= large_maxclass) && likely(alignment < chunksize)) {
- /*
- * We can't achieve subpage alignment, so round up alignment
- * to the minimum that can actually be supported.
- */
- alignment = PAGE_CEILING(alignment);
-
- /* Make sure result is a large size class. */
- usize = (size <= LARGE_MINCLASS) ? LARGE_MINCLASS : s2u(size);
-
- /*
- * Calculate the size of the over-size run that arena_palloc()
- * would need to allocate in order to guarantee the alignment.
- */
- if (usize + large_pad + alignment - PAGE <= arena_maxrun)
- return (usize);
- }
-
- /* Huge size class. Beware of overflow. */
-
- if (unlikely(alignment > HUGE_MAXCLASS))
- return (0);
-
- /*
- * We can't achieve subchunk alignment, so round up alignment to the
- * minimum that can actually be supported.
- */
- alignment = CHUNK_CEILING(alignment);
-
- /* Make sure result is a huge size class. */
- if (size <= chunksize)
- usize = chunksize;
- else {
- usize = s2u(size);
- if (usize < size) {
- /* size_t overflow. */
- return (0);
- }
- }
-
- /*
- * Calculate the multi-chunk mapping that huge_palloc() would need in
- * order to guarantee the alignment.
- */
- if (usize + alignment - PAGE < usize) {
- /* size_t overflow. */
- return (0);
- }
- return (usize);
-}
-
-/* Choose an arena based on a per-thread value. */
-JEMALLOC_INLINE arena_t *
-arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal)
-{
- arena_t *ret;
-
- if (arena != NULL)
- return (arena);
-
- ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
- if (unlikely(ret == NULL))
- ret = arena_choose_hard(tsd, internal);
-
- return (ret);
-}
-
-JEMALLOC_INLINE arena_t *
-arena_choose(tsd_t *tsd, arena_t *arena)
-{
-
- return (arena_choose_impl(tsd, arena, false));
-}
-
-JEMALLOC_INLINE arena_t *
-arena_ichoose(tsd_t *tsd, arena_t *arena)
-{
-
- return (arena_choose_impl(tsd, arena, true));
-}
-
-JEMALLOC_INLINE arena_tdata_t *
-arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing)
-{
- arena_tdata_t *tdata;
- arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
-
- if (unlikely(arenas_tdata == NULL)) {
- /* arenas_tdata hasn't been initialized yet. */
- return (arena_tdata_get_hard(tsd, ind));
- }
- if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) {
- /*
- * ind is invalid, cache is old (too small), or tdata to be
- * initialized.
- */
- return (refresh_if_missing ? arena_tdata_get_hard(tsd, ind) :
- NULL);
- }
-
- tdata = &arenas_tdata[ind];
- if (likely(tdata != NULL) || !refresh_if_missing)
- return (tdata);
- return (arena_tdata_get_hard(tsd, ind));
-}
-
-JEMALLOC_INLINE arena_t *
-arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing)
-{
- arena_t *ret;
-
- assert(ind <= MALLOCX_ARENA_MAX);
-
- ret = arenas[ind];
- if (unlikely(ret == NULL)) {
- ret = atomic_read_p((void *)&arenas[ind]);
- if (init_if_missing && unlikely(ret == NULL))
- ret = arena_init(tsdn, ind);
- }
- return (ret);
-}
-
-JEMALLOC_INLINE ticker_t *
-decay_ticker_get(tsd_t *tsd, unsigned ind)
-{
- arena_tdata_t *tdata;
-
- tdata = arena_tdata_get(tsd, ind, true);
- if (unlikely(tdata == NULL))
- return (NULL);
- return (&tdata->decay_ticker);
-}
-#endif
-
-#include "jemalloc/internal/bitmap.h"
-/*
- * Include portions of arena.h interleaved with tcache.h in order to resolve
- * circular dependencies.
- */
-#define JEMALLOC_ARENA_INLINE_A
-#include "jemalloc/internal/arena.h"
-#undef JEMALLOC_ARENA_INLINE_A
-#include "jemalloc/internal/tcache.h"
-#define JEMALLOC_ARENA_INLINE_B
-#include "jemalloc/internal/arena.h"
-#undef JEMALLOC_ARENA_INLINE_B
-#include "jemalloc/internal/hash.h"
-#include "jemalloc/internal/quarantine.h"
-
-#ifndef JEMALLOC_ENABLE_INLINE
-arena_t *iaalloc(const void *ptr);
-size_t isalloc(tsdn_t *tsdn, const void *ptr, bool demote);
-void *iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero,
- tcache_t *tcache, bool is_metadata, arena_t *arena, bool slow_path);
-void *ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero,
- bool slow_path);
-void *ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
- tcache_t *tcache, bool is_metadata, arena_t *arena);
-void *ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
- tcache_t *tcache, arena_t *arena);
-void *ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero);
-size_t ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote);
-size_t u2rz(size_t usize);
-size_t p2rz(tsdn_t *tsdn, const void *ptr);
-void idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata,
- bool slow_path);
-void idalloc(tsd_t *tsd, void *ptr);
-void iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path);
-void isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
- bool slow_path);
-void isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache,
- bool slow_path);
-void *iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
- size_t extra, size_t alignment, bool zero, tcache_t *tcache,
- arena_t *arena);
-void *iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
- size_t alignment, bool zero, tcache_t *tcache, arena_t *arena);
-void *iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
- size_t alignment, bool zero);
-bool ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
- size_t extra, size_t alignment, bool zero);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
-JEMALLOC_ALWAYS_INLINE arena_t *
-iaalloc(const void *ptr)
-{
-
- assert(ptr != NULL);
-
- return (arena_aalloc(ptr));
-}
-
-/*
- * Typical usage:
- * tsdn_t *tsdn = [...]
- * void *ptr = [...]
- * size_t sz = isalloc(tsdn, ptr, config_prof);
- */
-JEMALLOC_ALWAYS_INLINE size_t
-isalloc(tsdn_t *tsdn, const void *ptr, bool demote)
-{
-
- assert(ptr != NULL);
- /* Demotion only makes sense if config_prof is true. */
- assert(config_prof || !demote);
-
- return (arena_salloc(tsdn, ptr, demote));
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
- bool is_metadata, arena_t *arena, bool slow_path)
-{
- void *ret;
-
- assert(size != 0);
- assert(!is_metadata || tcache == NULL);
- assert(!is_metadata || arena == NULL || arena->ind < narenas_auto);
-
- ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
- if (config_stats && is_metadata && likely(ret != NULL)) {
- arena_metadata_allocated_add(iaalloc(ret),
- isalloc(tsdn, ret, config_prof));
- }
- return (ret);
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path)
-{
-
- return (iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd, true),
- false, NULL, slow_path));
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
- tcache_t *tcache, bool is_metadata, arena_t *arena)
-{
- void *ret;
-
- assert(usize != 0);
- assert(usize == sa2u(usize, alignment));
- assert(!is_metadata || tcache == NULL);
- assert(!is_metadata || arena == NULL || arena->ind < narenas_auto);
-
- ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache);
- assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
- if (config_stats && is_metadata && likely(ret != NULL)) {
- arena_metadata_allocated_add(iaalloc(ret), isalloc(tsdn, ret,
- config_prof));
- }
- return (ret);
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
- tcache_t *tcache, arena_t *arena)
-{
-
- return (ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena));
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
-{
-
- return (ipallocztm(tsd_tsdn(tsd), usize, alignment, zero,
- tcache_get(tsd, true), false, NULL));
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote)
-{
- extent_node_t *node;
-
- /* Return 0 if ptr is not within a chunk managed by jemalloc. */
- node = chunk_lookup(ptr, false);
- if (node == NULL)
- return (0);
- /* Only arena chunks should be looked up via interior pointers. */
- assert(extent_node_addr_get(node) == ptr ||
- extent_node_achunk_get(node));
-
- return (isalloc(tsdn, ptr, demote));
-}
-
-JEMALLOC_INLINE size_t
-u2rz(size_t usize)
-{
- size_t ret;
-
- if (usize <= SMALL_MAXCLASS) {
- szind_t binind = size2index(usize);
- ret = arena_bin_info[binind].redzone_size;
- } else
- ret = 0;
-
- return (ret);
-}
-
-JEMALLOC_INLINE size_t
-p2rz(tsdn_t *tsdn, const void *ptr)
-{
- size_t usize = isalloc(tsdn, ptr, false);
-
- return (u2rz(usize));
-}
-
-JEMALLOC_ALWAYS_INLINE void
-idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata,
- bool slow_path)
-{
-
- assert(ptr != NULL);
- assert(!is_metadata || tcache == NULL);
- assert(!is_metadata || iaalloc(ptr)->ind < narenas_auto);
- if (config_stats && is_metadata) {
- arena_metadata_allocated_sub(iaalloc(ptr), isalloc(tsdn, ptr,
- config_prof));
- }
-
- arena_dalloc(tsdn, ptr, tcache, slow_path);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-idalloc(tsd_t *tsd, void *ptr)
-{
-
- idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd, false), false, true);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
-{
-
- if (slow_path && config_fill && unlikely(opt_quarantine))
- quarantine(tsd, ptr);
- else
- idalloctm(tsd_tsdn(tsd), ptr, tcache, false, slow_path);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
- bool slow_path)
-{
-
- arena_sdalloc(tsdn, ptr, size, tcache, slow_path);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache, bool slow_path)
-{
-
- if (slow_path && config_fill && unlikely(opt_quarantine))
- quarantine(tsd, ptr);
- else
- isdalloct(tsd_tsdn(tsd), ptr, size, tcache, slow_path);
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
- size_t extra, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena)
-{
- void *p;
- size_t usize, copysize;
-
- usize = sa2u(size + extra, alignment);
- if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
- return (NULL);
- p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache, arena);
- if (p == NULL) {
- if (extra == 0)
- return (NULL);
- /* Try again, without extra this time. */
- usize = sa2u(size, alignment);
- if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
- return (NULL);
- p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache,
- arena);
- if (p == NULL)
- return (NULL);
- }
- /*
- * Copy at most size bytes (not size+extra), since the caller has no
- * expectation that the extra bytes will be reliably preserved.
- */
- copysize = (size < oldsize) ? size : oldsize;
- memcpy(p, ptr, copysize);
- isqalloc(tsd, ptr, oldsize, tcache, true);
- return (p);
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
- bool zero, tcache_t *tcache, arena_t *arena)
-{
-
- assert(ptr != NULL);
- assert(size != 0);
-
- if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
- != 0) {
- /*
- * Existing object alignment is inadequate; allocate new space
- * and copy.
- */
- return (iralloct_realign(tsd, ptr, oldsize, size, 0, alignment,
- zero, tcache, arena));
- }
-
- return (arena_ralloc(tsd, arena, ptr, oldsize, size, alignment, zero,
- tcache));
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
- bool zero)
-{
-
- return (iralloct(tsd, ptr, oldsize, size, alignment, zero,
- tcache_get(tsd, true), NULL));
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
- size_t alignment, bool zero)
-{
-
- assert(ptr != NULL);
- assert(size != 0);
-
- if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
- != 0) {
- /* Existing object alignment is inadequate. */
- return (true);
- }
-
- return (arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero));
-}
-#endif
-
-#include "jemalloc/internal/prof.h"
-
-#undef JEMALLOC_H_INLINES
-/******************************************************************************/
-#endif /* JEMALLOC_INTERNAL_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h
index 46266325684c..12a7e5a86be6 100644
--- a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h
+++ b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h
@@ -1,5 +1,5 @@
#ifndef JEMALLOC_INTERNAL_DECLS_H
-#define JEMALLOC_INTERNAL_DECLS_H
+#define JEMALLOC_INTERNAL_DECLS_H
#include "libc_private.h"
#include "namespace.h"
@@ -17,6 +17,11 @@
# if !defined(SYS_write) && defined(__NR_write)
# define SYS_write __NR_write
# endif
+# if defined(SYS_open) && defined(__aarch64__)
+ /* Android headers may define SYS_open to __NR_open even though
+ * __NR_open may not exist on AArch64 (superseded by __NR_openat). */
+# undef SYS_open
+# endif
# include <sys/uio.h>
# endif
# include <pthread.h>
@@ -39,6 +44,9 @@
#ifndef SIZE_T_MAX
# define SIZE_T_MAX SIZE_MAX
#endif
+#ifndef SSIZE_MAX
+# define SSIZE_MAX ((ssize_t)(SIZE_T_MAX >> 1))
+#endif
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
@@ -64,9 +72,7 @@ typedef intptr_t ssize_t;
# pragma warning(disable: 4996)
#if _MSC_VER < 1800
static int
-isblank(int c)
-{
-
+isblank(int c) {
return (c == '\t' || c == ' ');
}
#endif
diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h
index c6dc20aa8522..59ccdcb00e7a 100644
--- a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h
+++ b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h
@@ -1,6 +1,6 @@
/* include/jemalloc/internal/jemalloc_internal_defs.h. Generated from jemalloc_internal_defs.h.in by configure. */
#ifndef JEMALLOC_INTERNAL_DEFS_H_
-#define JEMALLOC_INTERNAL_DEFS_H_
+#define JEMALLOC_INTERNAL_DEFS_H_
/*
* If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
* public APIs to be prefixed. This makes it possible, with some care, to use
@@ -10,6 +10,18 @@
/* #undef JEMALLOC_CPREFIX */
/*
+ * Define overrides for non-standard allocator-related functions if they are
+ * present on the system.
+ */
+/* #undef JEMALLOC_OVERRIDE___LIBC_CALLOC */
+/* #undef JEMALLOC_OVERRIDE___LIBC_FREE */
+/* #undef JEMALLOC_OVERRIDE___LIBC_MALLOC */
+/* #undef JEMALLOC_OVERRIDE___LIBC_MEMALIGN */
+/* #undef JEMALLOC_OVERRIDE___LIBC_REALLOC */
+/* #undef JEMALLOC_OVERRIDE___LIBC_VALLOC */
+#define JEMALLOC_OVERRIDE___POSIX_MEMALIGN
+
+/*
* JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
* For shared libraries, symbol visibility mechanisms prevent these symbols
* from being exported, but for static libraries, naming collisions are a real
@@ -23,17 +35,21 @@
*/
#define CPU_SPINWAIT __asm__ volatile("pause")
+/*
+ * Number of significant bits in virtual addresses. This may be less than the
+ * total number of bits in a pointer, e.g. on x64, for which the uppermost 16
+ * bits are the same as bit 47.
+ */
+#define LG_VADDR 48
+
/* Defined if C11 atomics are available. */
-/* #undef JEMALLOC_C11ATOMICS */
+/* #undef JEMALLOC_C11_ATOMICS */
-/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */
-#define JEMALLOC_ATOMIC9 1
+/* Defined if GCC __atomic atomics are available. */
+/* #undef JEMALLOC_GCC_ATOMIC_ATOMICS */
-/*
- * Defined if OSAtomic*() functions are available, as provided by Darwin, and
- * documented in the atomic(3) manual page.
- */
-/* #undef JEMALLOC_OSATOMIC */
+/* Defined if GCC __sync atomics are available. */
+#define JEMALLOC_GCC_SYNC_ATOMICS 1
/*
* Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
@@ -41,7 +57,7 @@
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the
* functions are defined in libgcc instead of being inlines).
*/
-/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4 */
+#define JE_FORCE_SYNC_COMPARE_AND_SWAP_4
/*
* Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and
@@ -49,7 +65,7 @@
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the
* functions are defined in libgcc instead of being inlines).
*/
-/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8 */
+#define JE_FORCE_SYNC_COMPARE_AND_SWAP_8
/*
* Defined if __builtin_clz() and __builtin_clzl() are available.
@@ -124,12 +140,6 @@
/* Non-empty if the tls_model attribute is supported. */
#define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec")))
-/* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. */
-#define JEMALLOC_CC_SILENCE
-
-/* JEMALLOC_CODE_COVERAGE enables test code coverage analysis. */
-/* #undef JEMALLOC_CODE_COVERAGE */
-
/*
* JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
* inline functions.
@@ -152,36 +162,23 @@
/* #undef JEMALLOC_PROF_GCC */
/*
- * JEMALLOC_TCACHE enables a thread-specific caching layer for small objects.
- * This makes it possible to allocate/deallocate objects without any locking
- * when the cache is in the steady state.
- */
-#define JEMALLOC_TCACHE
-
-/*
- * JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage
+ * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
* segment (DSS).
*/
#define JEMALLOC_DSS
-/* Support memory filling (junk/zero/quarantine/redzone). */
+/* Support memory filling (junk/zero). */
#define JEMALLOC_FILL
/* Support utrace(2)-based tracing. */
#define JEMALLOC_UTRACE
-/* Support Valgrind. */
-/* #undef JEMALLOC_VALGRIND */
-
/* Support optional abort() on OOM. */
#define JEMALLOC_XMALLOC
/* Support lazy locking (avoid locking unless a second thread is launched). */
#define JEMALLOC_LAZY_LOCK
-/* Minimum size class to support is 2^LG_TINY_MIN bytes. */
-#define LG_TINY_MIN 3
-
/*
* Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
* classes).
@@ -192,6 +189,13 @@
#define LG_PAGE 12
/*
+ * One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the
+ * system does not explicitly support huge pages; system calls that require
+ * explicit huge page support are separately configured.
+ */
+#define LG_HUGEPAGE 21
+
+/*
* If defined, adjacent virtual memory mappings with identical attributes
* automatically coalesce, and they fragment when changes are made to subranges.
* This is the normal order of things for mmap()/munmap(), but on Windows
@@ -201,11 +205,12 @@
#define JEMALLOC_MAPS_COALESCE
/*
- * If defined, use munmap() to unmap freed chunks, rather than storing them for
- * later reuse. This is disabled by default on Linux because common sequences
- * of mmap()/munmap() calls will cause virtual memory map holes.
+ * If defined, retain memory for later reuse by default rather than using e.g.
+ * munmap() to unmap freed extents. This is enabled on 64-bit Linux because
+ * common sequences of mmap()/munmap() calls will cause virtual memory map
+ * holes.
*/
-#define JEMALLOC_MUNMAP
+/* #undef JEMALLOC_RETAIN */
/* TLS is used to map arenas and magazine caches to threads. */
#define JEMALLOC_TLS
@@ -225,12 +230,6 @@
#define JEMALLOC_INTERNAL_FFS __builtin_ffs
/*
- * JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside
- * within jemalloc-owned chunks before dereferencing them.
- */
-/* #undef JEMALLOC_IVSALLOC */
-
-/*
* If defined, explicitly attempt to more uniformly distribute large allocation
* pointer alignments across all cache indices.
*/
@@ -254,24 +253,26 @@
#define JEMALLOC_HAVE_MADVISE
/*
- * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
- * arguments to madvise(2).
- */
-/* #undef JEMALLOC_HAVE_MADVISE_HUGE */
-
-/*
* Methods for purging unused pages differ between operating systems.
*
* madvise(..., MADV_FREE) : This marks pages as being unused, such that they
* will be discarded rather than swapped out.
- * madvise(..., MADV_DONTNEED) : This immediately discards pages, such that
- * new pages will be demand-zeroed if the
- * address region is later touched.
+ * madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is
+ * defined, this immediately discards pages,
+ * such that new pages will be demand-zeroed if
+ * the address region is later touched;
+ * otherwise this behaves similarly to
+ * MADV_FREE, though typically with higher
+ * system overhead.
*/
#define JEMALLOC_PURGE_MADVISE_FREE
#define JEMALLOC_PURGE_MADVISE_DONTNEED
+/* #undef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS */
-/* Defined if transparent huge page support is enabled. */
+/*
+ * Defined if transparent huge pages (THPs) are supported via the
+ * MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
+ */
/* #undef JEMALLOC_THP */
/* Define if operating system has alloca.h header. */
@@ -301,9 +302,26 @@
/* glibc memalign hook. */
/* #undef JEMALLOC_GLIBC_MEMALIGN_HOOK */
+/* pthread support */
+#define JEMALLOC_HAVE_PTHREAD
+
+/* dlsym() support */
+#define JEMALLOC_HAVE_DLSYM
+
/* Adaptive mutex support in pthreads. */
#define JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP
+/* GNU specific sched_getcpu support */
+/* #undef JEMALLOC_HAVE_SCHED_GETCPU */
+
+/* GNU specific sched_setaffinity support */
+/* #undef JEMALLOC_HAVE_SCHED_SETAFFINITY */
+
+/*
+ * If defined, all the features necessary for background threads are present.
+ */
+#define JEMALLOC_BACKGROUND_THREAD 1
+
/*
* If defined, jemalloc symbols are not exported (doesn't work when
* JEMALLOC_PREFIX is not defined).
@@ -313,4 +331,7 @@
/* config.malloc_conf options string. */
#define JEMALLOC_CONFIG_MALLOC_CONF ""
+/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
+#define JEMALLOC_IS_MALLOC 1
+
#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
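The purge-method comment in the hunk above distinguishes MADV_FREE (pages stay mapped, contents discarded lazily) from MADV_DONTNEED (pages discarded immediately where the platform zeroes on reuse). A minimal sketch of how these configure-time flags are typically consumed; the helper name and its fallback behavior are assumptions, not code from this patch:

#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>

/* Hypothetical helper: advise the kernel that [addr, addr+size) is unused. */
static bool
pages_purge_sketch(void *addr, size_t size) {
#if defined(JEMALLOC_PURGE_MADVISE_FREE)
	/* Pages stay mapped; contents may be discarded lazily. */
	return madvise(addr, size, MADV_FREE) != 0;
#elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED)
	/* Pages are discarded; reuse semantics depend on *_DONTNEED_ZEROS. */
	return madvise(addr, size, MADV_DONTNEED) != 0;
#else
	(void)addr;
	(void)size;
	return true;	/* No purging mechanism available. */
#endif
}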
diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h
new file mode 100644
index 000000000000..e10fb275d407
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h
@@ -0,0 +1,53 @@
+#ifndef JEMALLOC_INTERNAL_EXTERNS_H
+#define JEMALLOC_INTERNAL_EXTERNS_H
+
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/tsd_types.h"
+
+/* TSD checks this to set thread local slow state accordingly. */
+extern bool malloc_slow;
+
+/* Run-time options. */
+extern bool opt_abort;
+extern bool opt_abort_conf;
+extern const char *opt_junk;
+extern bool opt_junk_alloc;
+extern bool opt_junk_free;
+extern bool opt_utrace;
+extern bool opt_xmalloc;
+extern bool opt_zero;
+extern unsigned opt_narenas;
+
+/* Number of CPUs. */
+extern unsigned ncpus;
+
+/* Number of arenas used for automatic multiplexing of threads and arenas. */
+extern unsigned narenas_auto;
+
+/*
+ * Arenas that are used to service external requests. Not all elements of the
+ * arenas array are necessarily used; arenas are created lazily as needed.
+ */
+extern atomic_p_t arenas[];
+
+void *a0malloc(size_t size);
+void a0dalloc(void *ptr);
+void *bootstrap_malloc(size_t size);
+void *bootstrap_calloc(size_t num, size_t size);
+void bootstrap_free(void *ptr);
+void arena_set(unsigned ind, arena_t *arena);
+unsigned narenas_total_get(void);
+arena_t *arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
+arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
+arena_t *arena_choose_hard(tsd_t *tsd, bool internal);
+void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
+void iarena_cleanup(tsd_t *tsd);
+void arena_cleanup(tsd_t *tsd);
+void arenas_tdata_cleanup(tsd_t *tsd);
+void jemalloc_prefork(void);
+void jemalloc_postfork_parent(void);
+void jemalloc_postfork_child(void);
+bool malloc_initialized(void);
+
+#endif /* JEMALLOC_INTERNAL_EXTERNS_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h
new file mode 100644
index 000000000000..437eaa407939
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h
@@ -0,0 +1,94 @@
+#ifndef JEMALLOC_INTERNAL_INCLUDES_H
+#define JEMALLOC_INTERNAL_INCLUDES_H
+
+/*
+ * jemalloc can conceptually be broken into components (arena, tcache, etc.),
+ * but there are circular dependencies that cannot be broken without
+ * substantial performance degradation.
+ *
+ * Historically, we dealt with this by splitting each header into four
+ * sections (types, structs, externs, and inlines) and including each header
+ * file multiple times in this file, picking out the portion we want on each
+ * pass using the following #defines:
+ * JEMALLOC_H_TYPES : Preprocessor-defined constants and pseudo-opaque data
+ * types.
+ * JEMALLOC_H_STRUCTS : Data structures.
+ * JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
+ * JEMALLOC_H_INLINES : Inline functions.
+ *
+ * We're moving toward a world in which the dependencies are explicit; each file
+ * will #include the headers it depends on (rather than relying on them being
+ * implicitly available via this file including every header file in the
+ * project).
+ *
+ * We're now in an intermediate state: we've broken up the header files to avoid
+ * having to include each one multiple times, but have not yet moved the
+ * dependency information into the header files (i.e. we still rely on the
+ * ordering in this file to ensure all a header's dependencies are available in
+ * its translation unit). Each component is now broken up into multiple header
+ * files, corresponding to the sections above (e.g. instead of "foo.h", we now
+ * have "foo_types.h", "foo_structs.h", "foo_externs.h", "foo_inlines.h").
+ *
+ * Those files which have been converted to explicitly include their
+ * inter-component dependencies are now in the initial HERMETIC HEADERS
+ * section. All headers may still rely on jemalloc_preamble.h (which, by fiat,
+ * must be included first in every translation unit) for system headers and
+ * global jemalloc definitions, however.
+ */
+
+/******************************************************************************/
+/* TYPES */
+/******************************************************************************/
+
+#include "jemalloc/internal/extent_types.h"
+#include "jemalloc/internal/base_types.h"
+#include "jemalloc/internal/arena_types.h"
+#include "jemalloc/internal/tcache_types.h"
+#include "jemalloc/internal/prof_types.h"
+
+/******************************************************************************/
+/* STRUCTS */
+/******************************************************************************/
+
+#include "jemalloc/internal/arena_structs_a.h"
+#include "jemalloc/internal/extent_structs.h"
+#include "jemalloc/internal/base_structs.h"
+#include "jemalloc/internal/prof_structs.h"
+#include "jemalloc/internal/arena_structs_b.h"
+#include "jemalloc/internal/tcache_structs.h"
+#include "jemalloc/internal/background_thread_structs.h"
+
+/******************************************************************************/
+/* EXTERNS */
+/******************************************************************************/
+
+#include "jemalloc/internal/jemalloc_internal_externs.h"
+#include "jemalloc/internal/extent_externs.h"
+#include "jemalloc/internal/base_externs.h"
+#include "jemalloc/internal/arena_externs.h"
+#include "jemalloc/internal/large_externs.h"
+#include "jemalloc/internal/tcache_externs.h"
+#include "jemalloc/internal/prof_externs.h"
+#include "jemalloc/internal/background_thread_externs.h"
+
+/******************************************************************************/
+/* INLINES */
+/******************************************************************************/
+
+#include "jemalloc/internal/jemalloc_internal_inlines_a.h"
+#include "jemalloc/internal/base_inlines.h"
+/*
+ * Include portions of arena code interleaved with tcache code in order to
+ * resolve circular dependencies.
+ */
+#include "jemalloc/internal/prof_inlines_a.h"
+#include "jemalloc/internal/arena_inlines_a.h"
+#include "jemalloc/internal/extent_inlines.h"
+#include "jemalloc/internal/jemalloc_internal_inlines_b.h"
+#include "jemalloc/internal/tcache_inlines.h"
+#include "jemalloc/internal/arena_inlines_b.h"
+#include "jemalloc/internal/jemalloc_internal_inlines_c.h"
+#include "jemalloc/internal/prof_inlines_b.h"
+#include "jemalloc/internal/background_thread_inlines.h"
+
+#endif /* JEMALLOC_INTERNAL_INCLUDES_H */
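As the comment block above states, jemalloc_preamble.h must be the first include in every translation unit, with this aggregate header then supplying the remaining, order-sensitive internal headers. A minimal sketch of a hypothetical internal source file (the file name is illustrative only):

/* hypothetical_internal.c */
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

/* Code below may now rely on arena, tcache, extent, prof, etc. being visible. */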
diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h
new file mode 100644
index 000000000000..854fb1e2c7e0
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h
@@ -0,0 +1,168 @@
+#ifndef JEMALLOC_INTERNAL_INLINES_A_H
+#define JEMALLOC_INTERNAL_INLINES_A_H
+
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/bit_util.h"
+#include "jemalloc/internal/jemalloc_internal_types.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/ticker.h"
+
+JEMALLOC_ALWAYS_INLINE malloc_cpuid_t
+malloc_getcpu(void) {
+ assert(have_percpu_arena);
+#if defined(JEMALLOC_HAVE_SCHED_GETCPU)
+ return (malloc_cpuid_t)sched_getcpu();
+#else
+ not_reached();
+ return -1;
+#endif
+}
+
+/* Return the chosen arena index based on current cpu. */
+JEMALLOC_ALWAYS_INLINE unsigned
+percpu_arena_choose(void) {
+ assert(have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena));
+
+ malloc_cpuid_t cpuid = malloc_getcpu();
+ assert(cpuid >= 0);
+
+ unsigned arena_ind;
+ if ((opt_percpu_arena == percpu_arena) || ((unsigned)cpuid < ncpus /
+ 2)) {
+ arena_ind = cpuid;
+ } else {
+ assert(opt_percpu_arena == per_phycpu_arena);
+ /* Hyper threads on the same physical CPU share arena. */
+ arena_ind = cpuid - ncpus / 2;
+ }
+
+ return arena_ind;
+}
+
+/* Return the limit of percpu auto arena range, i.e. arenas[0...ind_limit). */
+JEMALLOC_ALWAYS_INLINE unsigned
+percpu_arena_ind_limit(percpu_arena_mode_t mode) {
+ assert(have_percpu_arena && PERCPU_ARENA_ENABLED(mode));
+ if (mode == per_phycpu_arena && ncpus > 1) {
+ if (ncpus % 2) {
+ /* This likely means a misconfig. */
+ return ncpus / 2 + 1;
+ }
+ return ncpus / 2;
+ } else {
+ return ncpus;
+ }
+}
+
+static inline arena_tdata_t *
+arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) {
+ arena_tdata_t *tdata;
+ arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
+
+ if (unlikely(arenas_tdata == NULL)) {
+ /* arenas_tdata hasn't been initialized yet. */
+ return arena_tdata_get_hard(tsd, ind);
+ }
+ if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) {
+ /*
+ * ind is invalid, cache is old (too small), or tdata to be
+ * initialized.
+ */
+ return (refresh_if_missing ? arena_tdata_get_hard(tsd, ind) :
+ NULL);
+ }
+
+ tdata = &arenas_tdata[ind];
+ if (likely(tdata != NULL) || !refresh_if_missing) {
+ return tdata;
+ }
+ return arena_tdata_get_hard(tsd, ind);
+}
+
+static inline arena_t *
+arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) {
+ arena_t *ret;
+
+ assert(ind < MALLOCX_ARENA_LIMIT);
+
+ ret = (arena_t *)atomic_load_p(&arenas[ind], ATOMIC_ACQUIRE);
+ if (unlikely(ret == NULL)) {
+ if (init_if_missing) {
+ ret = arena_init(tsdn, ind,
+ (extent_hooks_t *)&extent_hooks_default);
+ }
+ }
+ return ret;
+}
+
+static inline ticker_t *
+decay_ticker_get(tsd_t *tsd, unsigned ind) {
+ arena_tdata_t *tdata;
+
+ tdata = arena_tdata_get(tsd, ind, true);
+ if (unlikely(tdata == NULL)) {
+ return NULL;
+ }
+ return &tdata->decay_ticker;
+}
+
+JEMALLOC_ALWAYS_INLINE tcache_bin_t *
+tcache_small_bin_get(tcache_t *tcache, szind_t binind) {
+ assert(binind < NBINS);
+ return &tcache->tbins_small[binind];
+}
+
+JEMALLOC_ALWAYS_INLINE tcache_bin_t *
+tcache_large_bin_get(tcache_t *tcache, szind_t binind) {
+	assert(binind >= NBINS && binind < nhbins);
+ return &tcache->tbins_large[binind - NBINS];
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+tcache_available(tsd_t *tsd) {
+ /*
+	 * A thread's auto tcache might be unavailable if: 1) tcache is still
+	 * being initialized, or 2) it was disabled through the thread.tcache.enabled
+	 * mallctl or config options. This check covers all cases.
+ */
+ if (likely(tsd_tcache_enabled_get(tsd))) {
+ /* Associated arena == NULL implies tcache init in progress. */
+ assert(tsd_tcachep_get(tsd)->arena == NULL ||
+ tcache_small_bin_get(tsd_tcachep_get(tsd), 0)->avail !=
+ NULL);
+ return true;
+ }
+
+ return false;
+}
+
+JEMALLOC_ALWAYS_INLINE tcache_t *
+tcache_get(tsd_t *tsd) {
+ if (!tcache_available(tsd)) {
+ return NULL;
+ }
+
+ return tsd_tcachep_get(tsd);
+}
+
+static inline void
+pre_reentrancy(tsd_t *tsd) {
+ bool fast = tsd_fast(tsd);
+ ++*tsd_reentrancy_levelp_get(tsd);
+ if (fast) {
+ /* Prepare slow path for reentrancy. */
+ tsd_slow_update(tsd);
+ assert(tsd->state == tsd_state_nominal_slow);
+ }
+}
+
+static inline void
+post_reentrancy(tsd_t *tsd) {
+ int8_t *reentrancy_level = tsd_reentrancy_levelp_get(tsd);
+ assert(*reentrancy_level > 0);
+ if (--*reentrancy_level == 0) {
+ tsd_slow_update(tsd);
+ }
+}
+
+#endif /* JEMALLOC_INTERNAL_INLINES_A_H */
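A standalone sketch of the cpu-to-arena mapping that percpu_arena_choose() implements above; the helper name is an assumption for illustration. With ncpus == 8 in the per-physical-CPU mode, cpuids 1 and 5 (hyperthread siblings) both map to arena index 1.

#include <stdbool.h>

static unsigned
percpu_arena_index_sketch(unsigned cpuid, unsigned ncpus, bool per_phycpu) {
	if (!per_phycpu || cpuid < ncpus / 2) {
		return cpuid;
	}
	/* The second hyperthread of a physical core shares its sibling's arena. */
	return cpuid - ncpus / 2;
}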
diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h
new file mode 100644
index 000000000000..2e76e5d8f7ca
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h
@@ -0,0 +1,86 @@
+#ifndef JEMALLOC_INTERNAL_INLINES_B_H
+#define JEMALLOC_INTERNAL_INLINES_B_H
+
+#include "jemalloc/internal/rtree.h"
+
+/* Choose an arena based on a per-thread value. */
+static inline arena_t *
+arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
+ arena_t *ret;
+
+ if (arena != NULL) {
+ return arena;
+ }
+
+ /* During reentrancy, arena 0 is the safest bet. */
+ if (unlikely(tsd_reentrancy_level_get(tsd) > 0)) {
+ return arena_get(tsd_tsdn(tsd), 0, true);
+ }
+
+ ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
+ if (unlikely(ret == NULL)) {
+ ret = arena_choose_hard(tsd, internal);
+ assert(ret);
+ if (tcache_available(tsd)) {
+ tcache_t *tcache = tcache_get(tsd);
+ if (tcache->arena != NULL) {
+ /* See comments in tcache_data_init().*/
+ assert(tcache->arena ==
+ arena_get(tsd_tsdn(tsd), 0, false));
+ if (tcache->arena != ret) {
+ tcache_arena_reassociate(tsd_tsdn(tsd),
+ tcache, ret);
+ }
+ } else {
+ tcache_arena_associate(tsd_tsdn(tsd), tcache,
+ ret);
+ }
+ }
+ }
+
+ /*
+ * Note that for percpu arena, if the current arena is outside of the
+ * auto percpu arena range, (i.e. thread is assigned to a manually
+ * managed arena), then percpu arena is skipped.
+ */
+ if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena) &&
+ !internal && (arena_ind_get(ret) <
+ percpu_arena_ind_limit(opt_percpu_arena)) && (ret->last_thd !=
+ tsd_tsdn(tsd))) {
+ unsigned ind = percpu_arena_choose();
+ if (arena_ind_get(ret) != ind) {
+ percpu_arena_update(tsd, ind);
+ ret = tsd_arena_get(tsd);
+ }
+ ret->last_thd = tsd_tsdn(tsd);
+ }
+
+ return ret;
+}
+
+static inline arena_t *
+arena_choose(tsd_t *tsd, arena_t *arena) {
+ return arena_choose_impl(tsd, arena, false);
+}
+
+static inline arena_t *
+arena_ichoose(tsd_t *tsd, arena_t *arena) {
+ return arena_choose_impl(tsd, arena, true);
+}
+
+static inline bool
+arena_is_auto(arena_t *arena) {
+ assert(narenas_auto > 0);
+ return (arena_ind_get(arena) < narenas_auto);
+}
+
+JEMALLOC_ALWAYS_INLINE extent_t *
+iealloc(tsdn_t *tsdn, const void *ptr) {
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+
+ return rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, true);
+}
+
+#endif /* JEMALLOC_INTERNAL_INLINES_B_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h
new file mode 100644
index 000000000000..7ffce6fb0356
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h
@@ -0,0 +1,197 @@
+#ifndef JEMALLOC_INTERNAL_INLINES_C_H
+#define JEMALLOC_INTERNAL_INLINES_C_H
+
+#include "jemalloc/internal/jemalloc_internal_types.h"
+#include "jemalloc/internal/sz.h"
+#include "jemalloc/internal/witness.h"
+
+JEMALLOC_ALWAYS_INLINE arena_t *
+iaalloc(tsdn_t *tsdn, const void *ptr) {
+ assert(ptr != NULL);
+
+ return arena_aalloc(tsdn, ptr);
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+isalloc(tsdn_t *tsdn, const void *ptr) {
+ assert(ptr != NULL);
+
+ return arena_salloc(tsdn, ptr);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
+ bool is_internal, arena_t *arena, bool slow_path) {
+ void *ret;
+
+ assert(size != 0);
+ assert(!is_internal || tcache == NULL);
+ assert(!is_internal || arena == NULL || arena_is_auto(arena));
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
+ if (config_stats && is_internal && likely(ret != NULL)) {
+ arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret));
+ }
+ return ret;
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path) {
+ return iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd), false,
+ NULL, slow_path);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
+ tcache_t *tcache, bool is_internal, arena_t *arena) {
+ void *ret;
+
+ assert(usize != 0);
+ assert(usize == sz_sa2u(usize, alignment));
+ assert(!is_internal || tcache == NULL);
+ assert(!is_internal || arena == NULL || arena_is_auto(arena));
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache);
+ assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
+ if (config_stats && is_internal && likely(ret != NULL)) {
+ arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret));
+ }
+ return ret;
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
+ tcache_t *tcache, arena_t *arena) {
+ return ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) {
+ return ipallocztm(tsd_tsdn(tsd), usize, alignment, zero,
+ tcache_get(tsd), false, NULL);
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+ivsalloc(tsdn_t *tsdn, const void *ptr) {
+ return arena_vsalloc(tsdn, ptr);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, alloc_ctx_t *alloc_ctx,
+ bool is_internal, bool slow_path) {
+ assert(ptr != NULL);
+ assert(!is_internal || tcache == NULL);
+ assert(!is_internal || arena_is_auto(iaalloc(tsdn, ptr)));
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+ if (config_stats && is_internal) {
+ arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, ptr));
+ }
+ if (!is_internal && tsd_reentrancy_level_get(tsdn_tsd(tsdn)) != 0) {
+ assert(tcache == NULL);
+ }
+ arena_dalloc(tsdn, ptr, tcache, alloc_ctx, slow_path);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+idalloc(tsd_t *tsd, void *ptr) {
+ idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd), NULL, false, true);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
+ alloc_ctx_t *alloc_ctx, bool slow_path) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+ arena_sdalloc(tsdn, ptr, size, tcache, alloc_ctx, slow_path);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
+ size_t extra, size_t alignment, bool zero, tcache_t *tcache,
+ arena_t *arena) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+ void *p;
+ size_t usize, copysize;
+
+ usize = sz_sa2u(size + extra, alignment);
+ if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
+ return NULL;
+ }
+ p = ipalloct(tsdn, usize, alignment, zero, tcache, arena);
+ if (p == NULL) {
+ if (extra == 0) {
+ return NULL;
+ }
+ /* Try again, without extra this time. */
+ usize = sz_sa2u(size, alignment);
+ if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
+ return NULL;
+ }
+ p = ipalloct(tsdn, usize, alignment, zero, tcache, arena);
+ if (p == NULL) {
+ return NULL;
+ }
+ }
+ /*
+ * Copy at most size bytes (not size+extra), since the caller has no
+ * expectation that the extra bytes will be reliably preserved.
+ */
+ copysize = (size < oldsize) ? size : oldsize;
+ memcpy(p, ptr, copysize);
+ isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
+ return p;
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+iralloct(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t alignment,
+ bool zero, tcache_t *tcache, arena_t *arena) {
+ assert(ptr != NULL);
+ assert(size != 0);
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
+ != 0) {
+ /*
+ * Existing object alignment is inadequate; allocate new space
+ * and copy.
+ */
+ return iralloct_realign(tsdn, ptr, oldsize, size, 0, alignment,
+ zero, tcache, arena);
+ }
+
+ return arena_ralloc(tsdn, arena, ptr, oldsize, size, alignment, zero,
+ tcache);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
+ bool zero) {
+ return iralloct(tsd_tsdn(tsd), ptr, oldsize, size, alignment, zero,
+ tcache_get(tsd), NULL);
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
+ size_t alignment, bool zero) {
+ assert(ptr != NULL);
+ assert(size != 0);
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
+ != 0) {
+ /* Existing object alignment is inadequate. */
+ return true;
+ }
+
+ return arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero);
+}
+
+#endif /* JEMALLOC_INTERNAL_INLINES_C_H */
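The reallocation paths above (iralloct(), ixalloc()) only attempt an in-place resize when the existing pointer already satisfies the requested power-of-two alignment; otherwise they fall back to allocate-and-copy. A minimal sketch of that predicate, with an assumed helper name:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* True if ptr already meets the alignment (alignment == 0 means no constraint). */
static bool
alignment_is_adequate(const void *ptr, size_t alignment) {
	return alignment == 0 ||
	    ((uintptr_t)ptr & ((uintptr_t)alignment - 1)) == 0;
}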
diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h
index a08ba772ead4..4571895ec371 100644
--- a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h
+++ b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h
@@ -1,57 +1,40 @@
-/*
- * JEMALLOC_ALWAYS_INLINE and JEMALLOC_INLINE are used within header files for
- * functions that are static inline functions if inlining is enabled, and
- * single-definition library-private functions if inlining is disabled.
- *
- * JEMALLOC_ALWAYS_INLINE_C and JEMALLOC_INLINE_C are for use in .c files, in
- * which case the denoted functions are always static, regardless of whether
- * inlining is enabled.
- */
-#if defined(JEMALLOC_DEBUG) || defined(JEMALLOC_CODE_COVERAGE)
- /* Disable inlining to make debugging/profiling easier. */
-# define JEMALLOC_ALWAYS_INLINE
-# define JEMALLOC_ALWAYS_INLINE_C static
-# define JEMALLOC_INLINE
-# define JEMALLOC_INLINE_C static
-# define inline
-#else
-# define JEMALLOC_ENABLE_INLINE
-# ifdef JEMALLOC_HAVE_ATTR
-# define JEMALLOC_ALWAYS_INLINE \
- static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline)
-# define JEMALLOC_ALWAYS_INLINE_C \
- static inline JEMALLOC_ATTR(always_inline)
-# else
-# define JEMALLOC_ALWAYS_INLINE static inline
-# define JEMALLOC_ALWAYS_INLINE_C static inline
-# endif
-# define JEMALLOC_INLINE static inline
-# define JEMALLOC_INLINE_C static inline
-# ifdef _MSC_VER
-# define inline _inline
-# endif
-#endif
+#ifndef JEMALLOC_INTERNAL_MACROS_H
+#define JEMALLOC_INTERNAL_MACROS_H
-#ifdef JEMALLOC_CC_SILENCE
-# define UNUSED JEMALLOC_ATTR(unused)
+#ifdef JEMALLOC_DEBUG
+# define JEMALLOC_ALWAYS_INLINE static inline
#else
-# define UNUSED
+# define JEMALLOC_ALWAYS_INLINE JEMALLOC_ATTR(always_inline) static inline
#endif
+#ifdef _MSC_VER
+# define inline _inline
+#endif
+
+#define UNUSED JEMALLOC_ATTR(unused)
-#define ZU(z) ((size_t)z)
-#define ZI(z) ((ssize_t)z)
-#define QU(q) ((uint64_t)q)
-#define QI(q) ((int64_t)q)
+#define ZU(z) ((size_t)z)
+#define ZD(z) ((ssize_t)z)
+#define QU(q) ((uint64_t)q)
+#define QD(q) ((int64_t)q)
-#define KZU(z) ZU(z##ULL)
-#define KZI(z) ZI(z##LL)
-#define KQU(q) QU(q##ULL)
-#define KQI(q) QI(q##LL)
+#define KZU(z) ZU(z##ULL)
+#define KZD(z) ZD(z##LL)
+#define KQU(q) QU(q##ULL)
+#define KQD(q)	QD(q##LL)
#ifndef __DECONST
# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif
-#ifndef JEMALLOC_HAS_RESTRICT
+#if !defined(JEMALLOC_HAS_RESTRICT) || defined(__cplusplus)
# define restrict
#endif
+
+/* Various function pointers are static and immutable except during testing. */
+#ifdef JEMALLOC_JET
+# define JET_MUTABLE
+#else
+# define JET_MUTABLE const
+#endif
+
+#endif /* JEMALLOC_INTERNAL_MACROS_H */
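The JET_MUTABLE qualifier introduced above lets test builds (JEMALLOC_JET) overwrite selected function pointers while production builds keep them const, as the large_dalloc_junk hooks later in this patch do. A sketch of the idiom with placeholder names, assuming jemalloc_internal_macros.h is in scope:

#include <stddef.h>

typedef void (example_hook_t)(void *, size_t);

static void
example_hook_default(void *ptr, size_t size) {
	(void)ptr;
	(void)size;
}

/* const in normal builds; writable when JEMALLOC_JET is defined. */
example_hook_t *JET_MUTABLE example_hook = example_hook_default;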
diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h
new file mode 100644
index 000000000000..50f9d001d54d
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h
@@ -0,0 +1,178 @@
+#ifndef JEMALLOC_INTERNAL_TYPES_H
+#define JEMALLOC_INTERNAL_TYPES_H
+
+/* Page size index type. */
+typedef unsigned pszind_t;
+
+/* Size class index type. */
+typedef unsigned szind_t;
+
+/* Processor / core id type. */
+typedef int malloc_cpuid_t;
+
+/*
+ * Flags bits:
+ *
+ * a: arena
+ * t: tcache
+ * 0: unused
+ * z: zero
+ * n: alignment
+ *
+ * aaaaaaaa aaaatttt tttttttt 0znnnnnn
+ */
+#define MALLOCX_ARENA_BITS 12
+#define MALLOCX_TCACHE_BITS 12
+#define MALLOCX_LG_ALIGN_BITS 6
+#define MALLOCX_ARENA_SHIFT 20
+#define MALLOCX_TCACHE_SHIFT 8
+#define MALLOCX_ARENA_MASK \
+ (((1 << MALLOCX_ARENA_BITS) - 1) << MALLOCX_ARENA_SHIFT)
+/* NB: Arena index bias decreases the maximum number of arenas by 1. */
+#define MALLOCX_ARENA_LIMIT ((1 << MALLOCX_ARENA_BITS) - 1)
+#define MALLOCX_TCACHE_MASK \
+ (((1 << MALLOCX_TCACHE_BITS) - 1) << MALLOCX_TCACHE_SHIFT)
+#define MALLOCX_TCACHE_MAX ((1 << MALLOCX_TCACHE_BITS) - 3)
+#define MALLOCX_LG_ALIGN_MASK ((1 << MALLOCX_LG_ALIGN_BITS) - 1)
+/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
+#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \
+ (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
+#define MALLOCX_ALIGN_GET(flags) \
+ (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
+#define MALLOCX_ZERO_GET(flags) \
+ ((bool)(flags & MALLOCX_ZERO))
+
+#define MALLOCX_TCACHE_GET(flags) \
+ (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> MALLOCX_TCACHE_SHIFT)) - 2)
+#define MALLOCX_ARENA_GET(flags) \
+ (((unsigned)(((unsigned)flags) >> MALLOCX_ARENA_SHIFT)) - 1)
+
+/* Smallest size class to support. */
+#define TINY_MIN (1U << LG_TINY_MIN)
+
+/*
+ * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
+ * classes).
+ */
+#ifndef LG_QUANTUM
+# if (defined(__i386__) || defined(_M_IX86))
+# define LG_QUANTUM 4
+# endif
+# ifdef __ia64__
+# define LG_QUANTUM 4
+# endif
+# ifdef __alpha__
+# define LG_QUANTUM 4
+# endif
+# if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__))
+# define LG_QUANTUM 4
+# endif
+# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
+# define LG_QUANTUM 4
+# endif
+# ifdef __arm__
+# define LG_QUANTUM 3
+# endif
+# ifdef __aarch64__
+# define LG_QUANTUM 4
+# endif
+# ifdef __hppa__
+# define LG_QUANTUM 4
+# endif
+# ifdef __mips__
+# define LG_QUANTUM 3
+# endif
+# ifdef __or1k__
+# define LG_QUANTUM 3
+# endif
+# ifdef __powerpc__
+# define LG_QUANTUM 4
+# endif
+# ifdef __riscv__
+# define LG_QUANTUM 4
+# endif
+# ifdef __s390__
+# define LG_QUANTUM 4
+# endif
+# ifdef __SH4__
+# define LG_QUANTUM 4
+# endif
+# ifdef __tile__
+# define LG_QUANTUM 4
+# endif
+# ifdef __le32__
+# define LG_QUANTUM 4
+# endif
+# ifndef LG_QUANTUM
+# error "Unknown minimum alignment for architecture; specify via "
+ "--with-lg-quantum"
+# endif
+#endif
+
+#define QUANTUM ((size_t)(1U << LG_QUANTUM))
+#define QUANTUM_MASK (QUANTUM - 1)
+
+/* Return the smallest quantum multiple that is >= a. */
+#define QUANTUM_CEILING(a) \
+ (((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
+
+#define LONG ((size_t)(1U << LG_SIZEOF_LONG))
+#define LONG_MASK (LONG - 1)
+
+/* Return the smallest long multiple that is >= a. */
+#define LONG_CEILING(a) \
+ (((a) + LONG_MASK) & ~LONG_MASK)
+
+#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
+#define PTR_MASK (SIZEOF_PTR - 1)
+
+/* Return the smallest (void *) multiple that is >= a. */
+#define PTR_CEILING(a) \
+ (((a) + PTR_MASK) & ~PTR_MASK)
+
+/*
+ * Maximum size of L1 cache line. This is used to avoid cache line aliasing.
+ * In addition, this controls the spacing of cacheline-spaced size classes.
+ *
+ * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
+ * only handle raw constants.
+ */
+#define LG_CACHELINE 6
+#define CACHELINE 64
+#define CACHELINE_MASK (CACHELINE - 1)
+
+/* Return the smallest cacheline multiple that is >= s. */
+#define CACHELINE_CEILING(s) \
+ (((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
+
+/* Return the nearest aligned address at or below a. */
+#define ALIGNMENT_ADDR2BASE(a, alignment) \
+ ((void *)((uintptr_t)(a) & ((~(alignment)) + 1)))
+
+/* Return the offset between a and the nearest aligned address at or below a. */
+#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
+ ((size_t)((uintptr_t)(a) & (alignment - 1)))
+
+/* Return the smallest alignment multiple that is >= s. */
+#define ALIGNMENT_CEILING(s, alignment) \
+ (((s) + (alignment - 1)) & ((~(alignment)) + 1))
+
+/* Declare a variable-length array. */
+#if __STDC_VERSION__ < 199901L
+# ifdef _MSC_VER
+# include <malloc.h>
+# define alloca _alloca
+# else
+# ifdef JEMALLOC_HAS_ALLOCA_H
+# include <alloca.h>
+# else
+# include <stdlib.h>
+# endif
+# endif
+# define VARIABLE_ARRAY(type, name, count) \
+ type *name = alloca(sizeof(type) * (count))
+#else
+# define VARIABLE_ARRAY(type, name, count) type name[(count)]
+#endif
+
+#endif /* JEMALLOC_INTERNAL_TYPES_H */
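A worked example of the MALLOCX flag layout documented above. The SKETCH_* macros below merely mirror the shifts and masks from this header for a self-contained illustration; they are not definitions added by the patch.

#include <stdio.h>
#include <stddef.h>

#define SKETCH_ARENA_SHIFT	20
#define SKETCH_LG_ALIGN_MASK	((1 << 6) - 1)

int
main(void) {
	/* Encode arena 3 (stored with a +1 bias) and 64-byte (2^6) alignment. */
	unsigned flags = ((3u + 1) << SKETCH_ARENA_SHIFT) | 6u;

	unsigned arena = (flags >> SKETCH_ARENA_SHIFT) - 1;		/* 3 */
	size_t align = (size_t)1 << (flags & SKETCH_LG_ALIGN_MASK);	/* 64 */
	printf("arena=%u align=%zu\n", arena, align);
	return 0;
}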
diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_preamble.h b/contrib/jemalloc/include/jemalloc/internal/jemalloc_preamble.h
new file mode 100644
index 000000000000..63b15a834653
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/jemalloc_preamble.h
@@ -0,0 +1,176 @@
+#ifndef JEMALLOC_PREAMBLE_H
+#define JEMALLOC_PREAMBLE_H
+
+#include "jemalloc_internal_defs.h"
+#include "jemalloc/internal/jemalloc_internal_decls.h"
+
+#ifdef JEMALLOC_UTRACE
+#include <sys/ktrace.h>
+#endif
+
+#include "un-namespace.h"
+#include "libc_private.h"
+
+#define JEMALLOC_NO_DEMANGLE
+#ifdef JEMALLOC_JET
+# undef JEMALLOC_IS_MALLOC
+# define JEMALLOC_N(n) jet_##n
+# include "jemalloc/internal/public_namespace.h"
+# define JEMALLOC_NO_RENAME
+# include "../jemalloc.h"
+# undef JEMALLOC_NO_RENAME
+#else
+# define JEMALLOC_N(n) __je_##n
+# include "../jemalloc.h"
+#endif
+
+#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
+#include <libkern/OSAtomic.h>
+#endif
+
+#ifdef JEMALLOC_ZONE
+#include <mach/mach_error.h>
+#include <mach/mach_init.h>
+#include <mach/vm_map.h>
+#endif
+
+#include "jemalloc/internal/jemalloc_internal_macros.h"
+
+/*
+ * Note that the ordering matters here; the hook itself is name-mangled. We
+ * want the inclusion of hooks to happen early, so that we hook as much as
+ * possible.
+ */
+#ifndef JEMALLOC_NO_PRIVATE_NAMESPACE
+# ifndef JEMALLOC_JET
+# include "jemalloc/internal/private_namespace.h"
+# else
+# include "jemalloc/internal/private_namespace_jet.h"
+# endif
+#endif
+#include "jemalloc/internal/hooks.h"
+
+static const bool config_debug =
+#ifdef JEMALLOC_DEBUG
+ true
+#else
+ false
+#endif
+ ;
+static const bool have_dss =
+#ifdef JEMALLOC_DSS
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_fill =
+#ifdef JEMALLOC_FILL
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_lazy_lock = true;
+static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
+static const bool config_prof =
+#ifdef JEMALLOC_PROF
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_prof_libgcc =
+#ifdef JEMALLOC_PROF_LIBGCC
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_prof_libunwind =
+#ifdef JEMALLOC_PROF_LIBUNWIND
+ true
+#else
+ false
+#endif
+ ;
+static const bool maps_coalesce =
+#ifdef JEMALLOC_MAPS_COALESCE
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_stats =
+#ifdef JEMALLOC_STATS
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_thp =
+#ifdef JEMALLOC_THP
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_tls =
+#ifdef JEMALLOC_TLS
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_utrace =
+#ifdef JEMALLOC_UTRACE
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_xmalloc =
+#ifdef JEMALLOC_XMALLOC
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_cache_oblivious =
+#ifdef JEMALLOC_CACHE_OBLIVIOUS
+ true
+#else
+ false
+#endif
+ ;
+#ifdef JEMALLOC_HAVE_SCHED_GETCPU
+/* Currently percpu_arena depends on sched_getcpu. */
+#define JEMALLOC_PERCPU_ARENA
+#endif
+static const bool have_percpu_arena =
+#ifdef JEMALLOC_PERCPU_ARENA
+ true
+#else
+ false
+#endif
+ ;
+/*
+ * Undocumented, and not recommended; the application should take full
+ * responsibility for tracking provenance.
+ */
+static const bool force_ivsalloc =
+#ifdef JEMALLOC_FORCE_IVSALLOC
+ true
+#else
+ false
+#endif
+ ;
+static const bool have_background_thread =
+#ifdef JEMALLOC_BACKGROUND_THREAD
+ true
+#else
+ false
+#endif
+ ;
+
+#endif /* JEMALLOC_PREAMBLE_H */
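The config_* constants above replace scattered #ifdefs with compile-time booleans: an ordinary `if' on such a constant is type-checked in every configuration, and the disabled branch is eliminated by the optimizer. A hypothetical consumer, assuming jemalloc_preamble.h has already been included:

#include <stdio.h>

static void
report_build_sketch(void) {
	if (config_stats) {
		printf("statistics support compiled in\n");
	}
	if (config_debug) {
		printf("assertions enabled\n");
	}
}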
diff --git a/contrib/jemalloc/include/jemalloc/internal/large_externs.h b/contrib/jemalloc/include/jemalloc/internal/large_externs.h
new file mode 100644
index 000000000000..3f36282cd403
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/large_externs.h
@@ -0,0 +1,26 @@
+#ifndef JEMALLOC_INTERNAL_LARGE_EXTERNS_H
+#define JEMALLOC_INTERNAL_LARGE_EXTERNS_H
+
+void *large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
+void *large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
+ bool zero);
+bool large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
+ size_t usize_max, bool zero);
+void *large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
+ size_t alignment, bool zero, tcache_t *tcache);
+
+typedef void (large_dalloc_junk_t)(void *, size_t);
+extern large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk;
+
+typedef void (large_dalloc_maybe_junk_t)(void *, size_t);
+extern large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk;
+
+void large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent);
+void large_dalloc_finish(tsdn_t *tsdn, extent_t *extent);
+void large_dalloc(tsdn_t *tsdn, extent_t *extent);
+size_t large_salloc(tsdn_t *tsdn, const extent_t *extent);
+prof_tctx_t *large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent);
+void large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx);
+void large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent);
+
+#endif /* JEMALLOC_INTERNAL_LARGE_EXTERNS_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/malloc_io.h b/contrib/jemalloc/include/jemalloc/internal/malloc_io.h
new file mode 100644
index 000000000000..47ae58ec352f
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/malloc_io.h
@@ -0,0 +1,62 @@
+#ifndef JEMALLOC_INTERNAL_MALLOC_IO_H
+#define JEMALLOC_INTERNAL_MALLOC_IO_H
+
+#ifdef _WIN32
+# ifdef _WIN64
+# define FMT64_PREFIX "ll"
+# define FMTPTR_PREFIX "ll"
+# else
+# define FMT64_PREFIX "ll"
+# define FMTPTR_PREFIX ""
+# endif
+# define FMTd32 "d"
+# define FMTu32 "u"
+# define FMTx32 "x"
+# define FMTd64 FMT64_PREFIX "d"
+# define FMTu64 FMT64_PREFIX "u"
+# define FMTx64 FMT64_PREFIX "x"
+# define FMTdPTR FMTPTR_PREFIX "d"
+# define FMTuPTR FMTPTR_PREFIX "u"
+# define FMTxPTR FMTPTR_PREFIX "x"
+#else
+# include <inttypes.h>
+# define FMTd32 PRId32
+# define FMTu32 PRIu32
+# define FMTx32 PRIx32
+# define FMTd64 PRId64
+# define FMTu64 PRIu64
+# define FMTx64 PRIx64
+# define FMTdPTR PRIdPTR
+# define FMTuPTR PRIuPTR
+# define FMTxPTR PRIxPTR
+#endif
+
+/* Size of stack-allocated buffer passed to buferror(). */
+#define BUFERROR_BUF 64
+
+/*
+ * Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be
+ * large enough for all possible uses within jemalloc.
+ */
+#define MALLOC_PRINTF_BUFSIZE 4096
+
+int buferror(int err, char *buf, size_t buflen);
+uintmax_t malloc_strtoumax(const char *restrict nptr, char **restrict endptr,
+ int base);
+void malloc_write(const char *s);
+
+/*
+ * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating
+ * point math.
+ */
+size_t malloc_vsnprintf(char *str, size_t size, const char *format,
+ va_list ap);
+size_t malloc_snprintf(char *str, size_t size, const char *format, ...)
+ JEMALLOC_FORMAT_PRINTF(3, 4);
+void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
+ const char *format, va_list ap);
+void malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
+ const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4);
+void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
+
+#endif /* JEMALLOC_INTERNAL_MALLOC_IO_H */
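An assumed usage example (not taken from the patch) of the portable format macros together with the float-free printf replacements declared above, assuming malloc_io.h is in scope:

#include <stdint.h>

static void
write_alloc_count_sketch(uint64_t nmalloc) {
	char buf[64];

	malloc_snprintf(buf, sizeof(buf), "allocations: %"FMTu64"\n", nmalloc);
	malloc_write(buf);
}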
diff --git a/contrib/jemalloc/include/jemalloc/internal/mb.h b/contrib/jemalloc/include/jemalloc/internal/mb.h
deleted file mode 100644
index e58da5c32477..000000000000
--- a/contrib/jemalloc/include/jemalloc/internal/mb.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-void mb_write(void);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MB_C_))
-#ifdef __i386__
-/*
- * According to the Intel Architecture Software Developer's Manual, current
- * processors execute instructions in order from the perspective of other
- * processors in a multiprocessor system, but 1) Intel reserves the right to
- * change that, and 2) the compiler's optimizer could re-order instructions if
- * there weren't some form of barrier. Therefore, even if running on an
- * architecture that does not need memory barriers (everything through at least
- * i686), an "optimizer barrier" is necessary.
- */
-JEMALLOC_INLINE void
-mb_write(void)
-{
-
-# if 0
- /* This is a true memory barrier. */
- asm volatile ("pusha;"
- "xor %%eax,%%eax;"
- "cpuid;"
- "popa;"
- : /* Outputs. */
- : /* Inputs. */
- : "memory" /* Clobbers. */
- );
-# else
- /*
- * This is hopefully enough to keep the compiler from reordering
- * instructions around this one.
- */
- asm volatile ("nop;"
- : /* Outputs. */
- : /* Inputs. */
- : "memory" /* Clobbers. */
- );
-# endif
-}
-#elif (defined(__amd64__) || defined(__x86_64__))
-JEMALLOC_INLINE void
-mb_write(void)
-{
-
- asm volatile ("sfence"
- : /* Outputs. */
- : /* Inputs. */
- : "memory" /* Clobbers. */
- );
-}
-#elif defined(__powerpc__)
-JEMALLOC_INLINE void
-mb_write(void)
-{
-
- asm volatile ("eieio"
- : /* Outputs. */
- : /* Inputs. */
- : "memory" /* Clobbers. */
- );
-}
-#elif defined(__sparc__) && defined(__arch64__)
-JEMALLOC_INLINE void
-mb_write(void)
-{
-
- asm volatile ("membar #StoreStore"
- : /* Outputs. */
- : /* Inputs. */
- : "memory" /* Clobbers. */
- );
-}
-#elif defined(__tile__)
-JEMALLOC_INLINE void
-mb_write(void)
-{
-
- __sync_synchronize();
-}
-#else
-/*
- * This is much slower than a simple memory barrier, but the semantics of mutex
- * unlock make this work.
- */
-JEMALLOC_INLINE void
-mb_write(void)
-{
- malloc_mutex_t mtx;
-
- malloc_mutex_init(&mtx, "mb", WITNESS_RANK_OMIT);
- malloc_mutex_lock(TSDN_NULL, &mtx);
- malloc_mutex_unlock(TSDN_NULL, &mtx);
-}
-#endif
-#endif
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
diff --git a/contrib/jemalloc/include/jemalloc/internal/mutex.h b/contrib/jemalloc/include/jemalloc/internal/mutex.h
index e03a6d071ea4..0013cbe9eebd 100644
--- a/contrib/jemalloc/include/jemalloc/internal/mutex.h
+++ b/contrib/jemalloc/include/jemalloc/internal/mutex.h
@@ -1,143 +1,246 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
+#ifndef JEMALLOC_INTERNAL_MUTEX_H
+#define JEMALLOC_INTERNAL_MUTEX_H
+
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/mutex_prof.h"
+#include "jemalloc/internal/tsd.h"
+#include "jemalloc/internal/witness.h"
+
+typedef enum {
+ /* Can only acquire one mutex of a given witness rank at a time. */
+ malloc_mutex_rank_exclusive,
+ /*
+ * Can acquire multiple mutexes of the same witness rank, but in
+ * address-ascending order only.
+ */
+ malloc_mutex_address_ordered
+} malloc_mutex_lock_order_t;
typedef struct malloc_mutex_s malloc_mutex_t;
-
-#ifdef _WIN32
-# define MALLOC_MUTEX_INITIALIZER
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
-# define MALLOC_MUTEX_INITIALIZER \
- {OS_UNFAIR_LOCK_INIT, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
-#elif (defined(JEMALLOC_OSSPIN))
-# define MALLOC_MUTEX_INITIALIZER {0, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
-#elif (defined(JEMALLOC_MUTEX_INIT_CB))
-# define MALLOC_MUTEX_INITIALIZER \
- {PTHREAD_MUTEX_INITIALIZER, NULL, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
-#else
-# if (defined(JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP) && \
- defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP))
-# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP
-# define MALLOC_MUTEX_INITIALIZER \
- {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP, \
- WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
-# else
-# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
-# define MALLOC_MUTEX_INITIALIZER \
- {PTHREAD_MUTEX_INITIALIZER, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
-# endif
-#endif
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
struct malloc_mutex_s {
+ union {
+ struct {
+ /*
+ * prof_data is defined first to reduce cacheline
+ * bouncing: the data is not touched by the mutex holder
+			 * during unlocking, while it might be modified by
+ * contenders. Having it before the mutex itself could
+ * avoid prefetching a modified cacheline (for the
+ * unlocking thread).
+ */
+ mutex_prof_data_t prof_data;
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
- SRWLOCK lock;
+ SRWLOCK lock;
# else
- CRITICAL_SECTION lock;
+ CRITICAL_SECTION lock;
# endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
- os_unfair_lock lock;
+ os_unfair_lock lock;
#elif (defined(JEMALLOC_OSSPIN))
- OSSpinLock lock;
+ OSSpinLock lock;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
- pthread_mutex_t lock;
- malloc_mutex_t *postponed_next;
+ pthread_mutex_t lock;
+ malloc_mutex_t *postponed_next;
#else
- pthread_mutex_t lock;
+ pthread_mutex_t lock;
#endif
- witness_t witness;
-};
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-#ifdef JEMALLOC_LAZY_LOCK
-extern bool isthreaded;
+ };
+ /*
+ * We only touch witness when configured w/ debug. However we
+ * keep the field in a union when !debug so that we don't have
+	 * to pollute the code base with #ifdefs, while avoiding the
+ * memory cost.
+ */
+#if !defined(JEMALLOC_DEBUG)
+ witness_t witness;
+ malloc_mutex_lock_order_t lock_order;
#endif
+ };
-bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
- witness_rank_t rank);
-void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
-void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
-void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
-bool malloc_mutex_first_thread(void);
-bool malloc_mutex_boot(void);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-void malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex);
-void malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex);
-void malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
-void malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
+#if defined(JEMALLOC_DEBUG)
+ witness_t witness;
+ malloc_mutex_lock_order_t lock_order;
#endif
+};
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
-JEMALLOC_INLINE void
-malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex)
-{
+/*
+ * Based on benchmark results, a fixed spin with this amount of retries works
+ * well for our critical sections.
+ */
+#define MALLOC_MUTEX_MAX_SPIN 250
- witness_assert_not_owner(tsdn, &mutex->witness);
- if (isthreaded) {
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
- AcquireSRWLockExclusive(&mutex->lock);
+# define MALLOC_MUTEX_LOCK(m) AcquireSRWLockExclusive(&(m)->lock)
+# define MALLOC_MUTEX_UNLOCK(m) ReleaseSRWLockExclusive(&(m)->lock)
+# define MALLOC_MUTEX_TRYLOCK(m) (!TryAcquireSRWLockExclusive(&(m)->lock))
# else
- EnterCriticalSection(&mutex->lock);
+# define MALLOC_MUTEX_LOCK(m) EnterCriticalSection(&(m)->lock)
+# define MALLOC_MUTEX_UNLOCK(m) LeaveCriticalSection(&(m)->lock)
+# define MALLOC_MUTEX_TRYLOCK(m) (!TryEnterCriticalSection(&(m)->lock))
# endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
- os_unfair_lock_lock(&mutex->lock);
+# define MALLOC_MUTEX_LOCK(m) os_unfair_lock_lock(&(m)->lock)
+# define MALLOC_MUTEX_UNLOCK(m) os_unfair_lock_unlock(&(m)->lock)
+# define MALLOC_MUTEX_TRYLOCK(m) (!os_unfair_lock_trylock(&(m)->lock))
#elif (defined(JEMALLOC_OSSPIN))
- OSSpinLockLock(&mutex->lock);
+# define MALLOC_MUTEX_LOCK(m) OSSpinLockLock(&(m)->lock)
+# define MALLOC_MUTEX_UNLOCK(m) OSSpinLockUnlock(&(m)->lock)
+# define MALLOC_MUTEX_TRYLOCK(m) (!OSSpinLockTry(&(m)->lock))
#else
- pthread_mutex_lock(&mutex->lock);
+# define MALLOC_MUTEX_LOCK(m) pthread_mutex_lock(&(m)->lock)
+# define MALLOC_MUTEX_UNLOCK(m) pthread_mutex_unlock(&(m)->lock)
+# define MALLOC_MUTEX_TRYLOCK(m) (pthread_mutex_trylock(&(m)->lock) != 0)
#endif
- }
- witness_lock(tsdn, &mutex->witness);
-}
-JEMALLOC_INLINE void
-malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex)
-{
+#define LOCK_PROF_DATA_INITIALIZER \
+ {NSTIME_ZERO_INITIALIZER, NSTIME_ZERO_INITIALIZER, 0, 0, 0, \
+ ATOMIC_INIT(0), 0, NULL, 0}
- witness_unlock(tsdn, &mutex->witness);
- if (isthreaded) {
#ifdef _WIN32
-# if _WIN32_WINNT >= 0x0600
- ReleaseSRWLockExclusive(&mutex->lock);
-# else
- LeaveCriticalSection(&mutex->lock);
-# endif
+# define MALLOC_MUTEX_INITIALIZER
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
- os_unfair_lock_unlock(&mutex->lock);
+# define MALLOC_MUTEX_INITIALIZER \
+ {{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT}}, \
+ WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#elif (defined(JEMALLOC_OSSPIN))
- OSSpinLockUnlock(&mutex->lock);
+# define MALLOC_MUTEX_INITIALIZER \
+ {{{LOCK_PROF_DATA_INITIALIZER, 0}}, \
+ WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
+#elif (defined(JEMALLOC_MUTEX_INIT_CB))
+# define MALLOC_MUTEX_INITIALIZER \
+ {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL}}, \
+ WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#else
- pthread_mutex_unlock(&mutex->lock);
+# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
+# define MALLOC_MUTEX_INITIALIZER \
+ {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER}}, \
+ WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#endif
+
+#ifdef JEMALLOC_LAZY_LOCK
+extern bool isthreaded;
+#endif
+
+bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
+ witness_rank_t rank, malloc_mutex_lock_order_t lock_order);
+void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
+void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
+void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
+bool malloc_mutex_first_thread(void);
+bool malloc_mutex_boot(void);
+void malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex);
+
+void malloc_mutex_lock_slow(malloc_mutex_t *mutex);
+
+static inline void
+malloc_mutex_lock_final(malloc_mutex_t *mutex) {
+ MALLOC_MUTEX_LOCK(mutex);
+}
+
+static inline bool
+malloc_mutex_trylock_final(malloc_mutex_t *mutex) {
+ return MALLOC_MUTEX_TRYLOCK(mutex);
+}
+
+static inline void
+mutex_owner_stats_update(tsdn_t *tsdn, malloc_mutex_t *mutex) {
+ if (config_stats) {
+ mutex_prof_data_t *data = &mutex->prof_data;
+ data->n_lock_ops++;
+ if (data->prev_owner != tsdn) {
+ data->prev_owner = tsdn;
+ data->n_owner_switches++;
+ }
+ }
+}
+
+/* Trylock: return false if the lock is successfully acquired. */
+static inline bool
+malloc_mutex_trylock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
+ witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
+ if (isthreaded) {
+ if (malloc_mutex_trylock_final(mutex)) {
+ return true;
+ }
+ mutex_owner_stats_update(tsdn, mutex);
}
+ witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
+
+ return false;
}
-JEMALLOC_INLINE void
-malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
-{
+/* Aggregate lock prof data. */
+static inline void
+malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) {
+ nstime_add(&sum->tot_wait_time, &data->tot_wait_time);
+ if (nstime_compare(&sum->max_wait_time, &data->max_wait_time) < 0) {
+ nstime_copy(&sum->max_wait_time, &data->max_wait_time);
+ }
- witness_assert_owner(tsdn, &mutex->witness);
+ sum->n_wait_times += data->n_wait_times;
+ sum->n_spin_acquired += data->n_spin_acquired;
+
+ if (sum->max_n_thds < data->max_n_thds) {
+ sum->max_n_thds = data->max_n_thds;
+ }
+ uint32_t cur_n_waiting_thds = atomic_load_u32(&sum->n_waiting_thds,
+ ATOMIC_RELAXED);
+ uint32_t new_n_waiting_thds = cur_n_waiting_thds + atomic_load_u32(
+ &data->n_waiting_thds, ATOMIC_RELAXED);
+ atomic_store_u32(&sum->n_waiting_thds, new_n_waiting_thds,
+ ATOMIC_RELAXED);
+ sum->n_owner_switches += data->n_owner_switches;
+ sum->n_lock_ops += data->n_lock_ops;
}
-JEMALLOC_INLINE void
-malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
-{
+static inline void
+malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
+ witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
+ if (isthreaded) {
+ if (malloc_mutex_trylock_final(mutex)) {
+ malloc_mutex_lock_slow(mutex);
+ }
+ mutex_owner_stats_update(tsdn, mutex);
+ }
+ witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
+}
- witness_assert_not_owner(tsdn, &mutex->witness);
+static inline void
+malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
+ witness_unlock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
+ if (isthreaded) {
+ MALLOC_MUTEX_UNLOCK(mutex);
+ }
+}
+
+static inline void
+malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
+ witness_assert_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
+}
+
+static inline void
+malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
+ witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
+}
+
+/* Copy the prof data from mutex for processing. */
+static inline void
+malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
+ malloc_mutex_t *mutex) {
+ mutex_prof_data_t *source = &mutex->prof_data;
+ /* Can only read holding the mutex. */
+ malloc_mutex_assert_owner(tsdn, mutex);
+
+ /*
+ * Not *really* allowed (we shouldn't be doing non-atomic loads of
+ * atomic data), but the mutex protection makes this safe, and writing
+ * a member-for-member copy is tedious for this situation.
+ */
+ *data = *source;
+	/* n_waiting_thds is not reported (modified w/o locking). */
+ atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
}
-#endif
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
+#endif /* JEMALLOC_INTERNAL_MUTEX_H */
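The rewritten mutex header splits locking into a fast inline trylock (MALLOC_MUTEX_TRYLOCK) and a slow path (malloc_mutex_lock_slow), with ownership and contention counters maintained only when config_stats is enabled. A minimal sketch of a caller of this API follows; it is not part of the patch, and the mutex name, rank choice, and helper names are illustrative only.

/* Illustrative sketch (not part of the patch); uses only the prototypes and
 * macros declared above. */
static malloc_mutex_t example_mtx;	/* hypothetical mutex */

static bool
example_mtx_boot(void) {
	/* Presumably returns true on failure, per jemalloc's usual
	 * bool-error convention. */
	return malloc_mutex_init(&example_mtx, "example", WITNESS_RANK_OMIT,
	    malloc_mutex_rank_exclusive);
}

static void
example_mtx_do_work(tsdn_t *tsdn) {
	malloc_mutex_lock(tsdn, &example_mtx);	/* trylock, then slow path */
	/* ... critical section ... */
	malloc_mutex_unlock(tsdn, &example_mtx);
}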
diff --git a/contrib/jemalloc/include/jemalloc/internal/mutex_pool.h b/contrib/jemalloc/include/jemalloc/internal/mutex_pool.h
new file mode 100644
index 000000000000..726cece90bc7
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/mutex_pool.h
@@ -0,0 +1,94 @@
+#ifndef JEMALLOC_INTERNAL_MUTEX_POOL_H
+#define JEMALLOC_INTERNAL_MUTEX_POOL_H
+
+#include "jemalloc/internal/hash.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/witness.h"
+
+/* We do mod reductions by this value, so it should be kept a power of 2. */
+#define MUTEX_POOL_SIZE 256
+
+typedef struct mutex_pool_s mutex_pool_t;
+struct mutex_pool_s {
+ malloc_mutex_t mutexes[MUTEX_POOL_SIZE];
+};
+
+bool mutex_pool_init(mutex_pool_t *pool, const char *name, witness_rank_t rank);
+
+/* Internal helper - not meant to be called outside this module. */
+static inline malloc_mutex_t *
+mutex_pool_mutex(mutex_pool_t *pool, uintptr_t key) {
+ size_t hash_result[2];
+ hash(&key, sizeof(key), 0xd50dcc1b, hash_result);
+ return &pool->mutexes[hash_result[0] % MUTEX_POOL_SIZE];
+}
+
+static inline void
+mutex_pool_assert_not_held(tsdn_t *tsdn, mutex_pool_t *pool) {
+ for (int i = 0; i < MUTEX_POOL_SIZE; i++) {
+ malloc_mutex_assert_not_owner(tsdn, &pool->mutexes[i]);
+ }
+}
+
+/*
+ * Note that a mutex pool doesn't work exactly the way an embedded mutex would.
+ * You're not allowed to acquire mutexes in the pool one at a time. You have to
+ * acquire all the mutexes you'll need in a single function call, and then
+ * release them all in a single function call.
+ */
+
+static inline void
+mutex_pool_lock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
+ mutex_pool_assert_not_held(tsdn, pool);
+
+ malloc_mutex_t *mutex = mutex_pool_mutex(pool, key);
+ malloc_mutex_lock(tsdn, mutex);
+}
+
+static inline void
+mutex_pool_unlock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
+ malloc_mutex_t *mutex = mutex_pool_mutex(pool, key);
+ malloc_mutex_unlock(tsdn, mutex);
+
+ mutex_pool_assert_not_held(tsdn, pool);
+}
+
+static inline void
+mutex_pool_lock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1,
+ uintptr_t key2) {
+ mutex_pool_assert_not_held(tsdn, pool);
+
+ malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1);
+ malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2);
+ if ((uintptr_t)mutex1 < (uintptr_t)mutex2) {
+ malloc_mutex_lock(tsdn, mutex1);
+ malloc_mutex_lock(tsdn, mutex2);
+ } else if ((uintptr_t)mutex1 == (uintptr_t)mutex2) {
+ malloc_mutex_lock(tsdn, mutex1);
+ } else {
+ malloc_mutex_lock(tsdn, mutex2);
+ malloc_mutex_lock(tsdn, mutex1);
+ }
+}
+
+static inline void
+mutex_pool_unlock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1,
+ uintptr_t key2) {
+ malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1);
+ malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2);
+ if (mutex1 == mutex2) {
+ malloc_mutex_unlock(tsdn, mutex1);
+ } else {
+ malloc_mutex_unlock(tsdn, mutex1);
+ malloc_mutex_unlock(tsdn, mutex2);
+ }
+
+ mutex_pool_assert_not_held(tsdn, pool);
+}
+
+static inline void
+mutex_pool_assert_owner(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
+ malloc_mutex_assert_owner(tsdn, mutex_pool_mutex(pool, key));
+}
+
+#endif /* JEMALLOC_INTERNAL_MUTEX_POOL_H */
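Because pool mutexes are selected by hashing a key, distinct keys can collide on the same mutex; mutex_pool_lock2() therefore acquires the two (possibly identical) mutexes in address order, so every thread takes them in the same order and deadlock is ruled out, and it degenerates to a single acquisition on collision. A hypothetical caller, not part of the patch, pairs the lock2/unlock2 calls like this:

/* Illustrative sketch (not part of the patch); example_pool and the key
 * arguments are hypothetical. */
static mutex_pool_t example_pool;

static void
example_transfer(tsdn_t *tsdn, uintptr_t src_key, uintptr_t dst_key) {
	/* Acquire every needed pool mutex in a single call... */
	mutex_pool_lock2(tsdn, &example_pool, src_key, dst_key);
	/* ... operate on both keyed objects ... */
	/* ... and release them in a single call, per the rule above. */
	mutex_pool_unlock2(tsdn, &example_pool, src_key, dst_key);
}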
diff --git a/contrib/jemalloc/include/jemalloc/internal/mutex_prof.h b/contrib/jemalloc/include/jemalloc/internal/mutex_prof.h
new file mode 100644
index 000000000000..3358bcf53513
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/mutex_prof.h
@@ -0,0 +1,86 @@
+#ifndef JEMALLOC_INTERNAL_MUTEX_PROF_H
+#define JEMALLOC_INTERNAL_MUTEX_PROF_H
+
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/nstime.h"
+#include "jemalloc/internal/tsd_types.h"
+
+#define MUTEX_PROF_GLOBAL_MUTEXES \
+ OP(background_thread) \
+ OP(ctl) \
+ OP(prof)
+
+typedef enum {
+#define OP(mtx) global_prof_mutex_##mtx,
+ MUTEX_PROF_GLOBAL_MUTEXES
+#undef OP
+ mutex_prof_num_global_mutexes
+} mutex_prof_global_ind_t;
+
+#define MUTEX_PROF_ARENA_MUTEXES \
+ OP(large) \
+ OP(extent_avail) \
+ OP(extents_dirty) \
+ OP(extents_muzzy) \
+ OP(extents_retained) \
+ OP(decay_dirty) \
+ OP(decay_muzzy) \
+ OP(base) \
+ OP(tcache_list)
+
+typedef enum {
+#define OP(mtx) arena_prof_mutex_##mtx,
+ MUTEX_PROF_ARENA_MUTEXES
+#undef OP
+ mutex_prof_num_arena_mutexes
+} mutex_prof_arena_ind_t;
+
+#define MUTEX_PROF_COUNTERS \
+ OP(num_ops, uint64_t) \
+ OP(num_wait, uint64_t) \
+ OP(num_spin_acq, uint64_t) \
+ OP(num_owner_switch, uint64_t) \
+ OP(total_wait_time, uint64_t) \
+ OP(max_wait_time, uint64_t) \
+ OP(max_num_thds, uint32_t)
+
+typedef enum {
+#define OP(counter, type) mutex_counter_##counter,
+ MUTEX_PROF_COUNTERS
+#undef OP
+ mutex_prof_num_counters
+} mutex_prof_counter_ind_t;
+
+typedef struct {
+ /*
+ * Counters touched on the slow path, i.e. when there is lock
+ * contention. We update them once we have the lock.
+ */
+	/* Total time (in nanoseconds) spent waiting on this mutex. */
+ nstime_t tot_wait_time;
+	/* Max time (in nanoseconds) spent on a single lock operation. */
+ nstime_t max_wait_time;
+	/* # of times we had to wait for this mutex (after spinning). */
+ uint64_t n_wait_times;
+ /* # of times acquired the mutex through local spinning. */
+ uint64_t n_spin_acquired;
+ /* Max # of threads waiting for the mutex at the same time. */
+ uint32_t max_n_thds;
+ /* Current # of threads waiting on the lock. Atomic synced. */
+ atomic_u32_t n_waiting_thds;
+
+ /*
+ * Data touched on the fast path. These are modified right after we
+	 * grab the lock, so they're placed closest to the end (i.e. right before
+ * the lock) so that we have a higher chance of them being on the same
+ * cacheline.
+ */
+ /* # of times the mutex holder is different than the previous one. */
+ uint64_t n_owner_switches;
+ /* Previous mutex holder, to facilitate n_owner_switches. */
+ tsdn_t *prev_owner;
+ /* # of lock() operations in total. */
+ uint64_t n_lock_ops;
+} mutex_prof_data_t;
+
+#endif /* JEMALLOC_INTERNAL_MUTEX_PROF_H */
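The MUTEX_PROF_* lists above follow the X-macro idiom: each list applies a caller-supplied OP() to every entry, so the same list can generate both the enums here and matching tables elsewhere. A small illustration (not part of the patch) of deriving a parallel name table from MUTEX_PROF_ARENA_MUTEXES:

/* Illustrative sketch (not part of the patch): redefining OP() turns the same
 * list into a table of printable names, indexed by mutex_prof_arena_ind_t. */
static const char *const example_arena_mutex_names[mutex_prof_num_arena_mutexes] = {
#define OP(mtx) #mtx,
	MUTEX_PROF_ARENA_MUTEXES
#undef OP
};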
diff --git a/contrib/jemalloc/include/jemalloc/internal/nstime.h b/contrib/jemalloc/include/jemalloc/internal/nstime.h
index 93b27dc80a05..17c177c7f4b3 100644
--- a/contrib/jemalloc/include/jemalloc/internal/nstime.h
+++ b/contrib/jemalloc/include/jemalloc/internal/nstime.h
@@ -1,48 +1,34 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-typedef struct nstime_s nstime_t;
+#ifndef JEMALLOC_INTERNAL_NSTIME_H
+#define JEMALLOC_INTERNAL_NSTIME_H
/* Maximum supported number of seconds (~584 years). */
-#define NSTIME_SEC_MAX KQU(18446744072)
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-struct nstime_s {
- uint64_t ns;
-};
+#define NSTIME_SEC_MAX KQU(18446744072)
+#define NSTIME_ZERO_INITIALIZER {0}
+
+typedef struct {
+ uint64_t ns;
+} nstime_t;
+
+void nstime_init(nstime_t *time, uint64_t ns);
+void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec);
+uint64_t nstime_ns(const nstime_t *time);
+uint64_t nstime_sec(const nstime_t *time);
+uint64_t nstime_msec(const nstime_t *time);
+uint64_t nstime_nsec(const nstime_t *time);
+void nstime_copy(nstime_t *time, const nstime_t *source);
+int nstime_compare(const nstime_t *a, const nstime_t *b);
+void nstime_add(nstime_t *time, const nstime_t *addend);
+void nstime_iadd(nstime_t *time, uint64_t addend);
+void nstime_subtract(nstime_t *time, const nstime_t *subtrahend);
+void nstime_isubtract(nstime_t *time, uint64_t subtrahend);
+void nstime_imultiply(nstime_t *time, uint64_t multiplier);
+void nstime_idivide(nstime_t *time, uint64_t divisor);
+uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor);
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-void nstime_init(nstime_t *time, uint64_t ns);
-void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec);
-uint64_t nstime_ns(const nstime_t *time);
-uint64_t nstime_sec(const nstime_t *time);
-uint64_t nstime_nsec(const nstime_t *time);
-void nstime_copy(nstime_t *time, const nstime_t *source);
-int nstime_compare(const nstime_t *a, const nstime_t *b);
-void nstime_add(nstime_t *time, const nstime_t *addend);
-void nstime_subtract(nstime_t *time, const nstime_t *subtrahend);
-void nstime_imultiply(nstime_t *time, uint64_t multiplier);
-void nstime_idivide(nstime_t *time, uint64_t divisor);
-uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor);
-#ifdef JEMALLOC_JET
typedef bool (nstime_monotonic_t)(void);
-extern nstime_monotonic_t *nstime_monotonic;
-typedef bool (nstime_update_t)(nstime_t *);
-extern nstime_update_t *nstime_update;
-#else
-bool nstime_monotonic(void);
-bool nstime_update(nstime_t *time);
-#endif
+extern nstime_monotonic_t *JET_MUTABLE nstime_monotonic;
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
+typedef bool (nstime_update_t)(nstime_t *);
+extern nstime_update_t *JET_MUTABLE nstime_update;
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
+#endif /* JEMALLOC_INTERNAL_NSTIME_H */
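nstime_t is now just a single nanosecond counter, and nstime_update() reads the clock into an existing value. A hypothetical elapsed-time measurement (not part of the patch; the helper name is made up) built from the prototypes above:

/* Illustrative sketch (not part of the patch). */
static uint64_t
example_elapsed_ns(void) {
	nstime_t begin, end;

	nstime_init(&begin, 0);
	nstime_update(&begin);		/* read the clock */
	/* ... timed work ... */
	nstime_copy(&end, &begin);
	nstime_update(&end);		/* read the clock again */
	nstime_subtract(&end, &begin);	/* end -= begin */
	return nstime_ns(&end);
}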
diff --git a/contrib/jemalloc/include/jemalloc/internal/pages.h b/contrib/jemalloc/include/jemalloc/internal/pages.h
index 4ae9f156a7b3..28383b7f9738 100644
--- a/contrib/jemalloc/include/jemalloc/internal/pages.h
+++ b/contrib/jemalloc/include/jemalloc/internal/pages.h
@@ -1,29 +1,71 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
+#ifndef JEMALLOC_INTERNAL_PAGES_EXTERNS_H
+#define JEMALLOC_INTERNAL_PAGES_EXTERNS_H
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
+/* Page size. LG_PAGE is determined by the configure script. */
+#ifdef PAGE_MASK
+# undef PAGE_MASK
+#endif
+#define PAGE ((size_t)(1U << LG_PAGE))
+#define PAGE_MASK ((size_t)(PAGE - 1))
+/* Return the page base address for the page containing address a. */
+#define PAGE_ADDR2BASE(a) \
+ ((void *)((uintptr_t)(a) & ~PAGE_MASK))
+/* Return the smallest pagesize multiple that is >= s. */
+#define PAGE_CEILING(s) \
+ (((s) + PAGE_MASK) & ~PAGE_MASK)
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
+/* Huge page size. LG_HUGEPAGE is determined by the configure script. */
+#define HUGEPAGE ((size_t)(1U << LG_HUGEPAGE))
+#define HUGEPAGE_MASK ((size_t)(HUGEPAGE - 1))
+/* Return the huge page base address for the huge page containing address a. */
+#define HUGEPAGE_ADDR2BASE(a) \
+ ((void *)((uintptr_t)(a) & ~HUGEPAGE_MASK))
+/* Return the smallest huge page size multiple that is >= s. */
+#define HUGEPAGE_CEILING(s) \
+ (((s) + HUGEPAGE_MASK) & ~HUGEPAGE_MASK)
-void *pages_map(void *addr, size_t size, bool *commit);
-void pages_unmap(void *addr, size_t size);
-void *pages_trim(void *addr, size_t alloc_size, size_t leadsize,
- size_t size, bool *commit);
-bool pages_commit(void *addr, size_t size);
-bool pages_decommit(void *addr, size_t size);
-bool pages_purge(void *addr, size_t size);
-bool pages_huge(void *addr, size_t size);
-bool pages_nohuge(void *addr, size_t size);
-void pages_boot(void);
+/* PAGES_CAN_PURGE_LAZY is defined if lazy purging is supported. */
+#if defined(_WIN32) || defined(JEMALLOC_PURGE_MADVISE_FREE)
+# define PAGES_CAN_PURGE_LAZY
+#endif
+/*
+ * PAGES_CAN_PURGE_FORCED is defined if forced purging is supported.
+ *
+ * The only supported way to hard-purge on Windows is to decommit and then
+ * re-commit, but doing so is racy, and if re-commit fails it's a pain to
+ * propagate the "poisoned" memory state. Since we typically decommit as the
+ * next step after purging on Windows anyway, there's no point in adding such
+ * complexity.
+ */
+#if !defined(_WIN32) && ((defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
+ defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)) || \
+ defined(JEMALLOC_MAPS_COALESCE))
+# define PAGES_CAN_PURGE_FORCED
+#endif
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
+static const bool pages_can_purge_lazy =
+#ifdef PAGES_CAN_PURGE_LAZY
+ true
+#else
+ false
+#endif
+ ;
+static const bool pages_can_purge_forced =
+#ifdef PAGES_CAN_PURGE_FORCED
+ true
+#else
+ false
+#endif
+ ;
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
+void *pages_map(void *addr, size_t size, size_t alignment, bool *commit);
+void pages_unmap(void *addr, size_t size);
+bool pages_commit(void *addr, size_t size);
+bool pages_decommit(void *addr, size_t size);
+bool pages_purge_lazy(void *addr, size_t size);
+bool pages_purge_forced(void *addr, size_t size);
+bool pages_huge(void *addr, size_t size);
+bool pages_nohuge(void *addr, size_t size);
+bool pages_boot(void);
+#endif /* JEMALLOC_INTERNAL_PAGES_EXTERNS_H */
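The PAGE_* and HUGEPAGE_* macros depend on the page sizes being powers of two, so rounding reduces to bit masking. A standalone check of the arithmetic (not part of the patch), with LG_PAGE hard-coded to 12, i.e. 4 KiB pages; the real value comes from the configure script:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative sketch (not part of the patch); mirrors PAGE_ADDR2BASE and
 * PAGE_CEILING with a fixed page size. */
#define EXAMPLE_LG_PAGE		12
#define EXAMPLE_PAGE		((size_t)(1U << EXAMPLE_LG_PAGE))
#define EXAMPLE_PAGE_MASK	((size_t)(EXAMPLE_PAGE - 1))
#define EXAMPLE_PAGE_ADDR2BASE(a)					\
	((void *)((uintptr_t)(a) & ~EXAMPLE_PAGE_MASK))
#define EXAMPLE_PAGE_CEILING(s)						\
	(((s) + EXAMPLE_PAGE_MASK) & ~EXAMPLE_PAGE_MASK)

int
main(void) {
	assert(EXAMPLE_PAGE_CEILING((size_t)0x1234) == (size_t)0x2000);
	assert(EXAMPLE_PAGE_ADDR2BASE((void *)(uintptr_t)0x12345678) ==
	    (void *)(uintptr_t)0x12345000);
	return 0;
}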
diff --git a/contrib/jemalloc/include/jemalloc/internal/ph.h b/contrib/jemalloc/include/jemalloc/internal/ph.h
index 4f91c333fd28..84d6778a906e 100644
--- a/contrib/jemalloc/include/jemalloc/internal/ph.h
+++ b/contrib/jemalloc/include/jemalloc/internal/ph.h
@@ -13,10 +13,10 @@
*/
#ifndef PH_H_
-#define PH_H_
+#define PH_H_
/* Node structure. */
-#define phn(a_type) \
+#define phn(a_type) \
struct { \
a_type *phn_prev; \
a_type *phn_next; \
@@ -24,31 +24,31 @@ struct { \
}
/* Root structure. */
-#define ph(a_type) \
+#define ph(a_type) \
struct { \
a_type *ph_root; \
}
/* Internal utility macros. */
-#define phn_lchild_get(a_type, a_field, a_phn) \
+#define phn_lchild_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_lchild)
-#define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \
+#define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \
a_phn->a_field.phn_lchild = a_lchild; \
} while (0)
-#define phn_next_get(a_type, a_field, a_phn) \
+#define phn_next_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_next)
-#define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \
+#define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \
a_phn->a_field.phn_prev = a_prev; \
} while (0)
-#define phn_prev_get(a_type, a_field, a_phn) \
+#define phn_prev_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_prev)
-#define phn_next_set(a_type, a_field, a_phn, a_next) do { \
+#define phn_next_set(a_type, a_field, a_phn, a_next) do { \
a_phn->a_field.phn_next = a_next; \
} while (0)
-#define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \
+#define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \
a_type *phn0child; \
\
assert(a_phn0 != NULL); \
@@ -58,17 +58,18 @@ struct { \
phn_prev_set(a_type, a_field, a_phn1, a_phn0); \
phn0child = phn_lchild_get(a_type, a_field, a_phn0); \
phn_next_set(a_type, a_field, a_phn1, phn0child); \
- if (phn0child != NULL) \
+ if (phn0child != NULL) { \
phn_prev_set(a_type, a_field, phn0child, a_phn1); \
+ } \
phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \
} while (0)
-#define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \
- if (a_phn0 == NULL) \
+#define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \
+ if (a_phn0 == NULL) { \
r_phn = a_phn1; \
- else if (a_phn1 == NULL) \
+ } else if (a_phn1 == NULL) { \
r_phn = a_phn0; \
- else if (a_cmp(a_phn0, a_phn1) < 0) { \
+ } else if (a_cmp(a_phn0, a_phn1) < 0) { \
phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \
a_cmp); \
r_phn = a_phn0; \
@@ -79,7 +80,7 @@ struct { \
} \
} while (0)
-#define ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do { \
+#define ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do { \
a_type *head = NULL; \
a_type *tail = NULL; \
a_type *phn0 = a_phn; \
@@ -95,8 +96,9 @@ struct { \
*/ \
if (phn1 != NULL) { \
a_type *phnrest = phn_next_get(a_type, a_field, phn1); \
- if (phnrest != NULL) \
+ if (phnrest != NULL) { \
phn_prev_set(a_type, a_field, phnrest, NULL); \
+ } \
phn_prev_set(a_type, a_field, phn0, NULL); \
phn_next_set(a_type, a_field, phn0, NULL); \
phn_prev_set(a_type, a_field, phn1, NULL); \
@@ -150,8 +152,9 @@ struct { \
NULL); \
phn_merge(a_type, a_field, phn0, phn1, \
a_cmp, phn0); \
- if (head == NULL) \
+ if (head == NULL) { \
break; \
+ } \
phn_next_set(a_type, a_field, tail, \
phn0); \
tail = phn0; \
@@ -164,7 +167,7 @@ struct { \
r_phn = phn0; \
} while (0)
-#define ph_merge_aux(a_type, a_field, a_ph, a_cmp) do { \
+#define ph_merge_aux(a_type, a_field, a_ph, a_cmp) do { \
a_type *phn = phn_next_get(a_type, a_field, a_ph->ph_root); \
if (phn != NULL) { \
phn_prev_set(a_type, a_field, a_ph->ph_root, NULL); \
@@ -177,11 +180,11 @@ struct { \
} \
} while (0)
-#define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \
+#define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \
a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \
- if (lchild == NULL) \
+ if (lchild == NULL) { \
r_phn = NULL; \
- else { \
+ } else { \
ph_merge_siblings(a_type, a_field, lchild, a_cmp, \
r_phn); \
} \
@@ -191,44 +194,50 @@ struct { \
* The ph_proto() macro generates function prototypes that correspond to the
* functions generated by an equivalently parameterized call to ph_gen().
*/
-#define ph_proto(a_attr, a_prefix, a_ph_type, a_type) \
+#define ph_proto(a_attr, a_prefix, a_ph_type, a_type) \
a_attr void a_prefix##new(a_ph_type *ph); \
a_attr bool a_prefix##empty(a_ph_type *ph); \
a_attr a_type *a_prefix##first(a_ph_type *ph); \
+a_attr a_type *a_prefix##any(a_ph_type *ph); \
a_attr void a_prefix##insert(a_ph_type *ph, a_type *phn); \
a_attr a_type *a_prefix##remove_first(a_ph_type *ph); \
+a_attr a_type *a_prefix##remove_any(a_ph_type *ph); \
a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn);
/*
* The ph_gen() macro generates a type-specific pairing heap implementation,
* based on the above cpp macros.
*/
-#define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \
+#define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \
a_attr void \
-a_prefix##new(a_ph_type *ph) \
-{ \
- \
+a_prefix##new(a_ph_type *ph) { \
memset(ph, 0, sizeof(ph(a_type))); \
} \
a_attr bool \
-a_prefix##empty(a_ph_type *ph) \
-{ \
- \
+a_prefix##empty(a_ph_type *ph) { \
return (ph->ph_root == NULL); \
} \
a_attr a_type * \
-a_prefix##first(a_ph_type *ph) \
-{ \
- \
- if (ph->ph_root == NULL) \
- return (NULL); \
+a_prefix##first(a_ph_type *ph) { \
+ if (ph->ph_root == NULL) { \
+ return NULL; \
+ } \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
- return (ph->ph_root); \
+ return ph->ph_root; \
+} \
+a_attr a_type * \
+a_prefix##any(a_ph_type *ph) { \
+ if (ph->ph_root == NULL) { \
+ return NULL; \
+ } \
+ a_type *aux = phn_next_get(a_type, a_field, ph->ph_root); \
+ if (aux != NULL) { \
+ return aux; \
+ } \
+ return ph->ph_root; \
} \
a_attr void \
-a_prefix##insert(a_ph_type *ph, a_type *phn) \
-{ \
- \
+a_prefix##insert(a_ph_type *ph, a_type *phn) { \
memset(&phn->a_field, 0, sizeof(phn(a_type))); \
\
/* \
@@ -239,9 +248,9 @@ a_prefix##insert(a_ph_type *ph, a_type *phn) \
* constant-time, whereas eager merging would make insert \
* O(log n). \
*/ \
- if (ph->ph_root == NULL) \
+ if (ph->ph_root == NULL) { \
ph->ph_root = phn; \
- else { \
+ } else { \
phn_next_set(a_type, a_field, phn, phn_next_get(a_type, \
a_field, ph->ph_root)); \
if (phn_next_get(a_type, a_field, ph->ph_root) != \
@@ -255,12 +264,12 @@ a_prefix##insert(a_ph_type *ph, a_type *phn) \
} \
} \
a_attr a_type * \
-a_prefix##remove_first(a_ph_type *ph) \
-{ \
+a_prefix##remove_first(a_ph_type *ph) { \
a_type *ret; \
\
- if (ph->ph_root == NULL) \
- return (NULL); \
+ if (ph->ph_root == NULL) { \
+ return NULL; \
+ } \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
\
ret = ph->ph_root; \
@@ -268,18 +277,54 @@ a_prefix##remove_first(a_ph_type *ph) \
ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \
ph->ph_root); \
\
- return (ret); \
+ return ret; \
+} \
+a_attr a_type * \
+a_prefix##remove_any(a_ph_type *ph) { \
+ /* \
+ * Remove the most recently inserted aux list element, or the \
+ * root if the aux list is empty. This has the effect of \
+ * behaving as a LIFO (and insertion/removal is therefore \
+ * constant-time) if a_prefix##[remove_]first() are never \
+ * called. \
+ */ \
+ if (ph->ph_root == NULL) { \
+ return NULL; \
+ } \
+ a_type *ret = phn_next_get(a_type, a_field, ph->ph_root); \
+ if (ret != NULL) { \
+ a_type *aux = phn_next_get(a_type, a_field, ret); \
+ phn_next_set(a_type, a_field, ph->ph_root, aux); \
+ if (aux != NULL) { \
+ phn_prev_set(a_type, a_field, aux, \
+ ph->ph_root); \
+ } \
+ return ret; \
+ } \
+ ret = ph->ph_root; \
+ ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \
+ ph->ph_root); \
+ return ret; \
} \
a_attr void \
-a_prefix##remove(a_ph_type *ph, a_type *phn) \
-{ \
+a_prefix##remove(a_ph_type *ph, a_type *phn) { \
a_type *replace, *parent; \
\
- /* \
- * We can delete from aux list without merging it, but we need \
- * to merge if we are dealing with the root node. \
- */ \
if (ph->ph_root == phn) { \
+ /* \
+ * We can delete from aux list without merging it, but \
+ * we need to merge if we are dealing with the root \
+ * node and it has children. \
+ */ \
+ if (phn_lchild_get(a_type, a_field, phn) == NULL) { \
+ ph->ph_root = phn_next_get(a_type, a_field, \
+ phn); \
+ if (ph->ph_root != NULL) { \
+ phn_prev_set(a_type, a_field, \
+ ph->ph_root, NULL); \
+ } \
+ return; \
+ } \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
if (ph->ph_root == phn) { \
ph_merge_children(a_type, a_field, ph->ph_root, \
@@ -290,8 +335,9 @@ a_prefix##remove(a_ph_type *ph, a_type *phn) \
\
/* Get parent (if phn is leftmost child) before mutating. */ \
if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) { \
- if (phn_lchild_get(a_type, a_field, parent) != phn) \
+ if (phn_lchild_get(a_type, a_field, parent) != phn) { \
parent = NULL; \
+ } \
} \
/* Find a possible replacement node, and link to parent. */ \
ph_merge_children(a_type, a_field, phn, a_cmp, replace); \
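ph_proto()/ph_gen() expand into a complete type-specific pairing heap keyed off an intrusive phn() link embedded in the element type. A hypothetical instantiation (not part of the patch; the node type, key field, and comparator are made up):

/* Illustrative sketch (not part of the patch). */
typedef struct example_node_s example_node_t;
struct example_node_s {
	uint64_t		key;
	phn(example_node_t)	ph_link;	/* intrusive heap linkage */
};
typedef ph(example_node_t) example_heap_t;

static int
example_node_cmp(const example_node_t *a, const example_node_t *b) {
	return (a->key > b->key) - (a->key < b->key);
}

/* Generates static example_heap_new/insert/remove_first/remove_any/... */
ph_gen(static, example_heap_, example_heap_t, example_node_t, ph_link,
    example_node_cmp)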
diff --git a/contrib/jemalloc/include/jemalloc/internal/private_namespace.h b/contrib/jemalloc/include/jemalloc/internal/private_namespace.h
index 80917e8434fb..f3d9e618d635 100644
--- a/contrib/jemalloc/include/jemalloc/internal/private_namespace.h
+++ b/contrib/jemalloc/include/jemalloc/internal/private_namespace.h
@@ -1,638 +1,369 @@
-#define a0dalloc JEMALLOC_N(a0dalloc)
-#define a0get JEMALLOC_N(a0get)
-#define a0malloc JEMALLOC_N(a0malloc)
-#define arena_aalloc JEMALLOC_N(arena_aalloc)
-#define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small)
-#define arena_basic_stats_merge JEMALLOC_N(arena_basic_stats_merge)
-#define arena_bin_index JEMALLOC_N(arena_bin_index)
-#define arena_bin_info JEMALLOC_N(arena_bin_info)
-#define arena_bitselm_get_const JEMALLOC_N(arena_bitselm_get_const)
-#define arena_bitselm_get_mutable JEMALLOC_N(arena_bitselm_get_mutable)
-#define arena_boot JEMALLOC_N(arena_boot)
-#define arena_choose JEMALLOC_N(arena_choose)
-#define arena_choose_hard JEMALLOC_N(arena_choose_hard)
-#define arena_choose_impl JEMALLOC_N(arena_choose_impl)
-#define arena_chunk_alloc_huge JEMALLOC_N(arena_chunk_alloc_huge)
-#define arena_chunk_cache_maybe_insert JEMALLOC_N(arena_chunk_cache_maybe_insert)
-#define arena_chunk_cache_maybe_remove JEMALLOC_N(arena_chunk_cache_maybe_remove)
-#define arena_chunk_dalloc_huge JEMALLOC_N(arena_chunk_dalloc_huge)
-#define arena_chunk_ralloc_huge_expand JEMALLOC_N(arena_chunk_ralloc_huge_expand)
-#define arena_chunk_ralloc_huge_shrink JEMALLOC_N(arena_chunk_ralloc_huge_shrink)
-#define arena_chunk_ralloc_huge_similar JEMALLOC_N(arena_chunk_ralloc_huge_similar)
-#define arena_cleanup JEMALLOC_N(arena_cleanup)
-#define arena_dalloc JEMALLOC_N(arena_dalloc)
-#define arena_dalloc_bin JEMALLOC_N(arena_dalloc_bin)
-#define arena_dalloc_bin_junked_locked JEMALLOC_N(arena_dalloc_bin_junked_locked)
-#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
-#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
-#define arena_dalloc_large JEMALLOC_N(arena_dalloc_large)
-#define arena_dalloc_large_junked_locked JEMALLOC_N(arena_dalloc_large_junked_locked)
-#define arena_dalloc_small JEMALLOC_N(arena_dalloc_small)
-#define arena_decay_tick JEMALLOC_N(arena_decay_tick)
-#define arena_decay_ticks JEMALLOC_N(arena_decay_ticks)
-#define arena_decay_time_default_get JEMALLOC_N(arena_decay_time_default_get)
-#define arena_decay_time_default_set JEMALLOC_N(arena_decay_time_default_set)
-#define arena_decay_time_get JEMALLOC_N(arena_decay_time_get)
-#define arena_decay_time_set JEMALLOC_N(arena_decay_time_set)
-#define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get)
-#define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set)
-#define arena_extent_sn_next JEMALLOC_N(arena_extent_sn_next)
-#define arena_get JEMALLOC_N(arena_get)
-#define arena_ichoose JEMALLOC_N(arena_ichoose)
-#define arena_init JEMALLOC_N(arena_init)
-#define arena_lg_dirty_mult_default_get JEMALLOC_N(arena_lg_dirty_mult_default_get)
-#define arena_lg_dirty_mult_default_set JEMALLOC_N(arena_lg_dirty_mult_default_set)
-#define arena_lg_dirty_mult_get JEMALLOC_N(arena_lg_dirty_mult_get)
-#define arena_lg_dirty_mult_set JEMALLOC_N(arena_lg_dirty_mult_set)
-#define arena_malloc JEMALLOC_N(arena_malloc)
-#define arena_malloc_hard JEMALLOC_N(arena_malloc_hard)
-#define arena_malloc_large JEMALLOC_N(arena_malloc_large)
-#define arena_mapbits_allocated_get JEMALLOC_N(arena_mapbits_allocated_get)
-#define arena_mapbits_binind_get JEMALLOC_N(arena_mapbits_binind_get)
-#define arena_mapbits_decommitted_get JEMALLOC_N(arena_mapbits_decommitted_get)
-#define arena_mapbits_dirty_get JEMALLOC_N(arena_mapbits_dirty_get)
-#define arena_mapbits_get JEMALLOC_N(arena_mapbits_get)
-#define arena_mapbits_internal_set JEMALLOC_N(arena_mapbits_internal_set)
-#define arena_mapbits_large_binind_set JEMALLOC_N(arena_mapbits_large_binind_set)
-#define arena_mapbits_large_get JEMALLOC_N(arena_mapbits_large_get)
-#define arena_mapbits_large_set JEMALLOC_N(arena_mapbits_large_set)
-#define arena_mapbits_large_size_get JEMALLOC_N(arena_mapbits_large_size_get)
-#define arena_mapbits_size_decode JEMALLOC_N(arena_mapbits_size_decode)
-#define arena_mapbits_size_encode JEMALLOC_N(arena_mapbits_size_encode)
-#define arena_mapbits_small_runind_get JEMALLOC_N(arena_mapbits_small_runind_get)
-#define arena_mapbits_small_set JEMALLOC_N(arena_mapbits_small_set)
-#define arena_mapbits_unallocated_set JEMALLOC_N(arena_mapbits_unallocated_set)
-#define arena_mapbits_unallocated_size_get JEMALLOC_N(arena_mapbits_unallocated_size_get)
-#define arena_mapbits_unallocated_size_set JEMALLOC_N(arena_mapbits_unallocated_size_set)
-#define arena_mapbits_unzeroed_get JEMALLOC_N(arena_mapbits_unzeroed_get)
-#define arena_mapbitsp_get_const JEMALLOC_N(arena_mapbitsp_get_const)
-#define arena_mapbitsp_get_mutable JEMALLOC_N(arena_mapbitsp_get_mutable)
-#define arena_mapbitsp_read JEMALLOC_N(arena_mapbitsp_read)
-#define arena_mapbitsp_write JEMALLOC_N(arena_mapbitsp_write)
-#define arena_maxrun JEMALLOC_N(arena_maxrun)
-#define arena_maybe_purge JEMALLOC_N(arena_maybe_purge)
-#define arena_metadata_allocated_add JEMALLOC_N(arena_metadata_allocated_add)
-#define arena_metadata_allocated_get JEMALLOC_N(arena_metadata_allocated_get)
-#define arena_metadata_allocated_sub JEMALLOC_N(arena_metadata_allocated_sub)
-#define arena_migrate JEMALLOC_N(arena_migrate)
-#define arena_miscelm_get_const JEMALLOC_N(arena_miscelm_get_const)
-#define arena_miscelm_get_mutable JEMALLOC_N(arena_miscelm_get_mutable)
-#define arena_miscelm_to_pageind JEMALLOC_N(arena_miscelm_to_pageind)
-#define arena_miscelm_to_rpages JEMALLOC_N(arena_miscelm_to_rpages)
-#define arena_new JEMALLOC_N(arena_new)
-#define arena_node_alloc JEMALLOC_N(arena_node_alloc)
-#define arena_node_dalloc JEMALLOC_N(arena_node_dalloc)
-#define arena_nthreads_dec JEMALLOC_N(arena_nthreads_dec)
-#define arena_nthreads_get JEMALLOC_N(arena_nthreads_get)
-#define arena_nthreads_inc JEMALLOC_N(arena_nthreads_inc)
-#define arena_palloc JEMALLOC_N(arena_palloc)
-#define arena_postfork_child JEMALLOC_N(arena_postfork_child)
-#define arena_postfork_parent JEMALLOC_N(arena_postfork_parent)
-#define arena_prefork0 JEMALLOC_N(arena_prefork0)
-#define arena_prefork1 JEMALLOC_N(arena_prefork1)
-#define arena_prefork2 JEMALLOC_N(arena_prefork2)
-#define arena_prefork3 JEMALLOC_N(arena_prefork3)
-#define arena_prof_accum JEMALLOC_N(arena_prof_accum)
-#define arena_prof_accum_impl JEMALLOC_N(arena_prof_accum_impl)
-#define arena_prof_accum_locked JEMALLOC_N(arena_prof_accum_locked)
-#define arena_prof_promoted JEMALLOC_N(arena_prof_promoted)
-#define arena_prof_tctx_get JEMALLOC_N(arena_prof_tctx_get)
-#define arena_prof_tctx_reset JEMALLOC_N(arena_prof_tctx_reset)
-#define arena_prof_tctx_set JEMALLOC_N(arena_prof_tctx_set)
-#define arena_ptr_small_binind_get JEMALLOC_N(arena_ptr_small_binind_get)
-#define arena_purge JEMALLOC_N(arena_purge)
-#define arena_quarantine_junk_small JEMALLOC_N(arena_quarantine_junk_small)
-#define arena_ralloc JEMALLOC_N(arena_ralloc)
-#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
-#define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move)
-#define arena_rd_to_miscelm JEMALLOC_N(arena_rd_to_miscelm)
-#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
-#define arena_reset JEMALLOC_N(arena_reset)
-#define arena_run_regind JEMALLOC_N(arena_run_regind)
-#define arena_run_to_miscelm JEMALLOC_N(arena_run_to_miscelm)
-#define arena_salloc JEMALLOC_N(arena_salloc)
-#define arena_sdalloc JEMALLOC_N(arena_sdalloc)
-#define arena_stats_merge JEMALLOC_N(arena_stats_merge)
-#define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small)
-#define arena_tdata_get JEMALLOC_N(arena_tdata_get)
-#define arena_tdata_get_hard JEMALLOC_N(arena_tdata_get_hard)
-#define arenas JEMALLOC_N(arenas)
-#define arenas_tdata_bypass_cleanup JEMALLOC_N(arenas_tdata_bypass_cleanup)
-#define arenas_tdata_cleanup JEMALLOC_N(arenas_tdata_cleanup)
-#define atomic_add_p JEMALLOC_N(atomic_add_p)
-#define atomic_add_u JEMALLOC_N(atomic_add_u)
-#define atomic_add_uint32 JEMALLOC_N(atomic_add_uint32)
-#define atomic_add_uint64 JEMALLOC_N(atomic_add_uint64)
-#define atomic_add_z JEMALLOC_N(atomic_add_z)
-#define atomic_cas_p JEMALLOC_N(atomic_cas_p)
-#define atomic_cas_u JEMALLOC_N(atomic_cas_u)
-#define atomic_cas_uint32 JEMALLOC_N(atomic_cas_uint32)
-#define atomic_cas_uint64 JEMALLOC_N(atomic_cas_uint64)
-#define atomic_cas_z JEMALLOC_N(atomic_cas_z)
-#define atomic_sub_p JEMALLOC_N(atomic_sub_p)
-#define atomic_sub_u JEMALLOC_N(atomic_sub_u)
-#define atomic_sub_uint32 JEMALLOC_N(atomic_sub_uint32)
-#define atomic_sub_uint64 JEMALLOC_N(atomic_sub_uint64)
-#define atomic_sub_z JEMALLOC_N(atomic_sub_z)
-#define atomic_write_p JEMALLOC_N(atomic_write_p)
-#define atomic_write_u JEMALLOC_N(atomic_write_u)
-#define atomic_write_uint32 JEMALLOC_N(atomic_write_uint32)
-#define atomic_write_uint64 JEMALLOC_N(atomic_write_uint64)
-#define atomic_write_z JEMALLOC_N(atomic_write_z)
-#define base_alloc JEMALLOC_N(base_alloc)
-#define base_boot JEMALLOC_N(base_boot)
-#define base_postfork_child JEMALLOC_N(base_postfork_child)
-#define base_postfork_parent JEMALLOC_N(base_postfork_parent)
-#define base_prefork JEMALLOC_N(base_prefork)
-#define base_stats_get JEMALLOC_N(base_stats_get)
-#define bitmap_full JEMALLOC_N(bitmap_full)
-#define bitmap_get JEMALLOC_N(bitmap_get)
-#define bitmap_info_init JEMALLOC_N(bitmap_info_init)
-#define bitmap_init JEMALLOC_N(bitmap_init)
-#define bitmap_set JEMALLOC_N(bitmap_set)
-#define bitmap_sfu JEMALLOC_N(bitmap_sfu)
-#define bitmap_size JEMALLOC_N(bitmap_size)
-#define bitmap_unset JEMALLOC_N(bitmap_unset)
-#define bootstrap_calloc JEMALLOC_N(bootstrap_calloc)
-#define bootstrap_free JEMALLOC_N(bootstrap_free)
-#define bootstrap_malloc JEMALLOC_N(bootstrap_malloc)
-#define bt_init JEMALLOC_N(bt_init)
-#define buferror JEMALLOC_N(buferror)
-#define chunk_alloc_base JEMALLOC_N(chunk_alloc_base)
-#define chunk_alloc_cache JEMALLOC_N(chunk_alloc_cache)
-#define chunk_alloc_dss JEMALLOC_N(chunk_alloc_dss)
-#define chunk_alloc_mmap JEMALLOC_N(chunk_alloc_mmap)
-#define chunk_alloc_wrapper JEMALLOC_N(chunk_alloc_wrapper)
-#define chunk_boot JEMALLOC_N(chunk_boot)
-#define chunk_dalloc_cache JEMALLOC_N(chunk_dalloc_cache)
-#define chunk_dalloc_mmap JEMALLOC_N(chunk_dalloc_mmap)
-#define chunk_dalloc_wrapper JEMALLOC_N(chunk_dalloc_wrapper)
-#define chunk_deregister JEMALLOC_N(chunk_deregister)
-#define chunk_dss_boot JEMALLOC_N(chunk_dss_boot)
-#define chunk_dss_mergeable JEMALLOC_N(chunk_dss_mergeable)
-#define chunk_dss_prec_get JEMALLOC_N(chunk_dss_prec_get)
-#define chunk_dss_prec_set JEMALLOC_N(chunk_dss_prec_set)
-#define chunk_hooks_default JEMALLOC_N(chunk_hooks_default)
-#define chunk_hooks_get JEMALLOC_N(chunk_hooks_get)
-#define chunk_hooks_set JEMALLOC_N(chunk_hooks_set)
-#define chunk_in_dss JEMALLOC_N(chunk_in_dss)
-#define chunk_lookup JEMALLOC_N(chunk_lookup)
-#define chunk_npages JEMALLOC_N(chunk_npages)
-#define chunk_purge_wrapper JEMALLOC_N(chunk_purge_wrapper)
-#define chunk_register JEMALLOC_N(chunk_register)
-#define chunks_rtree JEMALLOC_N(chunks_rtree)
-#define chunksize JEMALLOC_N(chunksize)
-#define chunksize_mask JEMALLOC_N(chunksize_mask)
-#define ckh_count JEMALLOC_N(ckh_count)
-#define ckh_delete JEMALLOC_N(ckh_delete)
-#define ckh_insert JEMALLOC_N(ckh_insert)
-#define ckh_iter JEMALLOC_N(ckh_iter)
-#define ckh_new JEMALLOC_N(ckh_new)
-#define ckh_pointer_hash JEMALLOC_N(ckh_pointer_hash)
-#define ckh_pointer_keycomp JEMALLOC_N(ckh_pointer_keycomp)
-#define ckh_remove JEMALLOC_N(ckh_remove)
-#define ckh_search JEMALLOC_N(ckh_search)
-#define ckh_string_hash JEMALLOC_N(ckh_string_hash)
-#define ckh_string_keycomp JEMALLOC_N(ckh_string_keycomp)
-#define ctl_boot JEMALLOC_N(ctl_boot)
-#define ctl_bymib JEMALLOC_N(ctl_bymib)
-#define ctl_byname JEMALLOC_N(ctl_byname)
-#define ctl_nametomib JEMALLOC_N(ctl_nametomib)
-#define ctl_postfork_child JEMALLOC_N(ctl_postfork_child)
-#define ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent)
-#define ctl_prefork JEMALLOC_N(ctl_prefork)
-#define decay_ticker_get JEMALLOC_N(decay_ticker_get)
-#define dss_prec_names JEMALLOC_N(dss_prec_names)
-#define extent_node_achunk_get JEMALLOC_N(extent_node_achunk_get)
-#define extent_node_achunk_set JEMALLOC_N(extent_node_achunk_set)
-#define extent_node_addr_get JEMALLOC_N(extent_node_addr_get)
-#define extent_node_addr_set JEMALLOC_N(extent_node_addr_set)
-#define extent_node_arena_get JEMALLOC_N(extent_node_arena_get)
-#define extent_node_arena_set JEMALLOC_N(extent_node_arena_set)
-#define extent_node_committed_get JEMALLOC_N(extent_node_committed_get)
-#define extent_node_committed_set JEMALLOC_N(extent_node_committed_set)
-#define extent_node_dirty_insert JEMALLOC_N(extent_node_dirty_insert)
-#define extent_node_dirty_linkage_init JEMALLOC_N(extent_node_dirty_linkage_init)
-#define extent_node_dirty_remove JEMALLOC_N(extent_node_dirty_remove)
-#define extent_node_init JEMALLOC_N(extent_node_init)
-#define extent_node_prof_tctx_get JEMALLOC_N(extent_node_prof_tctx_get)
-#define extent_node_prof_tctx_set JEMALLOC_N(extent_node_prof_tctx_set)
-#define extent_node_size_get JEMALLOC_N(extent_node_size_get)
-#define extent_node_size_set JEMALLOC_N(extent_node_size_set)
-#define extent_node_sn_get JEMALLOC_N(extent_node_sn_get)
-#define extent_node_sn_set JEMALLOC_N(extent_node_sn_set)
-#define extent_node_zeroed_get JEMALLOC_N(extent_node_zeroed_get)
-#define extent_node_zeroed_set JEMALLOC_N(extent_node_zeroed_set)
-#define extent_size_quantize_ceil JEMALLOC_N(extent_size_quantize_ceil)
-#define extent_size_quantize_floor JEMALLOC_N(extent_size_quantize_floor)
-#define extent_tree_ad_destroy JEMALLOC_N(extent_tree_ad_destroy)
-#define extent_tree_ad_destroy_recurse JEMALLOC_N(extent_tree_ad_destroy_recurse)
-#define extent_tree_ad_empty JEMALLOC_N(extent_tree_ad_empty)
-#define extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first)
-#define extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert)
-#define extent_tree_ad_iter JEMALLOC_N(extent_tree_ad_iter)
-#define extent_tree_ad_iter_recurse JEMALLOC_N(extent_tree_ad_iter_recurse)
-#define extent_tree_ad_iter_start JEMALLOC_N(extent_tree_ad_iter_start)
-#define extent_tree_ad_last JEMALLOC_N(extent_tree_ad_last)
-#define extent_tree_ad_new JEMALLOC_N(extent_tree_ad_new)
-#define extent_tree_ad_next JEMALLOC_N(extent_tree_ad_next)
-#define extent_tree_ad_nsearch JEMALLOC_N(extent_tree_ad_nsearch)
-#define extent_tree_ad_prev JEMALLOC_N(extent_tree_ad_prev)
-#define extent_tree_ad_psearch JEMALLOC_N(extent_tree_ad_psearch)
-#define extent_tree_ad_remove JEMALLOC_N(extent_tree_ad_remove)
-#define extent_tree_ad_reverse_iter JEMALLOC_N(extent_tree_ad_reverse_iter)
-#define extent_tree_ad_reverse_iter_recurse JEMALLOC_N(extent_tree_ad_reverse_iter_recurse)
-#define extent_tree_ad_reverse_iter_start JEMALLOC_N(extent_tree_ad_reverse_iter_start)
-#define extent_tree_ad_search JEMALLOC_N(extent_tree_ad_search)
-#define extent_tree_szsnad_destroy JEMALLOC_N(extent_tree_szsnad_destroy)
-#define extent_tree_szsnad_destroy_recurse JEMALLOC_N(extent_tree_szsnad_destroy_recurse)
-#define extent_tree_szsnad_empty JEMALLOC_N(extent_tree_szsnad_empty)
-#define extent_tree_szsnad_first JEMALLOC_N(extent_tree_szsnad_first)
-#define extent_tree_szsnad_insert JEMALLOC_N(extent_tree_szsnad_insert)
-#define extent_tree_szsnad_iter JEMALLOC_N(extent_tree_szsnad_iter)
-#define extent_tree_szsnad_iter_recurse JEMALLOC_N(extent_tree_szsnad_iter_recurse)
-#define extent_tree_szsnad_iter_start JEMALLOC_N(extent_tree_szsnad_iter_start)
-#define extent_tree_szsnad_last JEMALLOC_N(extent_tree_szsnad_last)
-#define extent_tree_szsnad_new JEMALLOC_N(extent_tree_szsnad_new)
-#define extent_tree_szsnad_next JEMALLOC_N(extent_tree_szsnad_next)
-#define extent_tree_szsnad_nsearch JEMALLOC_N(extent_tree_szsnad_nsearch)
-#define extent_tree_szsnad_prev JEMALLOC_N(extent_tree_szsnad_prev)
-#define extent_tree_szsnad_psearch JEMALLOC_N(extent_tree_szsnad_psearch)
-#define extent_tree_szsnad_remove JEMALLOC_N(extent_tree_szsnad_remove)
-#define extent_tree_szsnad_reverse_iter JEMALLOC_N(extent_tree_szsnad_reverse_iter)
-#define extent_tree_szsnad_reverse_iter_recurse JEMALLOC_N(extent_tree_szsnad_reverse_iter_recurse)
-#define extent_tree_szsnad_reverse_iter_start JEMALLOC_N(extent_tree_szsnad_reverse_iter_start)
-#define extent_tree_szsnad_search JEMALLOC_N(extent_tree_szsnad_search)
-#define ffs_llu JEMALLOC_N(ffs_llu)
-#define ffs_lu JEMALLOC_N(ffs_lu)
-#define ffs_u JEMALLOC_N(ffs_u)
-#define ffs_u32 JEMALLOC_N(ffs_u32)
-#define ffs_u64 JEMALLOC_N(ffs_u64)
-#define ffs_zu JEMALLOC_N(ffs_zu)
-#define get_errno JEMALLOC_N(get_errno)
-#define hash JEMALLOC_N(hash)
-#define hash_fmix_32 JEMALLOC_N(hash_fmix_32)
-#define hash_fmix_64 JEMALLOC_N(hash_fmix_64)
-#define hash_get_block_32 JEMALLOC_N(hash_get_block_32)
-#define hash_get_block_64 JEMALLOC_N(hash_get_block_64)
-#define hash_rotl_32 JEMALLOC_N(hash_rotl_32)
-#define hash_rotl_64 JEMALLOC_N(hash_rotl_64)
-#define hash_x64_128 JEMALLOC_N(hash_x64_128)
-#define hash_x86_128 JEMALLOC_N(hash_x86_128)
-#define hash_x86_32 JEMALLOC_N(hash_x86_32)
-#define huge_aalloc JEMALLOC_N(huge_aalloc)
-#define huge_dalloc JEMALLOC_N(huge_dalloc)
-#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
-#define huge_malloc JEMALLOC_N(huge_malloc)
-#define huge_palloc JEMALLOC_N(huge_palloc)
-#define huge_prof_tctx_get JEMALLOC_N(huge_prof_tctx_get)
-#define huge_prof_tctx_reset JEMALLOC_N(huge_prof_tctx_reset)
-#define huge_prof_tctx_set JEMALLOC_N(huge_prof_tctx_set)
-#define huge_ralloc JEMALLOC_N(huge_ralloc)
-#define huge_ralloc_no_move JEMALLOC_N(huge_ralloc_no_move)
-#define huge_salloc JEMALLOC_N(huge_salloc)
-#define iaalloc JEMALLOC_N(iaalloc)
-#define ialloc JEMALLOC_N(ialloc)
-#define iallocztm JEMALLOC_N(iallocztm)
-#define iarena_cleanup JEMALLOC_N(iarena_cleanup)
-#define idalloc JEMALLOC_N(idalloc)
-#define idalloctm JEMALLOC_N(idalloctm)
-#define in_valgrind JEMALLOC_N(in_valgrind)
-#define index2size JEMALLOC_N(index2size)
-#define index2size_compute JEMALLOC_N(index2size_compute)
-#define index2size_lookup JEMALLOC_N(index2size_lookup)
-#define index2size_tab JEMALLOC_N(index2size_tab)
-#define ipalloc JEMALLOC_N(ipalloc)
-#define ipalloct JEMALLOC_N(ipalloct)
-#define ipallocztm JEMALLOC_N(ipallocztm)
-#define iqalloc JEMALLOC_N(iqalloc)
-#define iralloc JEMALLOC_N(iralloc)
-#define iralloct JEMALLOC_N(iralloct)
-#define iralloct_realign JEMALLOC_N(iralloct_realign)
-#define isalloc JEMALLOC_N(isalloc)
-#define isdalloct JEMALLOC_N(isdalloct)
-#define isqalloc JEMALLOC_N(isqalloc)
-#define ivsalloc JEMALLOC_N(ivsalloc)
-#define ixalloc JEMALLOC_N(ixalloc)
-#define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child)
-#define jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent)
-#define jemalloc_prefork JEMALLOC_N(jemalloc_prefork)
-#define large_maxclass JEMALLOC_N(large_maxclass)
-#define lg_floor JEMALLOC_N(lg_floor)
-#define lg_prof_sample JEMALLOC_N(lg_prof_sample)
-#define malloc_cprintf JEMALLOC_N(malloc_cprintf)
-#define malloc_mutex_assert_not_owner JEMALLOC_N(malloc_mutex_assert_not_owner)
-#define malloc_mutex_assert_owner JEMALLOC_N(malloc_mutex_assert_owner)
-#define malloc_mutex_boot JEMALLOC_N(malloc_mutex_boot)
-#define malloc_mutex_init JEMALLOC_N(malloc_mutex_init)
-#define malloc_mutex_lock JEMALLOC_N(malloc_mutex_lock)
-#define malloc_mutex_postfork_child JEMALLOC_N(malloc_mutex_postfork_child)
-#define malloc_mutex_postfork_parent JEMALLOC_N(malloc_mutex_postfork_parent)
-#define malloc_mutex_prefork JEMALLOC_N(malloc_mutex_prefork)
-#define malloc_mutex_unlock JEMALLOC_N(malloc_mutex_unlock)
-#define malloc_printf JEMALLOC_N(malloc_printf)
-#define malloc_snprintf JEMALLOC_N(malloc_snprintf)
-#define malloc_strtoumax JEMALLOC_N(malloc_strtoumax)
-#define malloc_tsd_boot0 JEMALLOC_N(malloc_tsd_boot0)
-#define malloc_tsd_boot1 JEMALLOC_N(malloc_tsd_boot1)
-#define malloc_tsd_cleanup_register JEMALLOC_N(malloc_tsd_cleanup_register)
-#define malloc_tsd_dalloc JEMALLOC_N(malloc_tsd_dalloc)
-#define malloc_tsd_malloc JEMALLOC_N(malloc_tsd_malloc)
-#define malloc_tsd_no_cleanup JEMALLOC_N(malloc_tsd_no_cleanup)
-#define malloc_vcprintf JEMALLOC_N(malloc_vcprintf)
-#define malloc_vsnprintf JEMALLOC_N(malloc_vsnprintf)
-#define malloc_write JEMALLOC_N(malloc_write)
-#define map_bias JEMALLOC_N(map_bias)
-#define map_misc_offset JEMALLOC_N(map_misc_offset)
-#define mb_write JEMALLOC_N(mb_write)
-#define narenas_auto JEMALLOC_N(narenas_auto)
-#define narenas_tdata_cleanup JEMALLOC_N(narenas_tdata_cleanup)
-#define narenas_total_get JEMALLOC_N(narenas_total_get)
-#define ncpus JEMALLOC_N(ncpus)
-#define nhbins JEMALLOC_N(nhbins)
-#define nhclasses JEMALLOC_N(nhclasses)
-#define nlclasses JEMALLOC_N(nlclasses)
-#define nstime_add JEMALLOC_N(nstime_add)
-#define nstime_compare JEMALLOC_N(nstime_compare)
-#define nstime_copy JEMALLOC_N(nstime_copy)
-#define nstime_divide JEMALLOC_N(nstime_divide)
-#define nstime_idivide JEMALLOC_N(nstime_idivide)
-#define nstime_imultiply JEMALLOC_N(nstime_imultiply)
-#define nstime_init JEMALLOC_N(nstime_init)
-#define nstime_init2 JEMALLOC_N(nstime_init2)
-#define nstime_monotonic JEMALLOC_N(nstime_monotonic)
-#define nstime_ns JEMALLOC_N(nstime_ns)
-#define nstime_nsec JEMALLOC_N(nstime_nsec)
-#define nstime_sec JEMALLOC_N(nstime_sec)
-#define nstime_subtract JEMALLOC_N(nstime_subtract)
-#define nstime_update JEMALLOC_N(nstime_update)
-#define opt_abort JEMALLOC_N(opt_abort)
-#define opt_decay_time JEMALLOC_N(opt_decay_time)
-#define opt_dss JEMALLOC_N(opt_dss)
-#define opt_junk JEMALLOC_N(opt_junk)
-#define opt_junk_alloc JEMALLOC_N(opt_junk_alloc)
-#define opt_junk_free JEMALLOC_N(opt_junk_free)
-#define opt_lg_chunk JEMALLOC_N(opt_lg_chunk)
-#define opt_lg_dirty_mult JEMALLOC_N(opt_lg_dirty_mult)
-#define opt_lg_prof_interval JEMALLOC_N(opt_lg_prof_interval)
-#define opt_lg_prof_sample JEMALLOC_N(opt_lg_prof_sample)
-#define opt_lg_tcache_max JEMALLOC_N(opt_lg_tcache_max)
-#define opt_narenas JEMALLOC_N(opt_narenas)
-#define opt_prof JEMALLOC_N(opt_prof)
-#define opt_prof_accum JEMALLOC_N(opt_prof_accum)
-#define opt_prof_active JEMALLOC_N(opt_prof_active)
-#define opt_prof_final JEMALLOC_N(opt_prof_final)
-#define opt_prof_gdump JEMALLOC_N(opt_prof_gdump)
-#define opt_prof_leak JEMALLOC_N(opt_prof_leak)
-#define opt_prof_prefix JEMALLOC_N(opt_prof_prefix)
-#define opt_prof_thread_active_init JEMALLOC_N(opt_prof_thread_active_init)
-#define opt_purge JEMALLOC_N(opt_purge)
-#define opt_quarantine JEMALLOC_N(opt_quarantine)
-#define opt_redzone JEMALLOC_N(opt_redzone)
-#define opt_stats_print JEMALLOC_N(opt_stats_print)
-#define opt_tcache JEMALLOC_N(opt_tcache)
-#define opt_thp JEMALLOC_N(opt_thp)
-#define opt_utrace JEMALLOC_N(opt_utrace)
-#define opt_xmalloc JEMALLOC_N(opt_xmalloc)
-#define opt_zero JEMALLOC_N(opt_zero)
-#define p2rz JEMALLOC_N(p2rz)
-#define pages_boot JEMALLOC_N(pages_boot)
-#define pages_commit JEMALLOC_N(pages_commit)
-#define pages_decommit JEMALLOC_N(pages_decommit)
-#define pages_huge JEMALLOC_N(pages_huge)
-#define pages_map JEMALLOC_N(pages_map)
-#define pages_nohuge JEMALLOC_N(pages_nohuge)
-#define pages_purge JEMALLOC_N(pages_purge)
-#define pages_trim JEMALLOC_N(pages_trim)
-#define pages_unmap JEMALLOC_N(pages_unmap)
-#define pind2sz JEMALLOC_N(pind2sz)
-#define pind2sz_compute JEMALLOC_N(pind2sz_compute)
-#define pind2sz_lookup JEMALLOC_N(pind2sz_lookup)
-#define pind2sz_tab JEMALLOC_N(pind2sz_tab)
-#define pow2_ceil_u32 JEMALLOC_N(pow2_ceil_u32)
-#define pow2_ceil_u64 JEMALLOC_N(pow2_ceil_u64)
-#define pow2_ceil_zu JEMALLOC_N(pow2_ceil_zu)
-#define prng_lg_range_u32 JEMALLOC_N(prng_lg_range_u32)
-#define prng_lg_range_u64 JEMALLOC_N(prng_lg_range_u64)
-#define prng_lg_range_zu JEMALLOC_N(prng_lg_range_zu)
-#define prng_range_u32 JEMALLOC_N(prng_range_u32)
-#define prng_range_u64 JEMALLOC_N(prng_range_u64)
-#define prng_range_zu JEMALLOC_N(prng_range_zu)
-#define prng_state_next_u32 JEMALLOC_N(prng_state_next_u32)
-#define prng_state_next_u64 JEMALLOC_N(prng_state_next_u64)
-#define prng_state_next_zu JEMALLOC_N(prng_state_next_zu)
-#define prof_active JEMALLOC_N(prof_active)
-#define prof_active_get JEMALLOC_N(prof_active_get)
-#define prof_active_get_unlocked JEMALLOC_N(prof_active_get_unlocked)
-#define prof_active_set JEMALLOC_N(prof_active_set)
-#define prof_alloc_prep JEMALLOC_N(prof_alloc_prep)
-#define prof_alloc_rollback JEMALLOC_N(prof_alloc_rollback)
-#define prof_backtrace JEMALLOC_N(prof_backtrace)
-#define prof_boot0 JEMALLOC_N(prof_boot0)
-#define prof_boot1 JEMALLOC_N(prof_boot1)
-#define prof_boot2 JEMALLOC_N(prof_boot2)
-#define prof_bt_count JEMALLOC_N(prof_bt_count)
-#define prof_dump_header JEMALLOC_N(prof_dump_header)
-#define prof_dump_open JEMALLOC_N(prof_dump_open)
-#define prof_free JEMALLOC_N(prof_free)
-#define prof_free_sampled_object JEMALLOC_N(prof_free_sampled_object)
-#define prof_gdump JEMALLOC_N(prof_gdump)
-#define prof_gdump_get JEMALLOC_N(prof_gdump_get)
-#define prof_gdump_get_unlocked JEMALLOC_N(prof_gdump_get_unlocked)
-#define prof_gdump_set JEMALLOC_N(prof_gdump_set)
-#define prof_gdump_val JEMALLOC_N(prof_gdump_val)
-#define prof_idump JEMALLOC_N(prof_idump)
-#define prof_interval JEMALLOC_N(prof_interval)
-#define prof_lookup JEMALLOC_N(prof_lookup)
-#define prof_malloc JEMALLOC_N(prof_malloc)
-#define prof_malloc_sample_object JEMALLOC_N(prof_malloc_sample_object)
-#define prof_mdump JEMALLOC_N(prof_mdump)
-#define prof_postfork_child JEMALLOC_N(prof_postfork_child)
-#define prof_postfork_parent JEMALLOC_N(prof_postfork_parent)
-#define prof_prefork0 JEMALLOC_N(prof_prefork0)
-#define prof_prefork1 JEMALLOC_N(prof_prefork1)
-#define prof_realloc JEMALLOC_N(prof_realloc)
-#define prof_reset JEMALLOC_N(prof_reset)
-#define prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update)
-#define prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update)
-#define prof_tctx_get JEMALLOC_N(prof_tctx_get)
-#define prof_tctx_reset JEMALLOC_N(prof_tctx_reset)
-#define prof_tctx_set JEMALLOC_N(prof_tctx_set)
-#define prof_tdata_cleanup JEMALLOC_N(prof_tdata_cleanup)
-#define prof_tdata_count JEMALLOC_N(prof_tdata_count)
-#define prof_tdata_get JEMALLOC_N(prof_tdata_get)
-#define prof_tdata_init JEMALLOC_N(prof_tdata_init)
-#define prof_tdata_reinit JEMALLOC_N(prof_tdata_reinit)
-#define prof_thread_active_get JEMALLOC_N(prof_thread_active_get)
-#define prof_thread_active_init_get JEMALLOC_N(prof_thread_active_init_get)
-#define prof_thread_active_init_set JEMALLOC_N(prof_thread_active_init_set)
-#define prof_thread_active_set JEMALLOC_N(prof_thread_active_set)
-#define prof_thread_name_get JEMALLOC_N(prof_thread_name_get)
-#define prof_thread_name_set JEMALLOC_N(prof_thread_name_set)
-#define psz2ind JEMALLOC_N(psz2ind)
-#define psz2u JEMALLOC_N(psz2u)
-#define purge_mode_names JEMALLOC_N(purge_mode_names)
-#define quarantine JEMALLOC_N(quarantine)
-#define quarantine_alloc_hook JEMALLOC_N(quarantine_alloc_hook)
-#define quarantine_alloc_hook_work JEMALLOC_N(quarantine_alloc_hook_work)
-#define quarantine_cleanup JEMALLOC_N(quarantine_cleanup)
-#define rtree_child_read JEMALLOC_N(rtree_child_read)
-#define rtree_child_read_hard JEMALLOC_N(rtree_child_read_hard)
-#define rtree_child_tryread JEMALLOC_N(rtree_child_tryread)
-#define rtree_delete JEMALLOC_N(rtree_delete)
-#define rtree_get JEMALLOC_N(rtree_get)
-#define rtree_new JEMALLOC_N(rtree_new)
-#define rtree_node_valid JEMALLOC_N(rtree_node_valid)
-#define rtree_set JEMALLOC_N(rtree_set)
-#define rtree_start_level JEMALLOC_N(rtree_start_level)
-#define rtree_subkey JEMALLOC_N(rtree_subkey)
-#define rtree_subtree_read JEMALLOC_N(rtree_subtree_read)
-#define rtree_subtree_read_hard JEMALLOC_N(rtree_subtree_read_hard)
-#define rtree_subtree_tryread JEMALLOC_N(rtree_subtree_tryread)
-#define rtree_val_read JEMALLOC_N(rtree_val_read)
-#define rtree_val_write JEMALLOC_N(rtree_val_write)
-#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
-#define run_quantize_floor JEMALLOC_N(run_quantize_floor)
-#define s2u JEMALLOC_N(s2u)
-#define s2u_compute JEMALLOC_N(s2u_compute)
-#define s2u_lookup JEMALLOC_N(s2u_lookup)
-#define sa2u JEMALLOC_N(sa2u)
-#define set_errno JEMALLOC_N(set_errno)
-#define size2index JEMALLOC_N(size2index)
-#define size2index_compute JEMALLOC_N(size2index_compute)
-#define size2index_lookup JEMALLOC_N(size2index_lookup)
-#define size2index_tab JEMALLOC_N(size2index_tab)
-#define spin_adaptive JEMALLOC_N(spin_adaptive)
-#define spin_init JEMALLOC_N(spin_init)
-#define stats_cactive JEMALLOC_N(stats_cactive)
-#define stats_cactive_add JEMALLOC_N(stats_cactive_add)
-#define stats_cactive_get JEMALLOC_N(stats_cactive_get)
-#define stats_cactive_sub JEMALLOC_N(stats_cactive_sub)
-#define stats_print JEMALLOC_N(stats_print)
-#define tcache_alloc_easy JEMALLOC_N(tcache_alloc_easy)
-#define tcache_alloc_large JEMALLOC_N(tcache_alloc_large)
-#define tcache_alloc_small JEMALLOC_N(tcache_alloc_small)
-#define tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard)
-#define tcache_arena_reassociate JEMALLOC_N(tcache_arena_reassociate)
-#define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large)
-#define tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small)
-#define tcache_bin_info JEMALLOC_N(tcache_bin_info)
-#define tcache_boot JEMALLOC_N(tcache_boot)
-#define tcache_cleanup JEMALLOC_N(tcache_cleanup)
-#define tcache_create JEMALLOC_N(tcache_create)
-#define tcache_dalloc_large JEMALLOC_N(tcache_dalloc_large)
-#define tcache_dalloc_small JEMALLOC_N(tcache_dalloc_small)
-#define tcache_enabled_cleanup JEMALLOC_N(tcache_enabled_cleanup)
-#define tcache_enabled_get JEMALLOC_N(tcache_enabled_get)
-#define tcache_enabled_set JEMALLOC_N(tcache_enabled_set)
-#define tcache_event JEMALLOC_N(tcache_event)
-#define tcache_event_hard JEMALLOC_N(tcache_event_hard)
-#define tcache_flush JEMALLOC_N(tcache_flush)
-#define tcache_get JEMALLOC_N(tcache_get)
-#define tcache_get_hard JEMALLOC_N(tcache_get_hard)
-#define tcache_maxclass JEMALLOC_N(tcache_maxclass)
-#define tcache_postfork_child JEMALLOC_N(tcache_postfork_child)
-#define tcache_postfork_parent JEMALLOC_N(tcache_postfork_parent)
-#define tcache_prefork JEMALLOC_N(tcache_prefork)
-#define tcache_salloc JEMALLOC_N(tcache_salloc)
-#define tcache_stats_merge JEMALLOC_N(tcache_stats_merge)
-#define tcaches JEMALLOC_N(tcaches)
-#define tcaches_create JEMALLOC_N(tcaches_create)
-#define tcaches_destroy JEMALLOC_N(tcaches_destroy)
-#define tcaches_flush JEMALLOC_N(tcaches_flush)
-#define tcaches_get JEMALLOC_N(tcaches_get)
-#define thread_allocated_cleanup JEMALLOC_N(thread_allocated_cleanup)
-#define thread_deallocated_cleanup JEMALLOC_N(thread_deallocated_cleanup)
-#define ticker_copy JEMALLOC_N(ticker_copy)
-#define ticker_init JEMALLOC_N(ticker_init)
-#define ticker_read JEMALLOC_N(ticker_read)
-#define ticker_tick JEMALLOC_N(ticker_tick)
-#define ticker_ticks JEMALLOC_N(ticker_ticks)
-#define tsd_arena_get JEMALLOC_N(tsd_arena_get)
-#define tsd_arena_set JEMALLOC_N(tsd_arena_set)
-#define tsd_arenap_get JEMALLOC_N(tsd_arenap_get)
-#define tsd_arenas_tdata_bypass_get JEMALLOC_N(tsd_arenas_tdata_bypass_get)
-#define tsd_arenas_tdata_bypass_set JEMALLOC_N(tsd_arenas_tdata_bypass_set)
-#define tsd_arenas_tdata_bypassp_get JEMALLOC_N(tsd_arenas_tdata_bypassp_get)
-#define tsd_arenas_tdata_get JEMALLOC_N(tsd_arenas_tdata_get)
-#define tsd_arenas_tdata_set JEMALLOC_N(tsd_arenas_tdata_set)
-#define tsd_arenas_tdatap_get JEMALLOC_N(tsd_arenas_tdatap_get)
-#define tsd_boot JEMALLOC_N(tsd_boot)
-#define tsd_boot0 JEMALLOC_N(tsd_boot0)
-#define tsd_boot1 JEMALLOC_N(tsd_boot1)
-#define tsd_booted JEMALLOC_N(tsd_booted)
-#define tsd_booted_get JEMALLOC_N(tsd_booted_get)
-#define tsd_cleanup JEMALLOC_N(tsd_cleanup)
-#define tsd_cleanup_wrapper JEMALLOC_N(tsd_cleanup_wrapper)
-#define tsd_fetch JEMALLOC_N(tsd_fetch)
-#define tsd_fetch_impl JEMALLOC_N(tsd_fetch_impl)
-#define tsd_get JEMALLOC_N(tsd_get)
-#define tsd_get_allocates JEMALLOC_N(tsd_get_allocates)
-#define tsd_iarena_get JEMALLOC_N(tsd_iarena_get)
-#define tsd_iarena_set JEMALLOC_N(tsd_iarena_set)
-#define tsd_iarenap_get JEMALLOC_N(tsd_iarenap_get)
-#define tsd_initialized JEMALLOC_N(tsd_initialized)
-#define tsd_init_check_recursion JEMALLOC_N(tsd_init_check_recursion)
-#define tsd_init_finish JEMALLOC_N(tsd_init_finish)
-#define tsd_init_head JEMALLOC_N(tsd_init_head)
-#define tsd_narenas_tdata_get JEMALLOC_N(tsd_narenas_tdata_get)
-#define tsd_narenas_tdata_set JEMALLOC_N(tsd_narenas_tdata_set)
-#define tsd_narenas_tdatap_get JEMALLOC_N(tsd_narenas_tdatap_get)
-#define tsd_wrapper_get JEMALLOC_N(tsd_wrapper_get)
-#define tsd_wrapper_set JEMALLOC_N(tsd_wrapper_set)
-#define tsd_nominal JEMALLOC_N(tsd_nominal)
-#define tsd_prof_tdata_get JEMALLOC_N(tsd_prof_tdata_get)
-#define tsd_prof_tdata_set JEMALLOC_N(tsd_prof_tdata_set)
-#define tsd_prof_tdatap_get JEMALLOC_N(tsd_prof_tdatap_get)
-#define tsd_quarantine_get JEMALLOC_N(tsd_quarantine_get)
-#define tsd_quarantine_set JEMALLOC_N(tsd_quarantine_set)
-#define tsd_quarantinep_get JEMALLOC_N(tsd_quarantinep_get)
-#define tsd_set JEMALLOC_N(tsd_set)
-#define tsd_tcache_enabled_get JEMALLOC_N(tsd_tcache_enabled_get)
-#define tsd_tcache_enabled_set JEMALLOC_N(tsd_tcache_enabled_set)
-#define tsd_tcache_enabledp_get JEMALLOC_N(tsd_tcache_enabledp_get)
-#define tsd_tcache_get JEMALLOC_N(tsd_tcache_get)
-#define tsd_tcache_set JEMALLOC_N(tsd_tcache_set)
-#define tsd_tcachep_get JEMALLOC_N(tsd_tcachep_get)
-#define tsd_thread_allocated_get JEMALLOC_N(tsd_thread_allocated_get)
-#define tsd_thread_allocated_set JEMALLOC_N(tsd_thread_allocated_set)
-#define tsd_thread_allocatedp_get JEMALLOC_N(tsd_thread_allocatedp_get)
-#define tsd_thread_deallocated_get JEMALLOC_N(tsd_thread_deallocated_get)
-#define tsd_thread_deallocated_set JEMALLOC_N(tsd_thread_deallocated_set)
-#define tsd_thread_deallocatedp_get JEMALLOC_N(tsd_thread_deallocatedp_get)
-#define tsd_tls JEMALLOC_N(tsd_tls)
-#define tsd_tsd JEMALLOC_N(tsd_tsd)
-#define tsd_tsdn JEMALLOC_N(tsd_tsdn)
-#define tsd_witness_fork_get JEMALLOC_N(tsd_witness_fork_get)
-#define tsd_witness_fork_set JEMALLOC_N(tsd_witness_fork_set)
-#define tsd_witness_forkp_get JEMALLOC_N(tsd_witness_forkp_get)
-#define tsd_witnesses_get JEMALLOC_N(tsd_witnesses_get)
-#define tsd_witnesses_set JEMALLOC_N(tsd_witnesses_set)
-#define tsd_witnessesp_get JEMALLOC_N(tsd_witnessesp_get)
-#define tsdn_fetch JEMALLOC_N(tsdn_fetch)
-#define tsdn_null JEMALLOC_N(tsdn_null)
-#define tsdn_tsd JEMALLOC_N(tsdn_tsd)
-#define u2rz JEMALLOC_N(u2rz)
-#define valgrind_freelike_block JEMALLOC_N(valgrind_freelike_block)
-#define valgrind_make_mem_defined JEMALLOC_N(valgrind_make_mem_defined)
-#define valgrind_make_mem_noaccess JEMALLOC_N(valgrind_make_mem_noaccess)
-#define valgrind_make_mem_undefined JEMALLOC_N(valgrind_make_mem_undefined)
-#define witness_assert_depth JEMALLOC_N(witness_assert_depth)
-#define witness_assert_depth_to_rank JEMALLOC_N(witness_assert_depth_to_rank)
-#define witness_assert_lockless JEMALLOC_N(witness_assert_lockless)
-#define witness_assert_not_owner JEMALLOC_N(witness_assert_not_owner)
-#define witness_assert_owner JEMALLOC_N(witness_assert_owner)
-#define witness_depth_error JEMALLOC_N(witness_depth_error)
-#define witness_fork_cleanup JEMALLOC_N(witness_fork_cleanup)
-#define witness_init JEMALLOC_N(witness_init)
-#define witness_lock JEMALLOC_N(witness_lock)
-#define witness_lock_error JEMALLOC_N(witness_lock_error)
-#define witness_not_owner_error JEMALLOC_N(witness_not_owner_error)
-#define witness_owner JEMALLOC_N(witness_owner)
-#define witness_owner_error JEMALLOC_N(witness_owner_error)
-#define witness_postfork_child JEMALLOC_N(witness_postfork_child)
-#define witness_postfork_parent JEMALLOC_N(witness_postfork_parent)
-#define witness_prefork JEMALLOC_N(witness_prefork)
-#define witness_unlock JEMALLOC_N(witness_unlock)
-#define witnesses_cleanup JEMALLOC_N(witnesses_cleanup)
-#define zone_register JEMALLOC_N(zone_register)
+#define a0dalloc JEMALLOC_N(a0dalloc)
+#define a0malloc JEMALLOC_N(a0malloc)
+#define arena_choose_hard JEMALLOC_N(arena_choose_hard)
+#define arena_cleanup JEMALLOC_N(arena_cleanup)
+#define arena_init JEMALLOC_N(arena_init)
+#define arena_migrate JEMALLOC_N(arena_migrate)
+#define arena_set JEMALLOC_N(arena_set)
+#define arena_tdata_get_hard JEMALLOC_N(arena_tdata_get_hard)
+#define arenas JEMALLOC_N(arenas)
+#define arenas_lock JEMALLOC_N(arenas_lock)
+#define arenas_tdata_cleanup JEMALLOC_N(arenas_tdata_cleanup)
+#define bootstrap_calloc JEMALLOC_N(bootstrap_calloc)
+#define bootstrap_free JEMALLOC_N(bootstrap_free)
+#define bootstrap_malloc JEMALLOC_N(bootstrap_malloc)
+#define iarena_cleanup JEMALLOC_N(iarena_cleanup)
+#define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child)
+#define malloc_initialized JEMALLOC_N(malloc_initialized)
+#define malloc_slow JEMALLOC_N(malloc_slow)
+#define narenas_auto JEMALLOC_N(narenas_auto)
+#define narenas_total_get JEMALLOC_N(narenas_total_get)
+#define ncpus JEMALLOC_N(ncpus)
+#define opt_abort JEMALLOC_N(opt_abort)
+#define opt_abort_conf JEMALLOC_N(opt_abort_conf)
+#define opt_junk JEMALLOC_N(opt_junk)
+#define opt_junk_alloc JEMALLOC_N(opt_junk_alloc)
+#define opt_junk_free JEMALLOC_N(opt_junk_free)
+#define opt_narenas JEMALLOC_N(opt_narenas)
+#define opt_utrace JEMALLOC_N(opt_utrace)
+#define opt_xmalloc JEMALLOC_N(opt_xmalloc)
+#define opt_zero JEMALLOC_N(opt_zero)
+#define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small)
+#define arena_basic_stats_merge JEMALLOC_N(arena_basic_stats_merge)
+#define arena_bin_info JEMALLOC_N(arena_bin_info)
+#define arena_boot JEMALLOC_N(arena_boot)
+#define arena_dalloc_bin_junked_locked JEMALLOC_N(arena_dalloc_bin_junked_locked)
+#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
+#define arena_dalloc_promoted JEMALLOC_N(arena_dalloc_promoted)
+#define arena_dalloc_small JEMALLOC_N(arena_dalloc_small)
+#define arena_decay JEMALLOC_N(arena_decay)
+#define arena_destroy JEMALLOC_N(arena_destroy)
+#define arena_dirty_decay_ms_default_get JEMALLOC_N(arena_dirty_decay_ms_default_get)
+#define arena_dirty_decay_ms_default_set JEMALLOC_N(arena_dirty_decay_ms_default_set)
+#define arena_dirty_decay_ms_get JEMALLOC_N(arena_dirty_decay_ms_get)
+#define arena_dirty_decay_ms_set JEMALLOC_N(arena_dirty_decay_ms_set)
+#define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get)
+#define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set)
+#define arena_extent_alloc_large JEMALLOC_N(arena_extent_alloc_large)
+#define arena_extent_dalloc_large_prep JEMALLOC_N(arena_extent_dalloc_large_prep)
+#define arena_extent_ralloc_large_expand JEMALLOC_N(arena_extent_ralloc_large_expand)
+#define arena_extent_ralloc_large_shrink JEMALLOC_N(arena_extent_ralloc_large_shrink)
+#define arena_extent_sn_next JEMALLOC_N(arena_extent_sn_next)
+#define arena_extents_dirty_dalloc JEMALLOC_N(arena_extents_dirty_dalloc)
+#define arena_malloc_hard JEMALLOC_N(arena_malloc_hard)
+#define arena_muzzy_decay_ms_default_get JEMALLOC_N(arena_muzzy_decay_ms_default_get)
+#define arena_muzzy_decay_ms_default_set JEMALLOC_N(arena_muzzy_decay_ms_default_set)
+#define arena_muzzy_decay_ms_get JEMALLOC_N(arena_muzzy_decay_ms_get)
+#define arena_muzzy_decay_ms_set JEMALLOC_N(arena_muzzy_decay_ms_set)
+#define arena_new JEMALLOC_N(arena_new)
+#define arena_nthreads_dec JEMALLOC_N(arena_nthreads_dec)
+#define arena_nthreads_get JEMALLOC_N(arena_nthreads_get)
+#define arena_nthreads_inc JEMALLOC_N(arena_nthreads_inc)
+#define arena_palloc JEMALLOC_N(arena_palloc)
+#define arena_postfork_child JEMALLOC_N(arena_postfork_child)
+#define arena_postfork_parent JEMALLOC_N(arena_postfork_parent)
+#define arena_prefork0 JEMALLOC_N(arena_prefork0)
+#define arena_prefork1 JEMALLOC_N(arena_prefork1)
+#define arena_prefork2 JEMALLOC_N(arena_prefork2)
+#define arena_prefork3 JEMALLOC_N(arena_prefork3)
+#define arena_prefork4 JEMALLOC_N(arena_prefork4)
+#define arena_prefork5 JEMALLOC_N(arena_prefork5)
+#define arena_prefork6 JEMALLOC_N(arena_prefork6)
+#define arena_prof_promote JEMALLOC_N(arena_prof_promote)
+#define arena_ralloc JEMALLOC_N(arena_ralloc)
+#define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move)
+#define arena_reset JEMALLOC_N(arena_reset)
+#define arena_stats_large_nrequests_add JEMALLOC_N(arena_stats_large_nrequests_add)
+#define arena_stats_mapped_add JEMALLOC_N(arena_stats_mapped_add)
+#define arena_stats_merge JEMALLOC_N(arena_stats_merge)
+#define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small)
+#define h_steps JEMALLOC_N(h_steps)
+#define opt_dirty_decay_ms JEMALLOC_N(opt_dirty_decay_ms)
+#define opt_muzzy_decay_ms JEMALLOC_N(opt_muzzy_decay_ms)
+#define opt_percpu_arena JEMALLOC_N(opt_percpu_arena)
+#define percpu_arena_mode_names JEMALLOC_N(percpu_arena_mode_names)
+#define background_thread_boot0 JEMALLOC_N(background_thread_boot0)
+#define background_thread_boot1 JEMALLOC_N(background_thread_boot1)
+#define background_thread_create JEMALLOC_N(background_thread_create)
+#define background_thread_ctl_init JEMALLOC_N(background_thread_ctl_init)
+#define background_thread_enabled_state JEMALLOC_N(background_thread_enabled_state)
+#define background_thread_info JEMALLOC_N(background_thread_info)
+#define background_thread_interval_check JEMALLOC_N(background_thread_interval_check)
+#define background_thread_lock JEMALLOC_N(background_thread_lock)
+#define background_thread_postfork_child JEMALLOC_N(background_thread_postfork_child)
+#define background_thread_postfork_parent JEMALLOC_N(background_thread_postfork_parent)
+#define background_thread_prefork0 JEMALLOC_N(background_thread_prefork0)
+#define background_thread_prefork1 JEMALLOC_N(background_thread_prefork1)
+#define background_thread_stats_read JEMALLOC_N(background_thread_stats_read)
+#define background_threads_disable JEMALLOC_N(background_threads_disable)
+#define background_threads_enable JEMALLOC_N(background_threads_enable)
+#define can_enable_background_thread JEMALLOC_N(can_enable_background_thread)
+#define n_background_threads JEMALLOC_N(n_background_threads)
+#define opt_background_thread JEMALLOC_N(opt_background_thread)
+#define pthread_create_wrapper JEMALLOC_N(pthread_create_wrapper)
+#define b0get JEMALLOC_N(b0get)
+#define base_alloc JEMALLOC_N(base_alloc)
+#define base_alloc_extent JEMALLOC_N(base_alloc_extent)
+#define base_boot JEMALLOC_N(base_boot)
+#define base_delete JEMALLOC_N(base_delete)
+#define base_extent_hooks_get JEMALLOC_N(base_extent_hooks_get)
+#define base_extent_hooks_set JEMALLOC_N(base_extent_hooks_set)
+#define base_new JEMALLOC_N(base_new)
+#define base_postfork_child JEMALLOC_N(base_postfork_child)
+#define base_postfork_parent JEMALLOC_N(base_postfork_parent)
+#define base_prefork JEMALLOC_N(base_prefork)
+#define base_stats_get JEMALLOC_N(base_stats_get)
+#define bitmap_info_init JEMALLOC_N(bitmap_info_init)
+#define bitmap_init JEMALLOC_N(bitmap_init)
+#define bitmap_size JEMALLOC_N(bitmap_size)
+#define ckh_count JEMALLOC_N(ckh_count)
+#define ckh_delete JEMALLOC_N(ckh_delete)
+#define ckh_insert JEMALLOC_N(ckh_insert)
+#define ckh_iter JEMALLOC_N(ckh_iter)
+#define ckh_new JEMALLOC_N(ckh_new)
+#define ckh_pointer_hash JEMALLOC_N(ckh_pointer_hash)
+#define ckh_pointer_keycomp JEMALLOC_N(ckh_pointer_keycomp)
+#define ckh_remove JEMALLOC_N(ckh_remove)
+#define ckh_search JEMALLOC_N(ckh_search)
+#define ckh_string_hash JEMALLOC_N(ckh_string_hash)
+#define ckh_string_keycomp JEMALLOC_N(ckh_string_keycomp)
+#define ctl_boot JEMALLOC_N(ctl_boot)
+#define ctl_bymib JEMALLOC_N(ctl_bymib)
+#define ctl_byname JEMALLOC_N(ctl_byname)
+#define ctl_nametomib JEMALLOC_N(ctl_nametomib)
+#define ctl_postfork_child JEMALLOC_N(ctl_postfork_child)
+#define ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent)
+#define ctl_prefork JEMALLOC_N(ctl_prefork)
+#define extent_alloc JEMALLOC_N(extent_alloc)
+#define extent_alloc_wrapper JEMALLOC_N(extent_alloc_wrapper)
+#define extent_avail_destroy JEMALLOC_N(extent_avail_destroy)
+#define extent_avail_destroy_recurse JEMALLOC_N(extent_avail_destroy_recurse)
+#define extent_avail_empty JEMALLOC_N(extent_avail_empty)
+#define extent_avail_first JEMALLOC_N(extent_avail_first)
+#define extent_avail_insert JEMALLOC_N(extent_avail_insert)
+#define extent_avail_iter JEMALLOC_N(extent_avail_iter)
+#define extent_avail_iter_recurse JEMALLOC_N(extent_avail_iter_recurse)
+#define extent_avail_iter_start JEMALLOC_N(extent_avail_iter_start)
+#define extent_avail_last JEMALLOC_N(extent_avail_last)
+#define extent_avail_new JEMALLOC_N(extent_avail_new)
+#define extent_avail_next JEMALLOC_N(extent_avail_next)
+#define extent_avail_nsearch JEMALLOC_N(extent_avail_nsearch)
+#define extent_avail_prev JEMALLOC_N(extent_avail_prev)
+#define extent_avail_psearch JEMALLOC_N(extent_avail_psearch)
+#define extent_avail_remove JEMALLOC_N(extent_avail_remove)
+#define extent_avail_reverse_iter JEMALLOC_N(extent_avail_reverse_iter)
+#define extent_avail_reverse_iter_recurse JEMALLOC_N(extent_avail_reverse_iter_recurse)
+#define extent_avail_reverse_iter_start JEMALLOC_N(extent_avail_reverse_iter_start)
+#define extent_avail_search JEMALLOC_N(extent_avail_search)
+#define extent_boot JEMALLOC_N(extent_boot)
+#define extent_commit_wrapper JEMALLOC_N(extent_commit_wrapper)
+#define extent_dalloc JEMALLOC_N(extent_dalloc)
+#define extent_dalloc_gap JEMALLOC_N(extent_dalloc_gap)
+#define extent_dalloc_wrapper JEMALLOC_N(extent_dalloc_wrapper)
+#define extent_decommit_wrapper JEMALLOC_N(extent_decommit_wrapper)
+#define extent_destroy_wrapper JEMALLOC_N(extent_destroy_wrapper)
+#define extent_heap_any JEMALLOC_N(extent_heap_any)
+#define extent_heap_empty JEMALLOC_N(extent_heap_empty)
+#define extent_heap_first JEMALLOC_N(extent_heap_first)
+#define extent_heap_insert JEMALLOC_N(extent_heap_insert)
+#define extent_heap_new JEMALLOC_N(extent_heap_new)
+#define extent_heap_remove JEMALLOC_N(extent_heap_remove)
+#define extent_heap_remove_any JEMALLOC_N(extent_heap_remove_any)
+#define extent_heap_remove_first JEMALLOC_N(extent_heap_remove_first)
+#define extent_hooks_default JEMALLOC_N(extent_hooks_default)
+#define extent_hooks_get JEMALLOC_N(extent_hooks_get)
+#define extent_hooks_set JEMALLOC_N(extent_hooks_set)
+#define extent_merge_wrapper JEMALLOC_N(extent_merge_wrapper)
+#define extent_mutex_pool JEMALLOC_N(extent_mutex_pool)
+#define extent_purge_forced_wrapper JEMALLOC_N(extent_purge_forced_wrapper)
+#define extent_purge_lazy_wrapper JEMALLOC_N(extent_purge_lazy_wrapper)
+#define extent_split_wrapper JEMALLOC_N(extent_split_wrapper)
+#define extents_alloc JEMALLOC_N(extents_alloc)
+#define extents_dalloc JEMALLOC_N(extents_dalloc)
+#define extents_evict JEMALLOC_N(extents_evict)
+#define extents_init JEMALLOC_N(extents_init)
+#define extents_npages_get JEMALLOC_N(extents_npages_get)
+#define extents_postfork_child JEMALLOC_N(extents_postfork_child)
+#define extents_postfork_parent JEMALLOC_N(extents_postfork_parent)
+#define extents_prefork JEMALLOC_N(extents_prefork)
+#define extents_rtree JEMALLOC_N(extents_rtree)
+#define extents_state_get JEMALLOC_N(extents_state_get)
+#define dss_prec_names JEMALLOC_N(dss_prec_names)
+#define extent_alloc_dss JEMALLOC_N(extent_alloc_dss)
+#define extent_dss_boot JEMALLOC_N(extent_dss_boot)
+#define extent_dss_mergeable JEMALLOC_N(extent_dss_mergeable)
+#define extent_dss_prec_get JEMALLOC_N(extent_dss_prec_get)
+#define extent_dss_prec_set JEMALLOC_N(extent_dss_prec_set)
+#define extent_in_dss JEMALLOC_N(extent_in_dss)
+#define opt_dss JEMALLOC_N(opt_dss)
+#define extent_alloc_mmap JEMALLOC_N(extent_alloc_mmap)
+#define extent_dalloc_mmap JEMALLOC_N(extent_dalloc_mmap)
+#define opt_retain JEMALLOC_N(opt_retain)
+#define hooks_arena_new_hook JEMALLOC_N(hooks_arena_new_hook)
+#define hooks_libc_hook JEMALLOC_N(hooks_libc_hook)
+#define large_dalloc JEMALLOC_N(large_dalloc)
+#define large_dalloc_finish JEMALLOC_N(large_dalloc_finish)
+#define large_dalloc_junk JEMALLOC_N(large_dalloc_junk)
+#define large_dalloc_maybe_junk JEMALLOC_N(large_dalloc_maybe_junk)
+#define large_dalloc_prep_junked_locked JEMALLOC_N(large_dalloc_prep_junked_locked)
+#define large_malloc JEMALLOC_N(large_malloc)
+#define large_palloc JEMALLOC_N(large_palloc)
+#define large_prof_tctx_get JEMALLOC_N(large_prof_tctx_get)
+#define large_prof_tctx_reset JEMALLOC_N(large_prof_tctx_reset)
+#define large_prof_tctx_set JEMALLOC_N(large_prof_tctx_set)
+#define large_ralloc JEMALLOC_N(large_ralloc)
+#define large_ralloc_no_move JEMALLOC_N(large_ralloc_no_move)
+#define large_salloc JEMALLOC_N(large_salloc)
+#define buferror JEMALLOC_N(buferror)
+#define malloc_cprintf JEMALLOC_N(malloc_cprintf)
+#define malloc_printf JEMALLOC_N(malloc_printf)
+#define malloc_snprintf JEMALLOC_N(malloc_snprintf)
+#define malloc_strtoumax JEMALLOC_N(malloc_strtoumax)
+#define malloc_vcprintf JEMALLOC_N(malloc_vcprintf)
+#define malloc_vsnprintf JEMALLOC_N(malloc_vsnprintf)
+#define malloc_write JEMALLOC_N(malloc_write)
+#define malloc_mutex_boot JEMALLOC_N(malloc_mutex_boot)
+#define malloc_mutex_init JEMALLOC_N(malloc_mutex_init)
+#define malloc_mutex_lock_slow JEMALLOC_N(malloc_mutex_lock_slow)
+#define malloc_mutex_postfork_child JEMALLOC_N(malloc_mutex_postfork_child)
+#define malloc_mutex_postfork_parent JEMALLOC_N(malloc_mutex_postfork_parent)
+#define malloc_mutex_prefork JEMALLOC_N(malloc_mutex_prefork)
+#define malloc_mutex_prof_data_reset JEMALLOC_N(malloc_mutex_prof_data_reset)
+#define mutex_pool_init JEMALLOC_N(mutex_pool_init)
+#define nstime_add JEMALLOC_N(nstime_add)
+#define nstime_compare JEMALLOC_N(nstime_compare)
+#define nstime_copy JEMALLOC_N(nstime_copy)
+#define nstime_divide JEMALLOC_N(nstime_divide)
+#define nstime_iadd JEMALLOC_N(nstime_iadd)
+#define nstime_idivide JEMALLOC_N(nstime_idivide)
+#define nstime_imultiply JEMALLOC_N(nstime_imultiply)
+#define nstime_init JEMALLOC_N(nstime_init)
+#define nstime_init2 JEMALLOC_N(nstime_init2)
+#define nstime_isubtract JEMALLOC_N(nstime_isubtract)
+#define nstime_monotonic JEMALLOC_N(nstime_monotonic)
+#define nstime_msec JEMALLOC_N(nstime_msec)
+#define nstime_ns JEMALLOC_N(nstime_ns)
+#define nstime_nsec JEMALLOC_N(nstime_nsec)
+#define nstime_sec JEMALLOC_N(nstime_sec)
+#define nstime_subtract JEMALLOC_N(nstime_subtract)
+#define nstime_update JEMALLOC_N(nstime_update)
+#define pages_boot JEMALLOC_N(pages_boot)
+#define pages_commit JEMALLOC_N(pages_commit)
+#define pages_decommit JEMALLOC_N(pages_decommit)
+#define pages_huge JEMALLOC_N(pages_huge)
+#define pages_map JEMALLOC_N(pages_map)
+#define pages_nohuge JEMALLOC_N(pages_nohuge)
+#define pages_purge_forced JEMALLOC_N(pages_purge_forced)
+#define pages_purge_lazy JEMALLOC_N(pages_purge_lazy)
+#define pages_unmap JEMALLOC_N(pages_unmap)
+#define bt2gctx_mtx JEMALLOC_N(bt2gctx_mtx)
+#define bt_init JEMALLOC_N(bt_init)
+#define lg_prof_sample JEMALLOC_N(lg_prof_sample)
+#define opt_lg_prof_interval JEMALLOC_N(opt_lg_prof_interval)
+#define opt_lg_prof_sample JEMALLOC_N(opt_lg_prof_sample)
+#define opt_prof JEMALLOC_N(opt_prof)
+#define opt_prof_accum JEMALLOC_N(opt_prof_accum)
+#define opt_prof_active JEMALLOC_N(opt_prof_active)
+#define opt_prof_final JEMALLOC_N(opt_prof_final)
+#define opt_prof_gdump JEMALLOC_N(opt_prof_gdump)
+#define opt_prof_leak JEMALLOC_N(opt_prof_leak)
+#define opt_prof_prefix JEMALLOC_N(opt_prof_prefix)
+#define opt_prof_thread_active_init JEMALLOC_N(opt_prof_thread_active_init)
+#define prof_accum_init JEMALLOC_N(prof_accum_init)
+#define prof_active JEMALLOC_N(prof_active)
+#define prof_active_get JEMALLOC_N(prof_active_get)
+#define prof_active_set JEMALLOC_N(prof_active_set)
+#define prof_alloc_rollback JEMALLOC_N(prof_alloc_rollback)
+#define prof_backtrace JEMALLOC_N(prof_backtrace)
+#define prof_boot0 JEMALLOC_N(prof_boot0)
+#define prof_boot1 JEMALLOC_N(prof_boot1)
+#define prof_boot2 JEMALLOC_N(prof_boot2)
+#define prof_dump_header JEMALLOC_N(prof_dump_header)
+#define prof_dump_open JEMALLOC_N(prof_dump_open)
+#define prof_free_sampled_object JEMALLOC_N(prof_free_sampled_object)
+#define prof_gdump JEMALLOC_N(prof_gdump)
+#define prof_gdump_get JEMALLOC_N(prof_gdump_get)
+#define prof_gdump_set JEMALLOC_N(prof_gdump_set)
+#define prof_gdump_val JEMALLOC_N(prof_gdump_val)
+#define prof_idump JEMALLOC_N(prof_idump)
+#define prof_interval JEMALLOC_N(prof_interval)
+#define prof_lookup JEMALLOC_N(prof_lookup)
+#define prof_malloc_sample_object JEMALLOC_N(prof_malloc_sample_object)
+#define prof_mdump JEMALLOC_N(prof_mdump)
+#define prof_postfork_child JEMALLOC_N(prof_postfork_child)
+#define prof_postfork_parent JEMALLOC_N(prof_postfork_parent)
+#define prof_prefork0 JEMALLOC_N(prof_prefork0)
+#define prof_prefork1 JEMALLOC_N(prof_prefork1)
+#define prof_reset JEMALLOC_N(prof_reset)
+#define prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update)
+#define prof_tdata_cleanup JEMALLOC_N(prof_tdata_cleanup)
+#define prof_tdata_init JEMALLOC_N(prof_tdata_init)
+#define prof_tdata_reinit JEMALLOC_N(prof_tdata_reinit)
+#define prof_thread_active_get JEMALLOC_N(prof_thread_active_get)
+#define prof_thread_active_init_get JEMALLOC_N(prof_thread_active_init_get)
+#define prof_thread_active_init_set JEMALLOC_N(prof_thread_active_init_set)
+#define prof_thread_active_set JEMALLOC_N(prof_thread_active_set)
+#define prof_thread_name_get JEMALLOC_N(prof_thread_name_get)
+#define prof_thread_name_set JEMALLOC_N(prof_thread_name_set)
+#define rtree_ctx_data_init JEMALLOC_N(rtree_ctx_data_init)
+#define rtree_leaf_alloc JEMALLOC_N(rtree_leaf_alloc)
+#define rtree_leaf_dalloc JEMALLOC_N(rtree_leaf_dalloc)
+#define rtree_leaf_elm_lookup_hard JEMALLOC_N(rtree_leaf_elm_lookup_hard)
+#define rtree_new JEMALLOC_N(rtree_new)
+#define rtree_node_alloc JEMALLOC_N(rtree_node_alloc)
+#define rtree_node_dalloc JEMALLOC_N(rtree_node_dalloc)
+#define arena_mutex_names JEMALLOC_N(arena_mutex_names)
+#define global_mutex_names JEMALLOC_N(global_mutex_names)
+#define opt_stats_print JEMALLOC_N(opt_stats_print)
+#define opt_stats_print_opts JEMALLOC_N(opt_stats_print_opts)
+#define stats_print JEMALLOC_N(stats_print)
+#define spin_adaptive JEMALLOC_N(spin_adaptive)
+#define sz_index2size_tab JEMALLOC_N(sz_index2size_tab)
+#define sz_pind2sz_tab JEMALLOC_N(sz_pind2sz_tab)
+#define sz_size2index_tab JEMALLOC_N(sz_size2index_tab)
+#define nhbins JEMALLOC_N(nhbins)
+#define opt_lg_tcache_max JEMALLOC_N(opt_lg_tcache_max)
+#define opt_tcache JEMALLOC_N(opt_tcache)
+#define tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard)
+#define tcache_arena_associate JEMALLOC_N(tcache_arena_associate)
+#define tcache_arena_reassociate JEMALLOC_N(tcache_arena_reassociate)
+#define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large)
+#define tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small)
+#define tcache_bin_info JEMALLOC_N(tcache_bin_info)
+#define tcache_boot JEMALLOC_N(tcache_boot)
+#define tcache_cleanup JEMALLOC_N(tcache_cleanup)
+#define tcache_create_explicit JEMALLOC_N(tcache_create_explicit)
+#define tcache_event_hard JEMALLOC_N(tcache_event_hard)
+#define tcache_flush JEMALLOC_N(tcache_flush)
+#define tcache_maxclass JEMALLOC_N(tcache_maxclass)
+#define tcache_postfork_child JEMALLOC_N(tcache_postfork_child)
+#define tcache_postfork_parent JEMALLOC_N(tcache_postfork_parent)
+#define tcache_prefork JEMALLOC_N(tcache_prefork)
+#define tcache_salloc JEMALLOC_N(tcache_salloc)
+#define tcache_stats_merge JEMALLOC_N(tcache_stats_merge)
+#define tcaches JEMALLOC_N(tcaches)
+#define tcaches_create JEMALLOC_N(tcaches_create)
+#define tcaches_destroy JEMALLOC_N(tcaches_destroy)
+#define tcaches_flush JEMALLOC_N(tcaches_flush)
+#define tsd_tcache_data_init JEMALLOC_N(tsd_tcache_data_init)
+#define tsd_tcache_enabled_data_init JEMALLOC_N(tsd_tcache_enabled_data_init)
+#define malloc_tsd_boot0 JEMALLOC_N(malloc_tsd_boot0)
+#define malloc_tsd_boot1 JEMALLOC_N(malloc_tsd_boot1)
+#define malloc_tsd_cleanup_register JEMALLOC_N(malloc_tsd_cleanup_register)
+#define malloc_tsd_dalloc JEMALLOC_N(malloc_tsd_dalloc)
+#define malloc_tsd_malloc JEMALLOC_N(malloc_tsd_malloc)
+#define tsd_booted JEMALLOC_N(tsd_booted)
+#define tsd_cleanup JEMALLOC_N(tsd_cleanup)
+#define tsd_fetch_slow JEMALLOC_N(tsd_fetch_slow)
+#define tsd_initialized JEMALLOC_N(tsd_initialized)
+#define tsd_slow_update JEMALLOC_N(tsd_slow_update)
+#define tsd_tls JEMALLOC_N(tsd_tls)
+#define witness_depth_error JEMALLOC_N(witness_depth_error)
+#define witness_init JEMALLOC_N(witness_init)
+#define witness_lock_error JEMALLOC_N(witness_lock_error)
+#define witness_not_owner_error JEMALLOC_N(witness_not_owner_error)
+#define witness_owner_error JEMALLOC_N(witness_owner_error)
+#define witness_postfork_child JEMALLOC_N(witness_postfork_child)
+#define witness_postfork_parent JEMALLOC_N(witness_postfork_parent)
+#define witness_prefork JEMALLOC_N(witness_prefork)
+#define witnesses_cleanup JEMALLOC_N(witnesses_cleanup)
diff --git a/contrib/jemalloc/include/jemalloc/internal/prng.h b/contrib/jemalloc/include/jemalloc/internal/prng.h
index c2bda19c6b09..15cc2d18fa4d 100644
--- a/contrib/jemalloc/include/jemalloc/internal/prng.h
+++ b/contrib/jemalloc/include/jemalloc/internal/prng.h
@@ -1,5 +1,8 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
+#ifndef JEMALLOC_INTERNAL_PRNG_H
+#define JEMALLOC_INTERNAL_PRNG_H
+
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/bit_util.h"
/*
* Simple linear congruential pseudo-random number generator:
@@ -20,95 +23,71 @@
* bits.
*/
-#define PRNG_A_32 UINT32_C(1103515241)
-#define PRNG_C_32 UINT32_C(12347)
-
-#define PRNG_A_64 UINT64_C(6364136223846793005)
-#define PRNG_C_64 UINT64_C(1442695040888963407)
-
-#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-#endif /* JEMALLOC_H_EXTERNS */
+/* INTERNAL DEFINITIONS -- IGNORE */
/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
+#define PRNG_A_32 UINT32_C(1103515241)
+#define PRNG_C_32 UINT32_C(12347)
-#ifndef JEMALLOC_ENABLE_INLINE
-uint32_t prng_state_next_u32(uint32_t state);
-uint64_t prng_state_next_u64(uint64_t state);
-size_t prng_state_next_zu(size_t state);
+#define PRNG_A_64 UINT64_C(6364136223846793005)
+#define PRNG_C_64 UINT64_C(1442695040888963407)
-uint32_t prng_lg_range_u32(uint32_t *state, unsigned lg_range,
- bool atomic);
-uint64_t prng_lg_range_u64(uint64_t *state, unsigned lg_range);
-size_t prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic);
-
-uint32_t prng_range_u32(uint32_t *state, uint32_t range, bool atomic);
-uint64_t prng_range_u64(uint64_t *state, uint64_t range);
-size_t prng_range_zu(size_t *state, size_t range, bool atomic);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PRNG_C_))
JEMALLOC_ALWAYS_INLINE uint32_t
-prng_state_next_u32(uint32_t state)
-{
-
- return ((state * PRNG_A_32) + PRNG_C_32);
+prng_state_next_u32(uint32_t state) {
+ return (state * PRNG_A_32) + PRNG_C_32;
}
JEMALLOC_ALWAYS_INLINE uint64_t
-prng_state_next_u64(uint64_t state)
-{
-
- return ((state * PRNG_A_64) + PRNG_C_64);
+prng_state_next_u64(uint64_t state) {
+ return (state * PRNG_A_64) + PRNG_C_64;
}
JEMALLOC_ALWAYS_INLINE size_t
-prng_state_next_zu(size_t state)
-{
-
+prng_state_next_zu(size_t state) {
#if LG_SIZEOF_PTR == 2
- return ((state * PRNG_A_32) + PRNG_C_32);
+ return (state * PRNG_A_32) + PRNG_C_32;
#elif LG_SIZEOF_PTR == 3
- return ((state * PRNG_A_64) + PRNG_C_64);
+ return (state * PRNG_A_64) + PRNG_C_64;
#else
#error Unsupported pointer size
#endif
}
+/******************************************************************************/
+/* BEGIN PUBLIC API */
+/******************************************************************************/
+
+/*
+ * The prng_lg_range functions give a uniform int in the half-open range [0,
+ * 2**lg_range). If atomic is true, they do so safely from multiple threads.
+ * Multithreaded 64-bit prngs aren't supported.
+ */
+
JEMALLOC_ALWAYS_INLINE uint32_t
-prng_lg_range_u32(uint32_t *state, unsigned lg_range, bool atomic)
-{
- uint32_t ret, state1;
+prng_lg_range_u32(atomic_u32_t *state, unsigned lg_range, bool atomic) {
+ uint32_t ret, state0, state1;
assert(lg_range > 0);
assert(lg_range <= 32);
- if (atomic) {
- uint32_t state0;
+ state0 = atomic_load_u32(state, ATOMIC_RELAXED);
+ if (atomic) {
do {
- state0 = atomic_read_uint32(state);
state1 = prng_state_next_u32(state0);
- } while (atomic_cas_uint32(state, state0, state1));
+ } while (!atomic_compare_exchange_weak_u32(state, &state0,
+ state1, ATOMIC_RELAXED, ATOMIC_RELAXED));
} else {
- state1 = prng_state_next_u32(*state);
- *state = state1;
+ state1 = prng_state_next_u32(state0);
+ atomic_store_u32(state, state1, ATOMIC_RELAXED);
}
ret = state1 >> (32 - lg_range);
- return (ret);
+ return ret;
}
-/* 64-bit atomic operations cannot be supported on all relevant platforms. */
JEMALLOC_ALWAYS_INLINE uint64_t
-prng_lg_range_u64(uint64_t *state, unsigned lg_range)
-{
+prng_lg_range_u64(uint64_t *state, unsigned lg_range) {
uint64_t ret, state1;
assert(lg_range > 0);
@@ -118,36 +97,39 @@ prng_lg_range_u64(uint64_t *state, unsigned lg_range)
*state = state1;
ret = state1 >> (64 - lg_range);
- return (ret);
+ return ret;
}
JEMALLOC_ALWAYS_INLINE size_t
-prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic)
-{
- size_t ret, state1;
+prng_lg_range_zu(atomic_zu_t *state, unsigned lg_range, bool atomic) {
+ size_t ret, state0, state1;
assert(lg_range > 0);
assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR));
- if (atomic) {
- size_t state0;
+ state0 = atomic_load_zu(state, ATOMIC_RELAXED);
+ if (atomic) {
do {
- state0 = atomic_read_z(state);
state1 = prng_state_next_zu(state0);
- } while (atomic_cas_z(state, state0, state1));
+		} while (!atomic_compare_exchange_weak_zu(state, &state0,
+ state1, ATOMIC_RELAXED, ATOMIC_RELAXED));
} else {
- state1 = prng_state_next_zu(*state);
- *state = state1;
+ state1 = prng_state_next_zu(state0);
+ atomic_store_zu(state, state1, ATOMIC_RELAXED);
}
ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range);
- return (ret);
+ return ret;
}
+/*
+ * The prng_range functions behave like the prng_lg_range, but return a result
+ * in [0, range) instead of [0, 2**lg_range).
+ */
+
JEMALLOC_ALWAYS_INLINE uint32_t
-prng_range_u32(uint32_t *state, uint32_t range, bool atomic)
-{
+prng_range_u32(atomic_u32_t *state, uint32_t range, bool atomic) {
uint32_t ret;
unsigned lg_range;
@@ -161,12 +143,11 @@ prng_range_u32(uint32_t *state, uint32_t range, bool atomic)
ret = prng_lg_range_u32(state, lg_range, atomic);
} while (ret >= range);
- return (ret);
+ return ret;
}
JEMALLOC_ALWAYS_INLINE uint64_t
-prng_range_u64(uint64_t *state, uint64_t range)
-{
+prng_range_u64(uint64_t *state, uint64_t range) {
uint64_t ret;
unsigned lg_range;
@@ -180,12 +161,11 @@ prng_range_u64(uint64_t *state, uint64_t range)
ret = prng_lg_range_u64(state, lg_range);
} while (ret >= range);
- return (ret);
+ return ret;
}
JEMALLOC_ALWAYS_INLINE size_t
-prng_range_zu(size_t *state, size_t range, bool atomic)
-{
+prng_range_zu(atomic_zu_t *state, size_t range, bool atomic) {
size_t ret;
unsigned lg_range;
@@ -199,9 +179,7 @@ prng_range_zu(size_t *state, size_t range, bool atomic)
ret = prng_lg_range_zu(state, lg_range, atomic);
} while (ret >= range);
- return (ret);
+ return ret;
}
-#endif
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
+#endif /* JEMALLOC_INTERNAL_PRNG_H */
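
For reference, here is a minimal caller-side sketch of the reworked PRNG API above (an illustration, not part of the patch). The state word is now an atomic type from the new atomic.h, and the trailing bool selects whether the update goes through a CAS loop; ATOMIC_INIT, the seed value, and the helper name are assumptions made only for this example.

	/* Sketch only: assumes jemalloc's internal atomic.h and prng.h are in scope. */
	static atomic_u32_t example_prng_state = ATOMIC_INIT(42);

	static uint32_t
	example_pick_slot(uint32_t nslots) {
		/* Uniform draw in [0, nslots); atomic=false is fine for thread-local state. */
		return prng_range_u32(&example_prng_state, nslots, false);
	}
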
diff --git a/contrib/jemalloc/include/jemalloc/internal/prof.h b/contrib/jemalloc/include/jemalloc/internal/prof.h
deleted file mode 100644
index 8293b71edc65..000000000000
--- a/contrib/jemalloc/include/jemalloc/internal/prof.h
+++ /dev/null
@@ -1,547 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-typedef struct prof_bt_s prof_bt_t;
-typedef struct prof_cnt_s prof_cnt_t;
-typedef struct prof_tctx_s prof_tctx_t;
-typedef struct prof_gctx_s prof_gctx_t;
-typedef struct prof_tdata_s prof_tdata_t;
-
-/* Option defaults. */
-#ifdef JEMALLOC_PROF
-# define PROF_PREFIX_DEFAULT "jeprof"
-#else
-# define PROF_PREFIX_DEFAULT ""
-#endif
-#define LG_PROF_SAMPLE_DEFAULT 19
-#define LG_PROF_INTERVAL_DEFAULT -1
-
-/*
- * Hard limit on stack backtrace depth. The version of prof_backtrace() that
- * is based on __builtin_return_address() necessarily has a hard-coded number
- * of backtrace frame handlers, and should be kept in sync with this setting.
- */
-#define PROF_BT_MAX 128
-
-/* Initial hash table size. */
-#define PROF_CKH_MINITEMS 64
-
-/* Size of memory buffer to use when writing dump files. */
-#define PROF_DUMP_BUFSIZE 65536
-
-/* Size of stack-allocated buffer used by prof_printf(). */
-#define PROF_PRINTF_BUFSIZE 128
-
-/*
- * Number of mutexes shared among all gctx's. No space is allocated for these
- * unless profiling is enabled, so it's okay to over-provision.
- */
-#define PROF_NCTX_LOCKS 1024
-
-/*
- * Number of mutexes shared among all tdata's. No space is allocated for these
- * unless profiling is enabled, so it's okay to over-provision.
- */
-#define PROF_NTDATA_LOCKS 256
-
-/*
- * prof_tdata pointers close to NULL are used to encode state information that
- * is used for cleaning up during thread shutdown.
- */
-#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1)
-#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2)
-#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-struct prof_bt_s {
- /* Backtrace, stored as len program counters. */
- void **vec;
- unsigned len;
-};
-
-#ifdef JEMALLOC_PROF_LIBGCC
-/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
-typedef struct {
- prof_bt_t *bt;
- unsigned max;
-} prof_unwind_data_t;
-#endif
-
-struct prof_cnt_s {
- /* Profiling counters. */
- uint64_t curobjs;
- uint64_t curbytes;
- uint64_t accumobjs;
- uint64_t accumbytes;
-};
-
-typedef enum {
- prof_tctx_state_initializing,
- prof_tctx_state_nominal,
- prof_tctx_state_dumping,
- prof_tctx_state_purgatory /* Dumper must finish destroying. */
-} prof_tctx_state_t;
-
-struct prof_tctx_s {
- /* Thread data for thread that performed the allocation. */
- prof_tdata_t *tdata;
-
- /*
- * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be
- * defunct during teardown.
- */
- uint64_t thr_uid;
- uint64_t thr_discrim;
-
- /* Profiling counters, protected by tdata->lock. */
- prof_cnt_t cnts;
-
- /* Associated global context. */
- prof_gctx_t *gctx;
-
- /*
- * UID that distinguishes multiple tctx's created by the same thread,
- * but coexisting in gctx->tctxs. There are two ways that such
- * coexistence can occur:
- * - A dumper thread can cause a tctx to be retained in the purgatory
- * state.
- * - Although a single "producer" thread must create all tctx's which
- * share the same thr_uid, multiple "consumers" can each concurrently
- * execute portions of prof_tctx_destroy(). prof_tctx_destroy() only
- * gets called once each time cnts.cur{objs,bytes} drop to 0, but this
- * threshold can be hit again before the first consumer finishes
- * executing prof_tctx_destroy().
- */
- uint64_t tctx_uid;
-
- /* Linkage into gctx's tctxs. */
- rb_node(prof_tctx_t) tctx_link;
-
- /*
- * True during prof_alloc_prep()..prof_malloc_sample_object(), prevents
- * sample vs destroy race.
- */
- bool prepared;
-
- /* Current dump-related state, protected by gctx->lock. */
- prof_tctx_state_t state;
-
- /*
- * Copy of cnts snapshotted during early dump phase, protected by
- * dump_mtx.
- */
- prof_cnt_t dump_cnts;
-};
-typedef rb_tree(prof_tctx_t) prof_tctx_tree_t;
-
-struct prof_gctx_s {
- /* Protects nlimbo, cnt_summed, and tctxs. */
- malloc_mutex_t *lock;
-
- /*
- * Number of threads that currently cause this gctx to be in a state of
- * limbo due to one of:
- * - Initializing this gctx.
- * - Initializing per thread counters associated with this gctx.
- * - Preparing to destroy this gctx.
- * - Dumping a heap profile that includes this gctx.
- * nlimbo must be 1 (single destroyer) in order to safely destroy the
- * gctx.
- */
- unsigned nlimbo;
-
- /*
- * Tree of profile counters, one for each thread that has allocated in
- * this context.
- */
- prof_tctx_tree_t tctxs;
-
- /* Linkage for tree of contexts to be dumped. */
- rb_node(prof_gctx_t) dump_link;
-
- /* Temporary storage for summation during dump. */
- prof_cnt_t cnt_summed;
-
- /* Associated backtrace. */
- prof_bt_t bt;
-
- /* Backtrace vector, variable size, referred to by bt. */
- void *vec[1];
-};
-typedef rb_tree(prof_gctx_t) prof_gctx_tree_t;
-
-struct prof_tdata_s {
- malloc_mutex_t *lock;
-
- /* Monotonically increasing unique thread identifier. */
- uint64_t thr_uid;
-
- /*
- * Monotonically increasing discriminator among tdata structures
- * associated with the same thr_uid.
- */
- uint64_t thr_discrim;
-
- /* Included in heap profile dumps if non-NULL. */
- char *thread_name;
-
- bool attached;
- bool expired;
-
- rb_node(prof_tdata_t) tdata_link;
-
- /*
- * Counter used to initialize prof_tctx_t's tctx_uid. No locking is
- * necessary when incrementing this field, because only one thread ever
- * does so.
- */
- uint64_t tctx_uid_next;
-
- /*
- * Hash of (prof_bt_t *)-->(prof_tctx_t *). Each thread tracks
- * backtraces for which it has non-zero allocation/deallocation counters
- * associated with thread-specific prof_tctx_t objects. Other threads
- * may write to prof_tctx_t contents when freeing associated objects.
- */
- ckh_t bt2tctx;
-
- /* Sampling state. */
- uint64_t prng_state;
- uint64_t bytes_until_sample;
-
- /* State used to avoid dumping while operating on prof internals. */
- bool enq;
- bool enq_idump;
- bool enq_gdump;
-
- /*
- * Set to true during an early dump phase for tdata's which are
- * currently being dumped. New threads' tdata's have this initialized
- * to false so that they aren't accidentally included in later dump
- * phases.
- */
- bool dumping;
-
- /*
- * True if profiling is active for this tdata's thread
- * (thread.prof.active mallctl).
- */
- bool active;
-
- /* Temporary storage for summation during dump. */
- prof_cnt_t cnt_summed;
-
- /* Backtrace vector, used for calls to prof_backtrace(). */
- void *vec[PROF_BT_MAX];
-};
-typedef rb_tree(prof_tdata_t) prof_tdata_tree_t;
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-extern bool opt_prof;
-extern bool opt_prof_active;
-extern bool opt_prof_thread_active_init;
-extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */
-extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
-extern bool opt_prof_gdump; /* High-water memory dumping. */
-extern bool opt_prof_final; /* Final profile dumping. */
-extern bool opt_prof_leak; /* Dump leak summary at exit. */
-extern bool opt_prof_accum; /* Report cumulative bytes. */
-extern char opt_prof_prefix[
- /* Minimize memory bloat for non-prof builds. */
-#ifdef JEMALLOC_PROF
- PATH_MAX +
-#endif
- 1];
-
-/* Accessed via prof_active_[gs]et{_unlocked,}(). */
-extern bool prof_active;
-
-/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
-extern bool prof_gdump_val;
-
-/*
- * Profile dump interval, measured in bytes allocated. Each arena triggers a
- * profile dump when it reaches this threshold. The effect is that the
- * interval between profile dumps averages prof_interval, though the actual
- * interval between dumps will tend to be sporadic, and the interval will be a
- * maximum of approximately (prof_interval * narenas).
- */
-extern uint64_t prof_interval;
-
-/*
- * Initialized as opt_lg_prof_sample, and potentially modified during profiling
- * resets.
- */
-extern size_t lg_prof_sample;
-
-void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
-void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
- prof_tctx_t *tctx);
-void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx);
-void bt_init(prof_bt_t *bt, void **vec);
-void prof_backtrace(prof_bt_t *bt);
-prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt);
-#ifdef JEMALLOC_JET
-size_t prof_tdata_count(void);
-size_t prof_bt_count(void);
-const prof_cnt_t *prof_cnt_all(void);
-typedef int (prof_dump_open_t)(bool, const char *);
-extern prof_dump_open_t *prof_dump_open;
-typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *);
-extern prof_dump_header_t *prof_dump_header;
-#endif
-void prof_idump(tsdn_t *tsdn);
-bool prof_mdump(tsd_t *tsd, const char *filename);
-void prof_gdump(tsdn_t *tsdn);
-prof_tdata_t *prof_tdata_init(tsd_t *tsd);
-prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
-void prof_reset(tsd_t *tsd, size_t lg_sample);
-void prof_tdata_cleanup(tsd_t *tsd);
-bool prof_active_get(tsdn_t *tsdn);
-bool prof_active_set(tsdn_t *tsdn, bool active);
-const char *prof_thread_name_get(tsd_t *tsd);
-int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
-bool prof_thread_active_get(tsd_t *tsd);
-bool prof_thread_active_set(tsd_t *tsd, bool active);
-bool prof_thread_active_init_get(tsdn_t *tsdn);
-bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init);
-bool prof_gdump_get(tsdn_t *tsdn);
-bool prof_gdump_set(tsdn_t *tsdn, bool active);
-void prof_boot0(void);
-void prof_boot1(void);
-bool prof_boot2(tsd_t *tsd);
-void prof_prefork0(tsdn_t *tsdn);
-void prof_prefork1(tsdn_t *tsdn);
-void prof_postfork_parent(tsdn_t *tsdn);
-void prof_postfork_child(tsdn_t *tsdn);
-void prof_sample_threshold_update(prof_tdata_t *tdata);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-bool prof_active_get_unlocked(void);
-bool prof_gdump_get_unlocked(void);
-prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create);
-prof_tctx_t *prof_tctx_get(tsdn_t *tsdn, const void *ptr);
-void prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
- prof_tctx_t *tctx);
-void prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
- const void *old_ptr, prof_tctx_t *tctx);
-bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit,
- prof_tdata_t **tdata_out);
-prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active,
- bool update);
-void prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize,
- prof_tctx_t *tctx);
-void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize,
- prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr,
- size_t old_usize, prof_tctx_t *old_tctx);
-void prof_free(tsd_t *tsd, const void *ptr, size_t usize);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_))
-JEMALLOC_ALWAYS_INLINE bool
-prof_active_get_unlocked(void)
-{
-
- /*
- * Even if opt_prof is true, sampling can be temporarily disabled by
- * setting prof_active to false. No locking is used when reading
- * prof_active in the fast path, so there are no guarantees regarding
- * how long it will take for all threads to notice state changes.
- */
- return (prof_active);
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-prof_gdump_get_unlocked(void)
-{
-
- /*
- * No locking is used when reading prof_gdump_val in the fast path, so
- * there are no guarantees regarding how long it will take for all
- * threads to notice state changes.
- */
- return (prof_gdump_val);
-}
-
-JEMALLOC_ALWAYS_INLINE prof_tdata_t *
-prof_tdata_get(tsd_t *tsd, bool create)
-{
- prof_tdata_t *tdata;
-
- cassert(config_prof);
-
- tdata = tsd_prof_tdata_get(tsd);
- if (create) {
- if (unlikely(tdata == NULL)) {
- if (tsd_nominal(tsd)) {
- tdata = prof_tdata_init(tsd);
- tsd_prof_tdata_set(tsd, tdata);
- }
- } else if (unlikely(tdata->expired)) {
- tdata = prof_tdata_reinit(tsd, tdata);
- tsd_prof_tdata_set(tsd, tdata);
- }
- assert(tdata == NULL || tdata->attached);
- }
-
- return (tdata);
-}
-
-JEMALLOC_ALWAYS_INLINE prof_tctx_t *
-prof_tctx_get(tsdn_t *tsdn, const void *ptr)
-{
-
- cassert(config_prof);
- assert(ptr != NULL);
-
- return (arena_prof_tctx_get(tsdn, ptr));
-}
-
-JEMALLOC_ALWAYS_INLINE void
-prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx)
-{
-
- cassert(config_prof);
- assert(ptr != NULL);
-
- arena_prof_tctx_set(tsdn, ptr, usize, tctx);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, const void *old_ptr,
- prof_tctx_t *old_tctx)
-{
-
- cassert(config_prof);
- assert(ptr != NULL);
-
- arena_prof_tctx_reset(tsdn, ptr, usize, old_ptr, old_tctx);
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
- prof_tdata_t **tdata_out)
-{
- prof_tdata_t *tdata;
-
- cassert(config_prof);
-
- tdata = prof_tdata_get(tsd, true);
- if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX))
- tdata = NULL;
-
- if (tdata_out != NULL)
- *tdata_out = tdata;
-
- if (unlikely(tdata == NULL))
- return (true);
-
- if (likely(tdata->bytes_until_sample >= usize)) {
- if (update)
- tdata->bytes_until_sample -= usize;
- return (true);
- } else {
- /* Compute new sample threshold. */
- if (update)
- prof_sample_threshold_update(tdata);
- return (!tdata->active);
- }
-}
-
-JEMALLOC_ALWAYS_INLINE prof_tctx_t *
-prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update)
-{
- prof_tctx_t *ret;
- prof_tdata_t *tdata;
- prof_bt_t bt;
-
- assert(usize == s2u(usize));
-
- if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update,
- &tdata)))
- ret = (prof_tctx_t *)(uintptr_t)1U;
- else {
- bt_init(&bt, tdata->vec);
- prof_backtrace(&bt);
- ret = prof_lookup(tsd, &bt);
- }
-
- return (ret);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx)
-{
-
- cassert(config_prof);
- assert(ptr != NULL);
- assert(usize == isalloc(tsdn, ptr, true));
-
- if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
- prof_malloc_sample_object(tsdn, ptr, usize, tctx);
- else
- prof_tctx_set(tsdn, ptr, usize, (prof_tctx_t *)(uintptr_t)1U);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
- bool prof_active, bool updated, const void *old_ptr, size_t old_usize,
- prof_tctx_t *old_tctx)
-{
- bool sampled, old_sampled;
-
- cassert(config_prof);
- assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);
-
- if (prof_active && !updated && ptr != NULL) {
- assert(usize == isalloc(tsd_tsdn(tsd), ptr, true));
- if (prof_sample_accum_update(tsd, usize, true, NULL)) {
- /*
- * Don't sample. The usize passed to prof_alloc_prep()
- * was larger than what actually got allocated, so a
- * backtrace was captured for this allocation, even
- * though its actual usize was insufficient to cross the
- * sample threshold.
- */
- prof_alloc_rollback(tsd, tctx, true);
- tctx = (prof_tctx_t *)(uintptr_t)1U;
- }
- }
-
- sampled = ((uintptr_t)tctx > (uintptr_t)1U);
- old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);
-
- if (unlikely(sampled))
- prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx);
- else
- prof_tctx_reset(tsd_tsdn(tsd), ptr, usize, old_ptr, old_tctx);
-
- if (unlikely(old_sampled))
- prof_free_sampled_object(tsd, old_usize, old_tctx);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-prof_free(tsd_t *tsd, const void *ptr, size_t usize)
-{
- prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr);
-
- cassert(config_prof);
- assert(usize == isalloc(tsd_tsdn(tsd), ptr, true));
-
- if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
- prof_free_sampled_object(tsd, usize, tctx);
-}
-#endif
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
diff --git a/contrib/jemalloc/include/jemalloc/internal/prof_externs.h b/contrib/jemalloc/include/jemalloc/internal/prof_externs.h
new file mode 100644
index 000000000000..04348696f580
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/prof_externs.h
@@ -0,0 +1,92 @@
+#ifndef JEMALLOC_INTERNAL_PROF_EXTERNS_H
+#define JEMALLOC_INTERNAL_PROF_EXTERNS_H
+
+#include "jemalloc/internal/mutex.h"
+
+extern malloc_mutex_t bt2gctx_mtx;
+
+extern bool opt_prof;
+extern bool opt_prof_active;
+extern bool opt_prof_thread_active_init;
+extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */
+extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
+extern bool opt_prof_gdump; /* High-water memory dumping. */
+extern bool opt_prof_final; /* Final profile dumping. */
+extern bool opt_prof_leak; /* Dump leak summary at exit. */
+extern bool opt_prof_accum; /* Report cumulative bytes. */
+extern char opt_prof_prefix[
+ /* Minimize memory bloat for non-prof builds. */
+#ifdef JEMALLOC_PROF
+ PATH_MAX +
+#endif
+ 1];
+
+/* Accessed via prof_active_[gs]et{_unlocked,}(). */
+extern bool prof_active;
+
+/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
+extern bool prof_gdump_val;
+
+/*
+ * Profile dump interval, measured in bytes allocated. Each arena triggers a
+ * profile dump when it reaches this threshold. The effect is that the
+ * interval between profile dumps averages prof_interval, though the actual
+ * interval between dumps will tend to be sporadic, and the interval will be a
+ * maximum of approximately (prof_interval * narenas).
+ */
+extern uint64_t prof_interval;
+
+/*
+ * Initialized as opt_lg_prof_sample, and potentially modified during profiling
+ * resets.
+ */
+extern size_t lg_prof_sample;
+
+void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
+void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
+ prof_tctx_t *tctx);
+void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx);
+void bt_init(prof_bt_t *bt, void **vec);
+void prof_backtrace(prof_bt_t *bt);
+prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt);
+#ifdef JEMALLOC_JET
+size_t prof_tdata_count(void);
+size_t prof_bt_count(void);
+#endif
+typedef int (prof_dump_open_t)(bool, const char *);
+extern prof_dump_open_t *JET_MUTABLE prof_dump_open;
+
+typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *);
+extern prof_dump_header_t *JET_MUTABLE prof_dump_header;
+#ifdef JEMALLOC_JET
+void prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs,
+ uint64_t *accumbytes);
+#endif
+bool prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum);
+void prof_idump(tsdn_t *tsdn);
+bool prof_mdump(tsd_t *tsd, const char *filename);
+void prof_gdump(tsdn_t *tsdn);
+prof_tdata_t *prof_tdata_init(tsd_t *tsd);
+prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
+void prof_reset(tsd_t *tsd, size_t lg_sample);
+void prof_tdata_cleanup(tsd_t *tsd);
+bool prof_active_get(tsdn_t *tsdn);
+bool prof_active_set(tsdn_t *tsdn, bool active);
+const char *prof_thread_name_get(tsd_t *tsd);
+int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
+bool prof_thread_active_get(tsd_t *tsd);
+bool prof_thread_active_set(tsd_t *tsd, bool active);
+bool prof_thread_active_init_get(tsdn_t *tsdn);
+bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init);
+bool prof_gdump_get(tsdn_t *tsdn);
+bool prof_gdump_set(tsdn_t *tsdn, bool active);
+void prof_boot0(void);
+void prof_boot1(void);
+bool prof_boot2(tsd_t *tsd);
+void prof_prefork0(tsdn_t *tsdn);
+void prof_prefork1(tsdn_t *tsdn);
+void prof_postfork_parent(tsdn_t *tsdn);
+void prof_postfork_child(tsdn_t *tsdn);
+void prof_sample_threshold_update(prof_tdata_t *tdata);
+
+#endif /* JEMALLOC_INTERNAL_PROF_EXTERNS_H */
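
The interval-dump behavior described above is easiest to see with a toy model: every arena keeps its own byte counter and fires a dump when that counter reaches prof_interval, so the aggregate bytes between dumps ranges from roughly prof_interval (all traffic on one arena) up to about prof_interval * narenas (traffic spread evenly). A minimal standalone sketch of that arithmetic, not jemalloc code; the arena count, interval value, and allocation stream are illustrative assumptions.

#include <stdint.h>
#include <stdio.h>

#define NARENAS		4
#define PROF_INTERVAL	((uint64_t)1 << 20)	/* Stand-in for prof_interval. */

/* Per-arena accumulators, mirroring "each arena triggers a profile dump". */
static uint64_t accum[NARENAS];

/* Credit nbytes to one arena; return 1 if that arena crossed the threshold. */
static int
arena_accum(unsigned arena, uint64_t nbytes) {
	accum[arena] += nbytes;
	if (accum[arena] >= PROF_INTERVAL) {
		accum[arena] %= PROF_INTERVAL;
		return 1;
	}
	return 0;
}

int
main(void) {
	uint64_t total = 0;
	/*
	 * Worst case: spread traffic evenly, so every counter stays just
	 * below the threshold until nearly NARENAS * PROF_INTERVAL bytes
	 * have been allocated in aggregate.
	 */
	for (unsigned a = 0;; a = (a + 1) % NARENAS) {
		total += 4096;
		if (arena_accum(a, 4096)) {
			break;
		}
	}
	printf("aggregate bytes before first dump: %llu (~%d x interval)\n",
	    (unsigned long long)total, NARENAS);
	return 0;
}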
diff --git a/contrib/jemalloc/include/jemalloc/internal/prof_inlines_a.h b/contrib/jemalloc/include/jemalloc/internal/prof_inlines_a.h
new file mode 100644
index 000000000000..eda6839ade44
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/prof_inlines_a.h
@@ -0,0 +1,72 @@
+#ifndef JEMALLOC_INTERNAL_PROF_INLINES_A_H
+#define JEMALLOC_INTERNAL_PROF_INLINES_A_H
+
+#include "jemalloc/internal/mutex.h"
+
+static inline bool
+prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum, uint64_t accumbytes) {
+ cassert(config_prof);
+
+ bool overflow;
+ uint64_t a0, a1;
+
+ /*
+ * If the application allocates fast enough (and/or if idump is slow
+ * enough), extreme overflow here (a1 >= prof_interval * 2) can cause
+ * idump trigger coalescing. This is an intentional mechanism that
+ * avoids rate-limiting allocation.
+ */
+#ifdef JEMALLOC_ATOMIC_U64
+ a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED);
+ do {
+ a1 = a0 + accumbytes;
+ assert(a1 >= a0);
+ overflow = (a1 >= prof_interval);
+ if (overflow) {
+ a1 %= prof_interval;
+ }
+ } while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0,
+ a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
+#else
+ malloc_mutex_lock(tsdn, &prof_accum->mtx);
+ a0 = prof_accum->accumbytes;
+ a1 = a0 + accumbytes;
+ overflow = (a1 >= prof_interval);
+ if (overflow) {
+ a1 %= prof_interval;
+ }
+ prof_accum->accumbytes = a1;
+ malloc_mutex_unlock(tsdn, &prof_accum->mtx);
+#endif
+ return overflow;
+}
+
+static inline void
+prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum, size_t usize) {
+ cassert(config_prof);
+
+ /*
+ * Cancel out as much of the excessive prof_accumbytes increase as
+ * possible without underflowing. Interval-triggered dumps occur
+ * slightly more often than intended as a result of incomplete
+ * canceling.
+ */
+ uint64_t a0, a1;
+#ifdef JEMALLOC_ATOMIC_U64
+ a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED);
+ do {
+ a1 = (a0 >= LARGE_MINCLASS - usize) ? a0 - (LARGE_MINCLASS -
+ usize) : 0;
+ } while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0,
+ a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
+#else
+ malloc_mutex_lock(tsdn, &prof_accum->mtx);
+ a0 = prof_accum->accumbytes;
+ a1 = (a0 >= LARGE_MINCLASS - usize) ? a0 - (LARGE_MINCLASS - usize) :
+ 0;
+ prof_accum->accumbytes = a1;
+ malloc_mutex_unlock(tsdn, &prof_accum->mtx);
+#endif
+}
+
+#endif /* JEMALLOC_INTERNAL_PROF_INLINES_A_H */
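
prof_accum_add() above is a lock-free accumulate-and-report pattern: load the counter, add, check whether the sum crossed prof_interval, wrap modulo the interval on a crossing, and retry the compare-and-swap if another thread raced in. A minimal sketch of the same pattern using plain C11 atomics; the interval constant and function name are illustrative, not jemalloc's.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define INTERVAL ((uint64_t)1 << 20)	/* Stand-in for prof_interval. */

static _Atomic uint64_t accumbytes;

/*
 * Add nbytes to the shared accumulator; return true if the addition pushed
 * the running total across INTERVAL.  Wrapping modulo INTERVAL means closely
 * spaced crossings coalesce into a single report, matching the comment in
 * prof_accum_add().
 */
static bool
accum_add(uint64_t nbytes) {
	uint64_t old = atomic_load_explicit(&accumbytes, memory_order_relaxed);
	uint64_t new;
	bool crossed;

	do {
		new = old + nbytes;
		crossed = (new >= INTERVAL);
		if (crossed) {
			new %= INTERVAL;
		}
	} while (!atomic_compare_exchange_weak_explicit(&accumbytes, &old,
	    new, memory_order_relaxed, memory_order_relaxed));
	return crossed;
}

Relaxed ordering suffices here because the accumulator only decides how often a dump is triggered; it does not publish data that other threads subsequently read through it.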
diff --git a/contrib/jemalloc/include/jemalloc/internal/prof_inlines_b.h b/contrib/jemalloc/include/jemalloc/internal/prof_inlines_b.h
new file mode 100644
index 000000000000..d670cb7b8f8b
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/prof_inlines_b.h
@@ -0,0 +1,217 @@
+#ifndef JEMALLOC_INTERNAL_PROF_INLINES_B_H
+#define JEMALLOC_INTERNAL_PROF_INLINES_B_H
+
+#include "jemalloc/internal/sz.h"
+
+JEMALLOC_ALWAYS_INLINE bool
+prof_active_get_unlocked(void) {
+ /*
+ * Even if opt_prof is true, sampling can be temporarily disabled by
+ * setting prof_active to false. No locking is used when reading
+ * prof_active in the fast path, so there are no guarantees regarding
+ * how long it will take for all threads to notice state changes.
+ */
+ return prof_active;
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+prof_gdump_get_unlocked(void) {
+ /*
+ * No locking is used when reading prof_gdump_val in the fast path, so
+ * there are no guarantees regarding how long it will take for all
+ * threads to notice state changes.
+ */
+ return prof_gdump_val;
+}
+
+JEMALLOC_ALWAYS_INLINE prof_tdata_t *
+prof_tdata_get(tsd_t *tsd, bool create) {
+ prof_tdata_t *tdata;
+
+ cassert(config_prof);
+
+ tdata = tsd_prof_tdata_get(tsd);
+ if (create) {
+ if (unlikely(tdata == NULL)) {
+ if (tsd_nominal(tsd)) {
+ tdata = prof_tdata_init(tsd);
+ tsd_prof_tdata_set(tsd, tdata);
+ }
+ } else if (unlikely(tdata->expired)) {
+ tdata = prof_tdata_reinit(tsd, tdata);
+ tsd_prof_tdata_set(tsd, tdata);
+ }
+ assert(tdata == NULL || tdata->attached);
+ }
+
+ return tdata;
+}
+
+JEMALLOC_ALWAYS_INLINE prof_tctx_t *
+prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
+ cassert(config_prof);
+ assert(ptr != NULL);
+
+ return arena_prof_tctx_get(tsdn, ptr, alloc_ctx);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
+ alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
+ cassert(config_prof);
+ assert(ptr != NULL);
+
+ arena_prof_tctx_set(tsdn, ptr, usize, alloc_ctx, tctx);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) {
+ cassert(config_prof);
+ assert(ptr != NULL);
+
+ arena_prof_tctx_reset(tsdn, ptr, tctx);
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
+ prof_tdata_t **tdata_out) {
+ prof_tdata_t *tdata;
+
+ cassert(config_prof);
+
+ tdata = prof_tdata_get(tsd, true);
+ if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)) {
+ tdata = NULL;
+ }
+
+ if (tdata_out != NULL) {
+ *tdata_out = tdata;
+ }
+
+ if (unlikely(tdata == NULL)) {
+ return true;
+ }
+
+ if (likely(tdata->bytes_until_sample >= usize)) {
+ if (update) {
+ tdata->bytes_until_sample -= usize;
+ }
+ return true;
+ } else {
+ if (tsd_reentrancy_level_get(tsd) > 0) {
+ return true;
+ }
+ /* Compute new sample threshold. */
+ if (update) {
+ prof_sample_threshold_update(tdata);
+ }
+ return !tdata->active;
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE prof_tctx_t *
+prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) {
+ prof_tctx_t *ret;
+ prof_tdata_t *tdata;
+ prof_bt_t bt;
+
+ assert(usize == sz_s2u(usize));
+
+ if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update,
+ &tdata))) {
+ ret = (prof_tctx_t *)(uintptr_t)1U;
+ } else {
+ bt_init(&bt, tdata->vec);
+ prof_backtrace(&bt);
+ ret = prof_lookup(tsd, &bt);
+ }
+
+ return ret;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx,
+ prof_tctx_t *tctx) {
+ cassert(config_prof);
+ assert(ptr != NULL);
+ assert(usize == isalloc(tsdn, ptr));
+
+ if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
+ prof_malloc_sample_object(tsdn, ptr, usize, tctx);
+ } else {
+ prof_tctx_set(tsdn, ptr, usize, alloc_ctx,
+ (prof_tctx_t *)(uintptr_t)1U);
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE void
+prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
+ bool prof_active, bool updated, const void *old_ptr, size_t old_usize,
+ prof_tctx_t *old_tctx) {
+ bool sampled, old_sampled, moved;
+
+ cassert(config_prof);
+ assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);
+
+ if (prof_active && !updated && ptr != NULL) {
+ assert(usize == isalloc(tsd_tsdn(tsd), ptr));
+ if (prof_sample_accum_update(tsd, usize, true, NULL)) {
+ /*
+ * Don't sample. The usize passed to prof_alloc_prep()
+ * was larger than what actually got allocated, so a
+ * backtrace was captured for this allocation, even
+ * though its actual usize was insufficient to cross the
+ * sample threshold.
+ */
+ prof_alloc_rollback(tsd, tctx, true);
+ tctx = (prof_tctx_t *)(uintptr_t)1U;
+ }
+ }
+
+ sampled = ((uintptr_t)tctx > (uintptr_t)1U);
+ old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);
+ moved = (ptr != old_ptr);
+
+ if (unlikely(sampled)) {
+ prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx);
+ } else if (moved) {
+ prof_tctx_set(tsd_tsdn(tsd), ptr, usize, NULL,
+ (prof_tctx_t *)(uintptr_t)1U);
+ } else if (unlikely(old_sampled)) {
+ /*
+ * prof_tctx_set() would work for the !moved case as well, but
+ * prof_tctx_reset() is slightly cheaper, and the proper thing
+ * to do here in the presence of explicit knowledge re: moved
+ * state.
+ */
+ prof_tctx_reset(tsd_tsdn(tsd), ptr, tctx);
+ } else {
+ assert((uintptr_t)prof_tctx_get(tsd_tsdn(tsd), ptr, NULL) ==
+ (uintptr_t)1U);
+ }
+
+ /*
+ * The prof_free_sampled_object() call must come after the
+ * prof_malloc_sample_object() call, because tctx and old_tctx may be
+ * the same, in which case reversing the call order could cause the tctx
+ * to be prematurely destroyed as a side effect of momentarily zeroed
+ * counters.
+ */
+ if (unlikely(old_sampled)) {
+ prof_free_sampled_object(tsd, old_usize, old_tctx);
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE void
+prof_free(tsd_t *tsd, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx) {
+ prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);
+
+ cassert(config_prof);
+ assert(usize == isalloc(tsd_tsdn(tsd), ptr));
+
+ if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
+ prof_free_sampled_object(tsd, usize, tctx);
+ }
+}
+
+#endif /* JEMALLOC_INTERNAL_PROF_INLINES_B_H */
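
The fast path in prof_sample_accum_update() is a per-thread byte countdown: allocations smaller than the remaining budget just decrement it, and only an allocation that exhausts the budget is a candidate for sampling, after which a fresh threshold is drawn. A stripped-down sketch of that countdown; the fixed threshold below is an illustrative placeholder for jemalloc's PRNG-drawn value, whose mean is 2^lg_prof_sample bytes.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Per-thread sampling state; a stand-in for the relevant prof_tdata_t fields. */
typedef struct {
	uint64_t bytes_until_sample;
	bool active;
} sample_state_t;

/* Placeholder for prof_sample_threshold_update(). */
static void
sample_threshold_update(sample_state_t *st) {
	st->bytes_until_sample = (uint64_t)1 << 19;
}

/*
 * Return true if the allocation should NOT be sampled, mirroring the
 * "true means skip" convention of prof_sample_accum_update().
 */
static bool
sample_skip(sample_state_t *st, size_t usize) {
	if (st->bytes_until_sample >= usize) {
		st->bytes_until_sample -= usize;
		return true;		/* Budget not exhausted: don't sample. */
	}
	sample_threshold_update(st);	/* Threshold crossed: draw a new one. */
	return !st->active;		/* Sample only if sampling is active. */
}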
diff --git a/contrib/jemalloc/include/jemalloc/internal/prof_structs.h b/contrib/jemalloc/include/jemalloc/internal/prof_structs.h
new file mode 100644
index 000000000000..0d58ae1005bd
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/prof_structs.h
@@ -0,0 +1,201 @@
+#ifndef JEMALLOC_INTERNAL_PROF_STRUCTS_H
+#define JEMALLOC_INTERNAL_PROF_STRUCTS_H
+
+#include "jemalloc/internal/ckh.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/prng.h"
+#include "jemalloc/internal/rb.h"
+
+struct prof_bt_s {
+ /* Backtrace, stored as len program counters. */
+ void **vec;
+ unsigned len;
+};
+
+#ifdef JEMALLOC_PROF_LIBGCC
+/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
+typedef struct {
+ prof_bt_t *bt;
+ unsigned max;
+} prof_unwind_data_t;
+#endif
+
+struct prof_accum_s {
+#ifndef JEMALLOC_ATOMIC_U64
+ malloc_mutex_t mtx;
+ uint64_t accumbytes;
+#else
+ atomic_u64_t accumbytes;
+#endif
+};
+
+struct prof_cnt_s {
+ /* Profiling counters. */
+ uint64_t curobjs;
+ uint64_t curbytes;
+ uint64_t accumobjs;
+ uint64_t accumbytes;
+};
+
+typedef enum {
+ prof_tctx_state_initializing,
+ prof_tctx_state_nominal,
+ prof_tctx_state_dumping,
+ prof_tctx_state_purgatory /* Dumper must finish destroying. */
+} prof_tctx_state_t;
+
+struct prof_tctx_s {
+ /* Thread data for thread that performed the allocation. */
+ prof_tdata_t *tdata;
+
+ /*
+ * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be
+ * defunct during teardown.
+ */
+ uint64_t thr_uid;
+ uint64_t thr_discrim;
+
+ /* Profiling counters, protected by tdata->lock. */
+ prof_cnt_t cnts;
+
+ /* Associated global context. */
+ prof_gctx_t *gctx;
+
+ /*
+ * UID that distinguishes multiple tctx's created by the same thread,
+ * but coexisting in gctx->tctxs. There are two ways that such
+ * coexistence can occur:
+ * - A dumper thread can cause a tctx to be retained in the purgatory
+ * state.
+ * - Although a single "producer" thread must create all tctx's which
+ * share the same thr_uid, multiple "consumers" can each concurrently
+ * execute portions of prof_tctx_destroy(). prof_tctx_destroy() only
+ * gets called once each time cnts.cur{objs,bytes} drop to 0, but this
+ * threshold can be hit again before the first consumer finishes
+ * executing prof_tctx_destroy().
+ */
+ uint64_t tctx_uid;
+
+ /* Linkage into gctx's tctxs. */
+ rb_node(prof_tctx_t) tctx_link;
+
+ /*
+ * True during prof_alloc_prep()..prof_malloc_sample_object(), prevents
+ * sample vs destroy race.
+ */
+ bool prepared;
+
+ /* Current dump-related state, protected by gctx->lock. */
+ prof_tctx_state_t state;
+
+ /*
+ * Copy of cnts snapshotted during early dump phase, protected by
+ * dump_mtx.
+ */
+ prof_cnt_t dump_cnts;
+};
+typedef rb_tree(prof_tctx_t) prof_tctx_tree_t;
+
+struct prof_gctx_s {
+ /* Protects nlimbo, cnt_summed, and tctxs. */
+ malloc_mutex_t *lock;
+
+ /*
+ * Number of threads that currently cause this gctx to be in a state of
+ * limbo due to one of:
+ * - Initializing this gctx.
+ * - Initializing per thread counters associated with this gctx.
+ * - Preparing to destroy this gctx.
+ * - Dumping a heap profile that includes this gctx.
+ * nlimbo must be 1 (single destroyer) in order to safely destroy the
+ * gctx.
+ */
+ unsigned nlimbo;
+
+ /*
+ * Tree of profile counters, one for each thread that has allocated in
+ * this context.
+ */
+ prof_tctx_tree_t tctxs;
+
+ /* Linkage for tree of contexts to be dumped. */
+ rb_node(prof_gctx_t) dump_link;
+
+ /* Temporary storage for summation during dump. */
+ prof_cnt_t cnt_summed;
+
+ /* Associated backtrace. */
+ prof_bt_t bt;
+
+ /* Backtrace vector, variable size, referred to by bt. */
+ void *vec[1];
+};
+typedef rb_tree(prof_gctx_t) prof_gctx_tree_t;
+
+struct prof_tdata_s {
+ malloc_mutex_t *lock;
+
+ /* Monotonically increasing unique thread identifier. */
+ uint64_t thr_uid;
+
+ /*
+ * Monotonically increasing discriminator among tdata structures
+ * associated with the same thr_uid.
+ */
+ uint64_t thr_discrim;
+
+ /* Included in heap profile dumps if non-NULL. */
+ char *thread_name;
+
+ bool attached;
+ bool expired;
+
+ rb_node(prof_tdata_t) tdata_link;
+
+ /*
+ * Counter used to initialize prof_tctx_t's tctx_uid. No locking is
+ * necessary when incrementing this field, because only one thread ever
+ * does so.
+ */
+ uint64_t tctx_uid_next;
+
+ /*
+ * Hash of (prof_bt_t *)-->(prof_tctx_t *). Each thread tracks
+ * backtraces for which it has non-zero allocation/deallocation counters
+ * associated with thread-specific prof_tctx_t objects. Other threads
+ * may write to prof_tctx_t contents when freeing associated objects.
+ */
+ ckh_t bt2tctx;
+
+ /* Sampling state. */
+ uint64_t prng_state;
+ uint64_t bytes_until_sample;
+
+ /* State used to avoid dumping while operating on prof internals. */
+ bool enq;
+ bool enq_idump;
+ bool enq_gdump;
+
+ /*
+ * Set to true during an early dump phase for tdata's which are
+ * currently being dumped. New threads' tdata's have this initialized
+ * to false so that they aren't accidentally included in later dump
+ * phases.
+ */
+ bool dumping;
+
+ /*
+ * True if profiling is active for this tdata's thread
+ * (thread.prof.active mallctl).
+ */
+ bool active;
+
+ /* Temporary storage for summation during dump. */
+ prof_cnt_t cnt_summed;
+
+ /* Backtrace vector, used for calls to prof_backtrace(). */
+ void *vec[PROF_BT_MAX];
+};
+typedef rb_tree(prof_tdata_t) prof_tdata_tree_t;
+
+#endif /* JEMALLOC_INTERNAL_PROF_STRUCTS_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/prof_types.h b/contrib/jemalloc/include/jemalloc/internal/prof_types.h
new file mode 100644
index 000000000000..1eff995ecf0f
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/prof_types.h
@@ -0,0 +1,56 @@
+#ifndef JEMALLOC_INTERNAL_PROF_TYPES_H
+#define JEMALLOC_INTERNAL_PROF_TYPES_H
+
+typedef struct prof_bt_s prof_bt_t;
+typedef struct prof_accum_s prof_accum_t;
+typedef struct prof_cnt_s prof_cnt_t;
+typedef struct prof_tctx_s prof_tctx_t;
+typedef struct prof_gctx_s prof_gctx_t;
+typedef struct prof_tdata_s prof_tdata_t;
+
+/* Option defaults. */
+#ifdef JEMALLOC_PROF
+# define PROF_PREFIX_DEFAULT "jeprof"
+#else
+# define PROF_PREFIX_DEFAULT ""
+#endif
+#define LG_PROF_SAMPLE_DEFAULT 19
+#define LG_PROF_INTERVAL_DEFAULT -1
+
+/*
+ * Hard limit on stack backtrace depth. The version of prof_backtrace() that
+ * is based on __builtin_return_address() necessarily has a hard-coded number
+ * of backtrace frame handlers, and should be kept in sync with this setting.
+ */
+#define PROF_BT_MAX 128
+
+/* Initial hash table size. */
+#define PROF_CKH_MINITEMS 64
+
+/* Size of memory buffer to use when writing dump files. */
+#define PROF_DUMP_BUFSIZE 65536
+
+/* Size of stack-allocated buffer used by prof_printf(). */
+#define PROF_PRINTF_BUFSIZE 128
+
+/*
+ * Number of mutexes shared among all gctx's. No space is allocated for these
+ * unless profiling is enabled, so it's okay to over-provision.
+ */
+#define PROF_NCTX_LOCKS 1024
+
+/*
+ * Number of mutexes shared among all tdata's. No space is allocated for these
+ * unless profiling is enabled, so it's okay to over-provision.
+ */
+#define PROF_NTDATA_LOCKS 256
+
+/*
+ * prof_tdata pointers close to NULL are used to encode state information that
+ * is used for cleaning up during thread shutdown.
+ */
+#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1)
+#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2)
+#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY
+
+#endif /* JEMALLOC_INTERNAL_PROF_TYPES_H */
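
The PROF_TDATA_STATE_* values reuse the pointer representations 1 and 2 as sentinels, which is why prof_sample_accum_update() treats any tdata pointer numerically <= PROF_TDATA_STATE_MAX as "no usable tdata". A small self-contained illustration of that encoding; the struct and helper names are hypothetical, not jemalloc's.

#include <stdbool.h>
#include <stdint.h>

typedef struct tdata_s tdata_t;	/* Opaque per-thread profiling data. */

#define TDATA_STATE_REINCARNATED	((tdata_t *)(uintptr_t)1)
#define TDATA_STATE_PURGATORY		((tdata_t *)(uintptr_t)2)
#define TDATA_STATE_MAX			TDATA_STATE_PURGATORY

/* NULL and the two sentinels all mean "no real tdata to operate on". */
static bool
tdata_is_real(const tdata_t *tdata) {
	return (uintptr_t)tdata > (uintptr_t)TDATA_STATE_MAX;
}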
diff --git a/contrib/jemalloc/include/jemalloc/internal/public_namespace.h b/contrib/jemalloc/include/jemalloc/internal/public_namespace.h
index d5a4de800d20..abdf89fb02cb 100644
--- a/contrib/jemalloc/include/jemalloc/internal/public_namespace.h
+++ b/contrib/jemalloc/include/jemalloc/internal/public_namespace.h
@@ -1,21 +1,21 @@
-#define je_malloc_conf JEMALLOC_N(malloc_conf)
-#define je_malloc_message JEMALLOC_N(malloc_message)
-#define je_malloc JEMALLOC_N(malloc)
-#define je_calloc JEMALLOC_N(calloc)
-#define je_posix_memalign JEMALLOC_N(posix_memalign)
-#define je_aligned_alloc JEMALLOC_N(aligned_alloc)
-#define je_realloc JEMALLOC_N(realloc)
-#define je_free JEMALLOC_N(free)
-#define je_mallocx JEMALLOC_N(mallocx)
-#define je_rallocx JEMALLOC_N(rallocx)
-#define je_xallocx JEMALLOC_N(xallocx)
-#define je_sallocx JEMALLOC_N(sallocx)
-#define je_dallocx JEMALLOC_N(dallocx)
-#define je_sdallocx JEMALLOC_N(sdallocx)
-#define je_nallocx JEMALLOC_N(nallocx)
-#define je_mallctl JEMALLOC_N(mallctl)
-#define je_mallctlnametomib JEMALLOC_N(mallctlnametomib)
-#define je_mallctlbymib JEMALLOC_N(mallctlbymib)
-#define je_malloc_stats_print JEMALLOC_N(malloc_stats_print)
-#define je_malloc_usable_size JEMALLOC_N(malloc_usable_size)
-#define je_valloc JEMALLOC_N(valloc)
+#define je_aligned_alloc JEMALLOC_N(aligned_alloc)
+#define je_calloc JEMALLOC_N(calloc)
+#define je_dallocx JEMALLOC_N(dallocx)
+#define je_free JEMALLOC_N(free)
+#define je_mallctl JEMALLOC_N(mallctl)
+#define je_mallctlbymib JEMALLOC_N(mallctlbymib)
+#define je_mallctlnametomib JEMALLOC_N(mallctlnametomib)
+#define je_malloc JEMALLOC_N(malloc)
+#define je_malloc_conf JEMALLOC_N(malloc_conf)
+#define je_malloc_message JEMALLOC_N(malloc_message)
+#define je_malloc_stats_print JEMALLOC_N(malloc_stats_print)
+#define je_malloc_usable_size JEMALLOC_N(malloc_usable_size)
+#define je_mallocx JEMALLOC_N(mallocx)
+#define je_nallocx JEMALLOC_N(nallocx)
+#define je_posix_memalign JEMALLOC_N(posix_memalign)
+#define je_rallocx JEMALLOC_N(rallocx)
+#define je_realloc JEMALLOC_N(realloc)
+#define je_sallocx JEMALLOC_N(sallocx)
+#define je_sdallocx JEMALLOC_N(sdallocx)
+#define je_xallocx JEMALLOC_N(xallocx)
+#define je_valloc JEMALLOC_N(valloc)
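
These defines route every public je_* entry point through JEMALLOC_N so the whole API can be renamed with a single build-time symbol prefix. A hedged illustration of the idea only; the prefix_ token below is an assumption, since the real prefix comes from the build configuration rather than from this header.

#include <stddef.h>

/* Hypothetical expansion, assuming a configured symbol prefix "prefix_". */
#define JEMALLOC_N(n)	prefix_##n
#define je_malloc	JEMALLOC_N(malloc)	/* je_malloc -> prefix_malloc */

void *prefix_malloc(size_t size);	/* The symbol the library would export. */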
diff --git a/contrib/jemalloc/include/jemalloc/internal/ql.h b/contrib/jemalloc/include/jemalloc/internal/ql.h
index 1834bb8557ac..802904077161 100644
--- a/contrib/jemalloc/include/jemalloc/internal/ql.h
+++ b/contrib/jemalloc/include/jemalloc/internal/ql.h
@@ -1,59 +1,64 @@
+#ifndef JEMALLOC_INTERNAL_QL_H
+#define JEMALLOC_INTERNAL_QL_H
+
+#include "jemalloc/internal/qr.h"
+
/* List definitions. */
-#define ql_head(a_type) \
+#define ql_head(a_type) \
struct { \
a_type *qlh_first; \
}
-#define ql_head_initializer(a_head) {NULL}
+#define ql_head_initializer(a_head) {NULL}
-#define ql_elm(a_type) qr(a_type)
+#define ql_elm(a_type) qr(a_type)
/* List functions. */
-#define ql_new(a_head) do { \
+#define ql_new(a_head) do { \
(a_head)->qlh_first = NULL; \
} while (0)
-#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
+#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
-#define ql_first(a_head) ((a_head)->qlh_first)
+#define ql_first(a_head) ((a_head)->qlh_first)
-#define ql_last(a_head, a_field) \
+#define ql_last(a_head, a_field) \
((ql_first(a_head) != NULL) \
? qr_prev(ql_first(a_head), a_field) : NULL)
-#define ql_next(a_head, a_elm, a_field) \
+#define ql_next(a_head, a_elm, a_field) \
((ql_last(a_head, a_field) != (a_elm)) \
? qr_next((a_elm), a_field) : NULL)
-#define ql_prev(a_head, a_elm, a_field) \
+#define ql_prev(a_head, a_elm, a_field) \
((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \
: NULL)
-#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
+#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
qr_before_insert((a_qlelm), (a_elm), a_field); \
if (ql_first(a_head) == (a_qlelm)) { \
ql_first(a_head) = (a_elm); \
} \
} while (0)
-#define ql_after_insert(a_qlelm, a_elm, a_field) \
+#define ql_after_insert(a_qlelm, a_elm, a_field) \
qr_after_insert((a_qlelm), (a_elm), a_field)
-#define ql_head_insert(a_head, a_elm, a_field) do { \
+#define ql_head_insert(a_head, a_elm, a_field) do { \
if (ql_first(a_head) != NULL) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = (a_elm); \
} while (0)
-#define ql_tail_insert(a_head, a_elm, a_field) do { \
+#define ql_tail_insert(a_head, a_elm, a_field) do { \
if (ql_first(a_head) != NULL) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = qr_next((a_elm), a_field); \
} while (0)
-#define ql_remove(a_head, a_elm, a_field) do { \
+#define ql_remove(a_head, a_elm, a_field) do { \
if (ql_first(a_head) == (a_elm)) { \
ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
} \
@@ -64,18 +69,20 @@ struct { \
} \
} while (0)
-#define ql_head_remove(a_head, a_type, a_field) do { \
+#define ql_head_remove(a_head, a_type, a_field) do { \
a_type *t = ql_first(a_head); \
ql_remove((a_head), t, a_field); \
} while (0)
-#define ql_tail_remove(a_head, a_type, a_field) do { \
+#define ql_tail_remove(a_head, a_type, a_field) do { \
a_type *t = ql_last(a_head, a_field); \
ql_remove((a_head), t, a_field); \
} while (0)
-#define ql_foreach(a_var, a_head, a_field) \
+#define ql_foreach(a_var, a_head, a_field) \
qr_foreach((a_var), ql_first(a_head), a_field)
-#define ql_reverse_foreach(a_var, a_head, a_field) \
+#define ql_reverse_foreach(a_var, a_head, a_field) \
qr_reverse_foreach((a_var), ql_first(a_head), a_field)
+
+#endif /* JEMALLOC_INTERNAL_QL_H */
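
The ql_* macros implement an intrusive circular list: the linkage lives inside the element (ql_elm), the head is a single pointer to the first element, and traversal uses ql_foreach. A short usage sketch, assuming the jemalloc include directory is on the compiler's include path; the widget type is illustrative.

#include <stdio.h>

#include "jemalloc/internal/ql.h"	/* Pulls in qr.h as well. */

typedef struct widget_s widget_t;
struct widget_s {
	int id;
	ql_elm(widget_t) link;	/* Intrusive list linkage. */
};

int
main(void) {
	ql_head(widget_t) head = ql_head_initializer(head);
	widget_t a, b;

	a.id = 1;
	b.id = 2;
	ql_elm_new(&a, link);
	ql_elm_new(&b, link);
	ql_tail_insert(&head, &a, link);
	ql_tail_insert(&head, &b, link);

	widget_t *w;
	ql_foreach(w, &head, link) {
		printf("widget %d\n", w->id);	/* Prints 1, then 2. */
	}
	return 0;
}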
diff --git a/contrib/jemalloc/include/jemalloc/internal/qr.h b/contrib/jemalloc/include/jemalloc/internal/qr.h
index 0fbaec25e7c0..1e1056b38685 100644
--- a/contrib/jemalloc/include/jemalloc/internal/qr.h
+++ b/contrib/jemalloc/include/jemalloc/internal/qr.h
@@ -1,38 +1,39 @@
+#ifndef JEMALLOC_INTERNAL_QR_H
+#define JEMALLOC_INTERNAL_QR_H
+
/* Ring definitions. */
-#define qr(a_type) \
+#define qr(a_type) \
struct { \
a_type *qre_next; \
a_type *qre_prev; \
}
/* Ring functions. */
-#define qr_new(a_qr, a_field) do { \
+#define qr_new(a_qr, a_field) do { \
(a_qr)->a_field.qre_next = (a_qr); \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
-#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)
+#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)
-#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)
+#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)
-#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
+#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
(a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \
(a_qr)->a_field.qre_next = (a_qrelm); \
(a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \
(a_qrelm)->a_field.qre_prev = (a_qr); \
} while (0)
-#define qr_after_insert(a_qrelm, a_qr, a_field) \
- do \
- { \
+#define qr_after_insert(a_qrelm, a_qr, a_field) do { \
(a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \
(a_qr)->a_field.qre_prev = (a_qrelm); \
(a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \
(a_qrelm)->a_field.qre_next = (a_qr); \
- } while (0)
+} while (0)
-#define qr_meld(a_qr_a, a_qr_b, a_field) do { \
- void *t; \
+#define qr_meld(a_qr_a, a_qr_b, a_type, a_field) do { \
+ a_type *t; \
(a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
(a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
t = (a_qr_a)->a_field.qre_prev; \
@@ -44,10 +45,10 @@ struct { \
* qr_meld() and qr_split() are functionally equivalent, so there's no need to
* have two copies of the code.
*/
-#define qr_split(a_qr_a, a_qr_b, a_field) \
- qr_meld((a_qr_a), (a_qr_b), a_field)
+#define qr_split(a_qr_a, a_qr_b, a_type, a_field) \
+ qr_meld((a_qr_a), (a_qr_b), a_type, a_field)
-#define qr_remove(a_qr, a_field) do { \
+#define qr_remove(a_qr, a_field) do { \
(a_qr)->a_field.qre_prev->a_field.qre_next \
= (a_qr)->a_field.qre_next; \
(a_qr)->a_field.qre_next->a_field.qre_prev \
@@ -56,14 +57,16 @@ struct { \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
-#define qr_foreach(var, a_qr, a_field) \
+#define qr_foreach(var, a_qr, a_field) \
for ((var) = (a_qr); \
(var) != NULL; \
(var) = (((var)->a_field.qre_next != (a_qr)) \
? (var)->a_field.qre_next : NULL))
-#define qr_reverse_foreach(var, a_qr, a_field) \
+#define qr_reverse_foreach(var, a_qr, a_field) \
for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \
(var) != NULL; \
(var) = (((var) != (a_qr)) \
? (var)->a_field.qre_prev : NULL))
+
+#endif /* JEMALLOC_INTERNAL_QR_H */
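
Note that qr_meld() and qr_split() now take an explicit a_type argument so the temporary used for the pointer swap can be properly typed instead of void *. A brief sketch of melding two singleton rings under the updated signature, assuming the jemalloc include directory is on the include path; the node type is illustrative.

#include <stdio.h>

#include "jemalloc/internal/qr.h"

typedef struct node_s node_t;
struct node_s {
	int id;
	qr(node_t) link;	/* Ring linkage. */
};

int
main(void) {
	node_t a, b;

	a.id = 1;
	b.id = 2;
	qr_new(&a, link);	/* Two singleton rings. */
	qr_new(&b, link);

	/* The element type is now passed explicitly. */
	qr_meld(&a, &b, node_t, link);

	node_t *n;
	qr_foreach(n, &a, link) {
		printf("node %d\n", n->id);	/* Visits a, then b. */
	}
	return 0;
}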
diff --git a/contrib/jemalloc/include/jemalloc/internal/quarantine.h b/contrib/jemalloc/include/jemalloc/internal/quarantine.h
deleted file mode 100644
index ae607399f6d7..000000000000
--- a/contrib/jemalloc/include/jemalloc/internal/quarantine.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-typedef struct quarantine_obj_s quarantine_obj_t;
-typedef struct quarantine_s quarantine_t;
-
-/* Default per thread quarantine size if valgrind is enabled. */
-#define JEMALLOC_VALGRIND_QUARANTINE_DEFAULT (ZU(1) << 24)
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-struct quarantine_obj_s {
- void *ptr;
- size_t usize;
-};
-
-struct quarantine_s {
- size_t curbytes;
- size_t curobjs;
- size_t first;
-#define LG_MAXOBJS_INIT 10
- size_t lg_maxobjs;
- quarantine_obj_t objs[1]; /* Dynamically sized ring buffer. */
-};
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-void quarantine_alloc_hook_work(tsd_t *tsd);
-void quarantine(tsd_t *tsd, void *ptr);
-void quarantine_cleanup(tsd_t *tsd);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-void quarantine_alloc_hook(void);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_QUARANTINE_C_))
-JEMALLOC_ALWAYS_INLINE void
-quarantine_alloc_hook(void)
-{
- tsd_t *tsd;
-
- assert(config_fill && opt_quarantine);
-
- tsd = tsd_fetch();
- if (tsd_quarantine_get(tsd) == NULL)
- quarantine_alloc_hook_work(tsd);
-}
-#endif
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
-
diff --git a/contrib/jemalloc/include/jemalloc/internal/rb.h b/contrib/jemalloc/include/jemalloc/internal/rb.h
index 3770342f805a..47fa5ca99bbe 100644
--- a/contrib/jemalloc/include/jemalloc/internal/rb.h
+++ b/contrib/jemalloc/include/jemalloc/internal/rb.h
@@ -20,17 +20,21 @@
*/
#ifndef RB_H_
-#define RB_H_
+#define RB_H_
+
+#ifndef __PGI
+#define RB_COMPACT
+#endif
#ifdef RB_COMPACT
/* Node structure. */
-#define rb_node(a_type) \
+#define rb_node(a_type) \
struct { \
a_type *rbn_left; \
a_type *rbn_right_red; \
}
#else
-#define rb_node(a_type) \
+#define rb_node(a_type) \
struct { \
a_type *rbn_left; \
a_type *rbn_right; \
@@ -39,48 +43,48 @@ struct { \
#endif
/* Root structure. */
-#define rb_tree(a_type) \
+#define rb_tree(a_type) \
struct { \
a_type *rbt_root; \
}
/* Left accessors. */
-#define rbtn_left_get(a_type, a_field, a_node) \
+#define rbtn_left_get(a_type, a_field, a_node) \
((a_node)->a_field.rbn_left)
-#define rbtn_left_set(a_type, a_field, a_node, a_left) do { \
+#define rbtn_left_set(a_type, a_field, a_node, a_left) do { \
(a_node)->a_field.rbn_left = a_left; \
} while (0)
#ifdef RB_COMPACT
/* Right accessors. */
-#define rbtn_right_get(a_type, a_field, a_node) \
+#define rbtn_right_get(a_type, a_field, a_node) \
((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \
& ((ssize_t)-2)))
-#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \
+#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \
(a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \
| (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \
} while (0)
/* Color accessors. */
-#define rbtn_red_get(a_type, a_field, a_node) \
+#define rbtn_red_get(a_type, a_field, a_node) \
((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \
& ((size_t)1)))
-#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \
+#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \
(a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \
(a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \
| ((ssize_t)a_red)); \
} while (0)
-#define rbtn_red_set(a_type, a_field, a_node) do { \
+#define rbtn_red_set(a_type, a_field, a_node) do { \
(a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \
(a_node)->a_field.rbn_right_red) | ((size_t)1)); \
} while (0)
-#define rbtn_black_set(a_type, a_field, a_node) do { \
+#define rbtn_black_set(a_type, a_field, a_node) do { \
(a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \
(a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \
} while (0)
/* Node initializer. */
-#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
+#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
/* Bookkeeping bit cannot be used by node pointer. */ \
assert(((uintptr_t)(a_node) & 0x1) == 0); \
rbtn_left_set(a_type, a_field, (a_node), NULL); \
@@ -89,27 +93,27 @@ struct { \
} while (0)
#else
/* Right accessors. */
-#define rbtn_right_get(a_type, a_field, a_node) \
+#define rbtn_right_get(a_type, a_field, a_node) \
((a_node)->a_field.rbn_right)
-#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \
+#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \
(a_node)->a_field.rbn_right = a_right; \
} while (0)
/* Color accessors. */
-#define rbtn_red_get(a_type, a_field, a_node) \
+#define rbtn_red_get(a_type, a_field, a_node) \
((a_node)->a_field.rbn_red)
-#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \
+#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \
(a_node)->a_field.rbn_red = (a_red); \
} while (0)
-#define rbtn_red_set(a_type, a_field, a_node) do { \
+#define rbtn_red_set(a_type, a_field, a_node) do { \
(a_node)->a_field.rbn_red = true; \
} while (0)
-#define rbtn_black_set(a_type, a_field, a_node) do { \
+#define rbtn_black_set(a_type, a_field, a_node) do { \
(a_node)->a_field.rbn_red = false; \
} while (0)
/* Node initializer. */
-#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
+#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
rbtn_left_set(a_type, a_field, (a_node), NULL); \
rbtn_right_set(a_type, a_field, (a_node), NULL); \
rbtn_red_set(a_type, a_field, (a_node)); \
@@ -117,12 +121,12 @@ struct { \
#endif
/* Tree initializer. */
-#define rb_new(a_type, a_field, a_rbt) do { \
+#define rb_new(a_type, a_field, a_rbt) do { \
(a_rbt)->rbt_root = NULL; \
} while (0)
/* Internal utility macros. */
-#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \
+#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \
(r_node) = (a_root); \
if ((r_node) != NULL) { \
for (; \
@@ -132,7 +136,7 @@ struct { \
} \
} while (0)
-#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \
+#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \
(r_node) = (a_root); \
if ((r_node) != NULL) { \
for (; rbtn_right_get(a_type, a_field, (r_node)) != NULL; \
@@ -141,14 +145,14 @@ struct { \
} \
} while (0)
-#define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \
+#define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \
(r_node) = rbtn_right_get(a_type, a_field, (a_node)); \
rbtn_right_set(a_type, a_field, (a_node), \
rbtn_left_get(a_type, a_field, (r_node))); \
rbtn_left_set(a_type, a_field, (r_node), (a_node)); \
} while (0)
-#define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \
+#define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \
(r_node) = rbtn_left_get(a_type, a_field, (a_node)); \
rbtn_left_set(a_type, a_field, (a_node), \
rbtn_right_get(a_type, a_field, (r_node))); \
@@ -160,7 +164,7 @@ struct { \
* functions generated by an equivalently parameterized call to rb_gen().
*/
-#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \
+#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \
a_attr void \
a_prefix##new(a_rbt_type *rbtree); \
a_attr bool \
@@ -335,7 +339,7 @@ a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
* has begun.
* arg : Opaque pointer passed to cb().
*/
-#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \
+#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \
a_attr void \
a_prefix##new(a_rbt_type *rbtree) { \
rb_new(a_type, a_field, rbtree); \
@@ -348,13 +352,13 @@ a_attr a_type * \
a_prefix##first(a_rbt_type *rbtree) { \
a_type *ret; \
rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
- return (ret); \
+ return ret; \
} \
a_attr a_type * \
a_prefix##last(a_rbt_type *rbtree) { \
a_type *ret; \
rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
- return (ret); \
+ return ret; \
} \
a_attr a_type * \
a_prefix##next(a_rbt_type *rbtree, a_type *node) { \
@@ -379,7 +383,7 @@ a_prefix##next(a_rbt_type *rbtree, a_type *node) { \
assert(tnode != NULL); \
} \
} \
- return (ret); \
+ return ret; \
} \
a_attr a_type * \
a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \
@@ -404,7 +408,7 @@ a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \
assert(tnode != NULL); \
} \
} \
- return (ret); \
+ return ret; \
} \
a_attr a_type * \
a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \
@@ -419,7 +423,7 @@ a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \
ret = rbtn_right_get(a_type, a_field, ret); \
} \
} \
- return (ret); \
+ return ret; \
} \
a_attr a_type * \
a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \
@@ -438,7 +442,7 @@ a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \
break; \
} \
} \
- return (ret); \
+ return ret; \
} \
a_attr a_type * \
a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \
@@ -457,7 +461,7 @@ a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \
break; \
} \
} \
- return (ret); \
+ return ret; \
} \
a_attr void \
a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
@@ -550,8 +554,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
/* Find node's successor, in preparation for swap. */ \
pathp->cmp = 1; \
nodep = pathp; \
- for (pathp++; pathp->node != NULL; \
- pathp++) { \
+ for (pathp++; pathp->node != NULL; pathp++) { \
pathp->cmp = -1; \
pathp[1].node = rbtn_left_get(a_type, a_field, \
pathp->node); \
@@ -873,16 +876,16 @@ a_attr a_type * \
a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
if (node == NULL) { \
- return (NULL); \
+ return NULL; \
} else { \
a_type *ret; \
if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \
a_field, node), cb, arg)) != NULL || (ret = cb(rbtree, node, \
arg)) != NULL) { \
- return (ret); \
+ return ret; \
} \
- return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
- a_field, node), cb, arg)); \
+ return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
+ a_field, node), cb, arg); \
} \
} \
a_attr a_type * \
@@ -894,20 +897,20 @@ a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \
if ((ret = a_prefix##iter_start(rbtree, start, \
rbtn_left_get(a_type, a_field, node), cb, arg)) != NULL || \
(ret = cb(rbtree, node, arg)) != NULL) { \
- return (ret); \
+ return ret; \
} \
- return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
- a_field, node), cb, arg)); \
+ return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
+ a_field, node), cb, arg); \
} else if (cmp > 0) { \
- return (a_prefix##iter_start(rbtree, start, \
- rbtn_right_get(a_type, a_field, node), cb, arg)); \
+ return a_prefix##iter_start(rbtree, start, \
+ rbtn_right_get(a_type, a_field, node), cb, arg); \
} else { \
a_type *ret; \
if ((ret = cb(rbtree, node, arg)) != NULL) { \
- return (ret); \
+ return ret; \
} \
- return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
- a_field, node), cb, arg)); \
+ return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
+ a_field, node), cb, arg); \
} \
} \
a_attr a_type * \
@@ -920,22 +923,22 @@ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \
} else { \
ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\
} \
- return (ret); \
+ return ret; \
} \
a_attr a_type * \
a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
if (node == NULL) { \
- return (NULL); \
+ return NULL; \
} else { \
a_type *ret; \
if ((ret = a_prefix##reverse_iter_recurse(rbtree, \
rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \
(ret = cb(rbtree, node, arg)) != NULL) { \
- return (ret); \
+ return ret; \
} \
- return (a_prefix##reverse_iter_recurse(rbtree, \
- rbtn_left_get(a_type, a_field, node), cb, arg)); \
+ return a_prefix##reverse_iter_recurse(rbtree, \
+ rbtn_left_get(a_type, a_field, node), cb, arg); \
} \
} \
a_attr a_type * \
@@ -948,20 +951,20 @@ a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \
if ((ret = a_prefix##reverse_iter_start(rbtree, start, \
rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \
(ret = cb(rbtree, node, arg)) != NULL) { \
- return (ret); \
+ return ret; \
} \
- return (a_prefix##reverse_iter_recurse(rbtree, \
- rbtn_left_get(a_type, a_field, node), cb, arg)); \
+ return a_prefix##reverse_iter_recurse(rbtree, \
+ rbtn_left_get(a_type, a_field, node), cb, arg); \
} else if (cmp < 0) { \
- return (a_prefix##reverse_iter_start(rbtree, start, \
- rbtn_left_get(a_type, a_field, node), cb, arg)); \
+ return a_prefix##reverse_iter_start(rbtree, start, \
+ rbtn_left_get(a_type, a_field, node), cb, arg); \
} else { \
a_type *ret; \
if ((ret = cb(rbtree, node, arg)) != NULL) { \
- return (ret); \
+ return ret; \
} \
- return (a_prefix##reverse_iter_recurse(rbtree, \
- rbtn_left_get(a_type, a_field, node), cb, arg)); \
+ return a_prefix##reverse_iter_recurse(rbtree, \
+ rbtn_left_get(a_type, a_field, node), cb, arg); \
} \
} \
a_attr a_type * \
@@ -975,7 +978,7 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \
cb, arg); \
} \
- return (ret); \
+ return ret; \
} \
a_attr void \
a_prefix##destroy_recurse(a_rbt_type *rbtree, a_type *node, void (*cb)( \
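
rb_gen() expands to a complete set of red-black tree functions for a given node type, embedded link field, and comparator, all prefixed with the supplied name. A compact usage sketch, assuming the jemalloc include directory is on the include path; the item type and comparator are illustrative, and the unused generated helpers only provoke compiler warnings.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>

#include "jemalloc/internal/rb.h"

typedef struct item_s item_t;
struct item_s {
	int key;
	rb_node(item_t) link;	/* Embedded tree linkage. */
};

static int
item_cmp(const item_t *a, const item_t *b) {
	return (a->key > b->key) - (a->key < b->key);
}

typedef rb_tree(item_t) item_tree_t;
/* Generates static itree_new(), itree_insert(), itree_first(), itree_next(), ... */
rb_gen(static, itree_, item_tree_t, item_t, link, item_cmp)

int
main(void) {
	item_tree_t tree;
	item_t x, y;

	x.key = 2;
	y.key = 1;
	itree_new(&tree);
	itree_insert(&tree, &x);
	itree_insert(&tree, &y);
	for (item_t *it = itree_first(&tree); it != NULL;
	    it = itree_next(&tree, it)) {
		printf("key %d\n", it->key);	/* Prints 1, then 2. */
	}
	return 0;
}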
diff --git a/contrib/jemalloc/include/jemalloc/internal/rtree.h b/contrib/jemalloc/include/jemalloc/internal/rtree.h
index 8d0c584daf08..b5d4db3988f3 100644
--- a/contrib/jemalloc/include/jemalloc/internal/rtree.h
+++ b/contrib/jemalloc/include/jemalloc/internal/rtree.h
@@ -1,75 +1,72 @@
+#ifndef JEMALLOC_INTERNAL_RTREE_H
+#define JEMALLOC_INTERNAL_RTREE_H
+
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/rtree_tsd.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/tsd.h"
+
/*
* This radix tree implementation is tailored to the singular purpose of
- * associating metadata with chunks that are currently owned by jemalloc.
+ * associating metadata with extents that are currently owned by jemalloc.
*
*******************************************************************************
*/
-#ifdef JEMALLOC_H_TYPES
-
-typedef struct rtree_node_elm_s rtree_node_elm_t;
-typedef struct rtree_level_s rtree_level_t;
-typedef struct rtree_s rtree_t;
-
-/*
- * RTREE_BITS_PER_LEVEL must be a power of two that is no larger than the
- * machine address width.
- */
-#define LG_RTREE_BITS_PER_LEVEL 4
-#define RTREE_BITS_PER_LEVEL (1U << LG_RTREE_BITS_PER_LEVEL)
-/* Maximum rtree height. */
-#define RTREE_HEIGHT_MAX \
- ((1U << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL)
-/* Used for two-stage lock-free node initialization. */
-#define RTREE_NODE_INITIALIZING ((rtree_node_elm_t *)0x1)
-
-/*
- * The node allocation callback function's argument is the number of contiguous
- * rtree_node_elm_t structures to allocate, and the resulting memory must be
- * zeroed.
- */
-typedef rtree_node_elm_t *(rtree_node_alloc_t)(size_t);
-typedef void (rtree_node_dalloc_t)(rtree_node_elm_t *);
+/* Number of high insignificant bits. */
+#define RTREE_NHIB ((1U << (LG_SIZEOF_PTR+3)) - LG_VADDR)
+/* Number of low insignificant bits. */
+#define RTREE_NLIB LG_PAGE
+/* Number of significant bits. */
+#define RTREE_NSB (LG_VADDR - RTREE_NLIB)
+/* Number of levels in radix tree. */
+#if RTREE_NSB <= 10
+# define RTREE_HEIGHT 1
+#elif RTREE_NSB <= 36
+# define RTREE_HEIGHT 2
+#elif RTREE_NSB <= 52
+# define RTREE_HEIGHT 3
+#else
+# error Unsupported number of significant virtual address bits
+#endif
+/* Use compact leaf representation if virtual address encoding allows. */
+#if RTREE_NHIB >= LG_CEIL_NSIZES
+# define RTREE_LEAF_COMPACT
+#endif
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
+/* Needed for initialization only. */
+#define RTREE_LEAFKEY_INVALID ((uintptr_t)1)
+typedef struct rtree_node_elm_s rtree_node_elm_t;
struct rtree_node_elm_s {
- union {
- void *pun;
- rtree_node_elm_t *child;
- extent_node_t *val;
- };
+ atomic_p_t child; /* (rtree_{node,leaf}_elm_t *) */
};
-struct rtree_level_s {
+struct rtree_leaf_elm_s {
+#ifdef RTREE_LEAF_COMPACT
/*
- * A non-NULL subtree points to a subtree rooted along the hypothetical
- * path to the leaf node corresponding to key 0. Depending on what keys
- * have been used to store to the tree, an arbitrary combination of
- * subtree pointers may remain NULL.
- *
- * Suppose keys comprise 48 bits, and LG_RTREE_BITS_PER_LEVEL is 4.
- * This results in a 3-level tree, and the leftmost leaf can be directly
- * accessed via subtrees[2], the subtree prefixed by 0x0000 (excluding
- * 0x00000000) can be accessed via subtrees[1], and the remainder of the
- * tree can be accessed via subtrees[0].
- *
- * levels[0] : [<unused> | 0x0001******** | 0x0002******** | ...]
+ * Single pointer-width field containing all three leaf element fields.
+ * For example, on a 64-bit x64 system with 48 significant virtual
+ * memory address bits, the index, extent, and slab fields are packed as
+ * such:
*
- * levels[1] : [<unused> | 0x00000001**** | 0x00000002**** | ... ]
+ * x: index
+ * e: extent
+ * b: slab
*
- * levels[2] : [val(0x000000000000) | val(0x000000000001) | ...]
- *
- * This has practical implications on x64, which currently uses only the
- * lower 47 bits of virtual address space in userland, thus leaving
- * subtrees[0] unused and avoiding a level of tree traversal.
+ * 00000000 xxxxxxxx eeeeeeee [...] eeeeeeee eeee000b
*/
- union {
- void *subtree_pun;
- rtree_node_elm_t *subtree;
- };
+ atomic_p_t le_bits;
+#else
+ atomic_p_t le_extent; /* (extent_t *) */
+ atomic_u_t le_szind; /* (szind_t) */
+ atomic_b_t le_slab; /* (bool) */
+#endif
+};
+
+typedef struct rtree_level_s rtree_level_t;
+struct rtree_level_s {
/* Number of key bits distinguished by this level. */
unsigned bits;
/*
@@ -79,288 +76,399 @@ struct rtree_level_s {
unsigned cumbits;
};
+typedef struct rtree_s rtree_t;
struct rtree_s {
- rtree_node_alloc_t *alloc;
- rtree_node_dalloc_t *dalloc;
- unsigned height;
- /*
- * Precomputed table used to convert from the number of leading 0 key
- * bits to which subtree level to start at.
- */
- unsigned start_level[RTREE_HEIGHT_MAX];
- rtree_level_t levels[RTREE_HEIGHT_MAX];
+ malloc_mutex_t init_lock;
+ /* Number of elements based on rtree_levels[0].bits. */
+#if RTREE_HEIGHT > 1
+ rtree_node_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)];
+#else
+ rtree_leaf_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)];
+#endif
};
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-bool rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
- rtree_node_dalloc_t *dalloc);
-void rtree_delete(rtree_t *rtree);
-rtree_node_elm_t *rtree_subtree_read_hard(rtree_t *rtree,
- unsigned level);
-rtree_node_elm_t *rtree_child_read_hard(rtree_t *rtree,
- rtree_node_elm_t *elm, unsigned level);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-unsigned rtree_start_level(rtree_t *rtree, uintptr_t key);
-uintptr_t rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level);
-
-bool rtree_node_valid(rtree_node_elm_t *node);
-rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm,
- bool dependent);
-rtree_node_elm_t *rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm,
- unsigned level, bool dependent);
-extent_node_t *rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm,
- bool dependent);
-void rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm,
- const extent_node_t *val);
-rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level,
- bool dependent);
-rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level,
- bool dependent);
-
-extent_node_t *rtree_get(rtree_t *rtree, uintptr_t key, bool dependent);
-bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val);
+/*
+ * Split the bits into one to three partitions depending on number of
+ * significant bits. If the number of bits does not divide evenly into the
+ * number of levels, place one remainder bit per level starting at the leaf
+ * level.
+ */
+static const rtree_level_t rtree_levels[] = {
+#if RTREE_HEIGHT == 1
+ {RTREE_NSB, RTREE_NHIB + RTREE_NSB}
+#elif RTREE_HEIGHT == 2
+ {RTREE_NSB/2, RTREE_NHIB + RTREE_NSB/2},
+ {RTREE_NSB/2 + RTREE_NSB%2, RTREE_NHIB + RTREE_NSB}
+#elif RTREE_HEIGHT == 3
+ {RTREE_NSB/3, RTREE_NHIB + RTREE_NSB/3},
+ {RTREE_NSB/3 + RTREE_NSB%3/2,
+ RTREE_NHIB + RTREE_NSB/3*2 + RTREE_NSB%3/2},
+ {RTREE_NSB/3 + RTREE_NSB%3 - RTREE_NSB%3/2, RTREE_NHIB + RTREE_NSB}
+#else
+# error Unsupported rtree height
#endif
+};
+
+bool rtree_new(rtree_t *rtree, bool zeroed);
+
+typedef rtree_node_elm_t *(rtree_node_alloc_t)(tsdn_t *, rtree_t *, size_t);
+extern rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc;
+
+typedef rtree_leaf_elm_t *(rtree_leaf_alloc_t)(tsdn_t *, rtree_t *, size_t);
+extern rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc;
+
+typedef void (rtree_node_dalloc_t)(tsdn_t *, rtree_t *, rtree_node_elm_t *);
+extern rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc;
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
-JEMALLOC_ALWAYS_INLINE unsigned
-rtree_start_level(rtree_t *rtree, uintptr_t key)
-{
- unsigned start_level;
+typedef void (rtree_leaf_dalloc_t)(tsdn_t *, rtree_t *, rtree_leaf_elm_t *);
+extern rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc;
+#ifdef JEMALLOC_JET
+void rtree_delete(tsdn_t *tsdn, rtree_t *rtree);
+#endif
+rtree_leaf_elm_t *rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree,
+ rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing);
- if (unlikely(key == 0))
- return (rtree->height - 1);
+JEMALLOC_ALWAYS_INLINE uintptr_t
+rtree_leafkey(uintptr_t key) {
+ unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
+ unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
+ rtree_levels[RTREE_HEIGHT-1].bits);
+ unsigned maskbits = ptrbits - cumbits;
+ uintptr_t mask = ~((ZU(1) << maskbits) - 1);
+ return (key & mask);
+}
- start_level = rtree->start_level[lg_floor(key) >>
- LG_RTREE_BITS_PER_LEVEL];
- assert(start_level < rtree->height);
- return (start_level);
+JEMALLOC_ALWAYS_INLINE size_t
+rtree_cache_direct_map(uintptr_t key) {
+ unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
+ unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
+ rtree_levels[RTREE_HEIGHT-1].bits);
+ unsigned maskbits = ptrbits - cumbits;
+ return (size_t)((key >> maskbits) & (RTREE_CTX_NCACHE - 1));
}
JEMALLOC_ALWAYS_INLINE uintptr_t
-rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level)
-{
+rtree_subkey(uintptr_t key, unsigned level) {
+ unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
+ unsigned cumbits = rtree_levels[level].cumbits;
+ unsigned shiftbits = ptrbits - cumbits;
+ unsigned maskbits = rtree_levels[level].bits;
+ uintptr_t mask = (ZU(1) << maskbits) - 1;
+ return ((key >> shiftbits) & mask);
+}
- return ((key >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
- rtree->levels[level].cumbits)) & ((ZU(1) <<
- rtree->levels[level].bits) - 1));
+/*
+ * Atomic getters.
+ *
+ * dependent: Reading a value on behalf of a pointer to a valid allocation
+ * is guaranteed to be a clean read even without synchronization,
+ * because the rtree update became visible in memory before the
+ * pointer came into existence.
+ * !dependent: An arbitrary read, e.g. on behalf of ivsalloc(), may not be
+ * dependent on a previous rtree write, which means a stale read
+ * could result if synchronization were omitted here.
+ */
+# ifdef RTREE_LEAF_COMPACT
+JEMALLOC_ALWAYS_INLINE uintptr_t
+rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
+ bool dependent) {
+ return (uintptr_t)atomic_load_p(&elm->le_bits, dependent
+ ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
+}
+
+JEMALLOC_ALWAYS_INLINE extent_t *
+rtree_leaf_elm_bits_extent_get(uintptr_t bits) {
+ /* Restore sign-extended high bits, mask slab bit. */
+ return (extent_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB) >>
+ RTREE_NHIB) & ~((uintptr_t)0x1));
+}
+
+JEMALLOC_ALWAYS_INLINE szind_t
+rtree_leaf_elm_bits_szind_get(uintptr_t bits) {
+ return (szind_t)(bits >> LG_VADDR);
}
JEMALLOC_ALWAYS_INLINE bool
-rtree_node_valid(rtree_node_elm_t *node)
-{
+rtree_leaf_elm_bits_slab_get(uintptr_t bits) {
+ return (bool)(bits & (uintptr_t)0x1);
+}
- return ((uintptr_t)node > (uintptr_t)RTREE_NODE_INITIALIZING);
+# endif
+
+JEMALLOC_ALWAYS_INLINE extent_t *
+rtree_leaf_elm_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
+ bool dependent) {
+#ifdef RTREE_LEAF_COMPACT
+ uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
+ return rtree_leaf_elm_bits_extent_get(bits);
+#else
+ extent_t *extent = (extent_t *)atomic_load_p(&elm->le_extent, dependent
+ ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
+ return extent;
+#endif
}
-JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
-rtree_child_tryread(rtree_node_elm_t *elm, bool dependent)
-{
- rtree_node_elm_t *child;
-
- /* Double-checked read (first read may be stale. */
- child = elm->child;
- if (!dependent && !rtree_node_valid(child))
- child = atomic_read_p(&elm->pun);
- assert(!dependent || child != NULL);
- return (child);
+JEMALLOC_ALWAYS_INLINE szind_t
+rtree_leaf_elm_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
+ bool dependent) {
+#ifdef RTREE_LEAF_COMPACT
+ uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
+ return rtree_leaf_elm_bits_szind_get(bits);
+#else
+ return (szind_t)atomic_load_u(&elm->le_szind, dependent ? ATOMIC_RELAXED
+ : ATOMIC_ACQUIRE);
+#endif
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+rtree_leaf_elm_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
+ bool dependent) {
+#ifdef RTREE_LEAF_COMPACT
+ uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
+ return rtree_leaf_elm_bits_slab_get(bits);
+#else
+ return atomic_load_b(&elm->le_slab, dependent ? ATOMIC_RELAXED :
+ ATOMIC_ACQUIRE);
+#endif
}
-JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
-rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level,
- bool dependent)
-{
- rtree_node_elm_t *child;
-
- child = rtree_child_tryread(elm, dependent);
- if (!dependent && unlikely(!rtree_node_valid(child)))
- child = rtree_child_read_hard(rtree, elm, level);
- assert(!dependent || child != NULL);
- return (child);
+static inline void
+rtree_leaf_elm_extent_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
+ extent_t *extent) {
+#ifdef RTREE_LEAF_COMPACT
+ uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true);
+ uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) <<
+ LG_VADDR) | ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1))
+ | ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits));
+ atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
+#else
+ atomic_store_p(&elm->le_extent, extent, ATOMIC_RELEASE);
+#endif
}
-JEMALLOC_ALWAYS_INLINE extent_node_t *
-rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent)
-{
-
- if (dependent) {
- /*
- * Reading a val on behalf of a pointer to a valid allocation is
- * guaranteed to be a clean read even without synchronization,
- * because the rtree update became visible in memory before the
- * pointer came into existence.
- */
- return (elm->val);
- } else {
- /*
- * An arbitrary read, e.g. on behalf of ivsalloc(), may not be
- * dependent on a previous rtree write, which means a stale read
- * could result if synchronization were omitted here.
- */
- return (atomic_read_p(&elm->pun));
+static inline void
+rtree_leaf_elm_szind_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
+ szind_t szind) {
+ assert(szind <= NSIZES);
+
+#ifdef RTREE_LEAF_COMPACT
+ uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm,
+ true);
+ uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
+ ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
+ (((uintptr_t)0x1 << LG_VADDR) - 1)) |
+ ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits));
+ atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
+#else
+ atomic_store_u(&elm->le_szind, szind, ATOMIC_RELEASE);
+#endif
+}
+
+static inline void
+rtree_leaf_elm_slab_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
+ bool slab) {
+#ifdef RTREE_LEAF_COMPACT
+ uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm,
+ true);
+ uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) <<
+ LG_VADDR) | ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
+ (((uintptr_t)0x1 << LG_VADDR) - 1)) | ((uintptr_t)slab);
+ atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
+#else
+ atomic_store_b(&elm->le_slab, slab, ATOMIC_RELEASE);
+#endif
+}
+
+static inline void
+rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
+ extent_t *extent, szind_t szind, bool slab) {
+#ifdef RTREE_LEAF_COMPACT
+ uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
+ ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1)) |
+ ((uintptr_t)slab);
+ atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
+#else
+ rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);
+ rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind);
+ /*
+ * Write extent last, since the element is atomically considered valid
+ * as soon as the extent field is non-NULL.
+ */
+ rtree_leaf_elm_extent_write(tsdn, rtree, elm, extent);
+#endif
+}
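+
The comment in the non-compact branch above captures a publish-last protocol: the szind and slab fields are written first, and the extent pointer is release-stored last, so a reader that observes a non-NULL extent is guaranteed to see the other fields as well. The following is a minimal standalone C11 sketch of that pattern, not jemalloc code; record_t, record_publish(), and record_read() are names invented for the example.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct {
	_Atomic unsigned	szind;
	atomic_bool		slab;
	_Atomic(void *)		extent;	/* Non-NULL once published. */
} record_t;

static void
record_publish(record_t *r, void *extent, unsigned szind, bool slab) {
	atomic_store_explicit(&r->szind, szind, memory_order_release);
	atomic_store_explicit(&r->slab, slab, memory_order_release);
	/* Publish last: readers treat a non-NULL extent as "valid". */
	atomic_store_explicit(&r->extent, extent, memory_order_release);
}

static bool
record_read(record_t *r, void **extent, unsigned *szind, bool *slab) {
	void *e = atomic_load_explicit(&r->extent, memory_order_acquire);
	if (e == NULL) {
		return false;	/* Not yet published. */
	}
	*extent = e;
	*szind = atomic_load_explicit(&r->szind, memory_order_acquire);
	*slab = atomic_load_explicit(&r->slab, memory_order_acquire);
	return true;
}

int
main(void) {
	static record_t r;	/* Static storage: extent starts out NULL. */
	static int payload;
	void *e;
	unsigned szind;
	bool slab;

	record_publish(&r, &payload, 7, true);
	return record_read(&r, &e, &szind, &slab) ? 0 : 1;
}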
+
+static inline void
+rtree_leaf_elm_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree,
+ rtree_leaf_elm_t *elm, szind_t szind, bool slab) {
+ assert(!slab || szind < NBINS);
+
+ /*
+ * The caller implicitly assures that it is the only writer to the szind
+ * and slab fields, and that the extent field cannot currently change.
+ */
+ rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);
+ rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind);
+}
+
+JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
+rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+ uintptr_t key, bool dependent, bool init_missing) {
+ assert(key != 0);
+ assert(!dependent || !init_missing);
+
+ size_t slot = rtree_cache_direct_map(key);
+ uintptr_t leafkey = rtree_leafkey(key);
+ assert(leafkey != RTREE_LEAFKEY_INVALID);
+
+ /* Fast path: L1 direct mapped cache. */
+ if (likely(rtree_ctx->cache[slot].leafkey == leafkey)) {
+ rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf;
+ assert(leaf != NULL);
+ uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1);
+ return &leaf[subkey];
}
+ /*
+ * Search the L2 LRU cache. On hit, swap the matching element into the
+ * slot in L1 cache, and move the position in L2 up by 1.
+ */
+#define RTREE_CACHE_CHECK_L2(i) do { \
+ if (likely(rtree_ctx->l2_cache[i].leafkey == leafkey)) { \
+ rtree_leaf_elm_t *leaf = rtree_ctx->l2_cache[i].leaf; \
+ assert(leaf != NULL); \
+ if (i > 0) { \
+ /* Bubble up by one. */ \
+ rtree_ctx->l2_cache[i].leafkey = \
+ rtree_ctx->l2_cache[i - 1].leafkey; \
+ rtree_ctx->l2_cache[i].leaf = \
+ rtree_ctx->l2_cache[i - 1].leaf; \
+ rtree_ctx->l2_cache[i - 1].leafkey = \
+ rtree_ctx->cache[slot].leafkey; \
+ rtree_ctx->l2_cache[i - 1].leaf = \
+ rtree_ctx->cache[slot].leaf; \
+ } else { \
+ rtree_ctx->l2_cache[0].leafkey = \
+ rtree_ctx->cache[slot].leafkey; \
+ rtree_ctx->l2_cache[0].leaf = \
+ rtree_ctx->cache[slot].leaf; \
+ } \
+ rtree_ctx->cache[slot].leafkey = leafkey; \
+ rtree_ctx->cache[slot].leaf = leaf; \
+ uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1); \
+ return &leaf[subkey]; \
+ } \
+} while (0)
+ /* Check the first cache entry. */
+ RTREE_CACHE_CHECK_L2(0);
+ /* Search the remaining cache elements. */
+ for (unsigned i = 1; i < RTREE_CTX_NCACHE_L2; i++) {
+ RTREE_CACHE_CHECK_L2(i);
+ }
+#undef RTREE_CACHE_CHECK_L2
+
+ return rtree_leaf_elm_lookup_hard(tsdn, rtree, rtree_ctx, key,
+ dependent, init_missing);
}
-JEMALLOC_INLINE void
-rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_node_t *val)
-{
+static inline bool
+rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
+ extent_t *extent, szind_t szind, bool slab) {
+ /* Use rtree_clear() to set the extent to NULL. */
+ assert(extent != NULL);
- atomic_write_p(&elm->pun, val);
+ rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
+ key, false, true);
+ if (elm == NULL) {
+ return true;
+ }
+
+ assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) == NULL);
+ rtree_leaf_elm_write(tsdn, rtree, elm, extent, szind, slab);
+
+ return false;
}
-JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
-rtree_subtree_tryread(rtree_t *rtree, unsigned level, bool dependent)
-{
- rtree_node_elm_t *subtree;
-
-	/* Double-checked read (first read may be stale). */
- subtree = rtree->levels[level].subtree;
- if (!dependent && unlikely(!rtree_node_valid(subtree)))
- subtree = atomic_read_p(&rtree->levels[level].subtree_pun);
- assert(!dependent || subtree != NULL);
- return (subtree);
+JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
+rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
+ bool dependent) {
+ rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
+ key, dependent, false);
+ if (!dependent && elm == NULL) {
+ return NULL;
+ }
+ assert(elm != NULL);
+ return elm;
}
-JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
-rtree_subtree_read(rtree_t *rtree, unsigned level, bool dependent)
-{
- rtree_node_elm_t *subtree;
+JEMALLOC_ALWAYS_INLINE extent_t *
+rtree_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+ uintptr_t key, bool dependent) {
+ rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
+ dependent);
+ if (!dependent && elm == NULL) {
+ return NULL;
+ }
+ return rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
+}
- subtree = rtree_subtree_tryread(rtree, level, dependent);
- if (!dependent && unlikely(!rtree_node_valid(subtree)))
- subtree = rtree_subtree_read_hard(rtree, level);
- assert(!dependent || subtree != NULL);
- return (subtree);
+JEMALLOC_ALWAYS_INLINE szind_t
+rtree_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+ uintptr_t key, bool dependent) {
+ rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
+ dependent);
+ if (!dependent && elm == NULL) {
+ return NSIZES;
+ }
+ return rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
}
-JEMALLOC_ALWAYS_INLINE extent_node_t *
-rtree_get(rtree_t *rtree, uintptr_t key, bool dependent)
-{
- uintptr_t subkey;
- unsigned start_level;
- rtree_node_elm_t *node;
-
- start_level = rtree_start_level(rtree, key);
-
- node = rtree_subtree_tryread(rtree, start_level, dependent);
-#define RTREE_GET_BIAS (RTREE_HEIGHT_MAX - rtree->height)
- switch (start_level + RTREE_GET_BIAS) {
-#define RTREE_GET_SUBTREE(level) \
- case level: \
- assert(level < (RTREE_HEIGHT_MAX-1)); \
- if (!dependent && unlikely(!rtree_node_valid(node))) \
- return (NULL); \
- subkey = rtree_subkey(rtree, key, level - \
- RTREE_GET_BIAS); \
- node = rtree_child_tryread(&node[subkey], dependent); \
- /* Fall through. */
-#define RTREE_GET_LEAF(level) \
- case level: \
- assert(level == (RTREE_HEIGHT_MAX-1)); \
- if (!dependent && unlikely(!rtree_node_valid(node))) \
- return (NULL); \
- subkey = rtree_subkey(rtree, key, level - \
- RTREE_GET_BIAS); \
- /* \
- * node is a leaf, so it contains values rather than \
- * child pointers. \
- */ \
- return (rtree_val_read(rtree, &node[subkey], \
- dependent));
-#if RTREE_HEIGHT_MAX > 1
- RTREE_GET_SUBTREE(0)
-#endif
-#if RTREE_HEIGHT_MAX > 2
- RTREE_GET_SUBTREE(1)
-#endif
-#if RTREE_HEIGHT_MAX > 3
- RTREE_GET_SUBTREE(2)
-#endif
-#if RTREE_HEIGHT_MAX > 4
- RTREE_GET_SUBTREE(3)
-#endif
-#if RTREE_HEIGHT_MAX > 5
- RTREE_GET_SUBTREE(4)
-#endif
-#if RTREE_HEIGHT_MAX > 6
- RTREE_GET_SUBTREE(5)
-#endif
-#if RTREE_HEIGHT_MAX > 7
- RTREE_GET_SUBTREE(6)
-#endif
-#if RTREE_HEIGHT_MAX > 8
- RTREE_GET_SUBTREE(7)
-#endif
-#if RTREE_HEIGHT_MAX > 9
- RTREE_GET_SUBTREE(8)
-#endif
-#if RTREE_HEIGHT_MAX > 10
- RTREE_GET_SUBTREE(9)
-#endif
-#if RTREE_HEIGHT_MAX > 11
- RTREE_GET_SUBTREE(10)
-#endif
-#if RTREE_HEIGHT_MAX > 12
- RTREE_GET_SUBTREE(11)
-#endif
-#if RTREE_HEIGHT_MAX > 13
- RTREE_GET_SUBTREE(12)
-#endif
-#if RTREE_HEIGHT_MAX > 14
- RTREE_GET_SUBTREE(13)
-#endif
-#if RTREE_HEIGHT_MAX > 15
- RTREE_GET_SUBTREE(14)
-#endif
-#if RTREE_HEIGHT_MAX > 16
-# error Unsupported RTREE_HEIGHT_MAX
-#endif
- RTREE_GET_LEAF(RTREE_HEIGHT_MAX-1)
-#undef RTREE_GET_SUBTREE
-#undef RTREE_GET_LEAF
- default: not_reached();
+/*
+ * rtree_slab_read() is intentionally omitted because slab is always read in
+ * conjunction with szind, which makes rtree_szind_slab_read() a better choice.
+ */
+
+JEMALLOC_ALWAYS_INLINE bool
+rtree_extent_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+ uintptr_t key, bool dependent, extent_t **r_extent, szind_t *r_szind) {
+ rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
+ dependent);
+ if (!dependent && elm == NULL) {
+ return true;
}
-#undef RTREE_GET_BIAS
- not_reached();
+ *r_extent = rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
+ *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
+ return false;
}
-JEMALLOC_INLINE bool
-rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val)
-{
- uintptr_t subkey;
- unsigned i, start_level;
- rtree_node_elm_t *node, *child;
-
- start_level = rtree_start_level(rtree, key);
-
- node = rtree_subtree_read(rtree, start_level, false);
- if (node == NULL)
- return (true);
- for (i = start_level; /**/; i++, node = child) {
- subkey = rtree_subkey(rtree, key, i);
- if (i == rtree->height - 1) {
- /*
- * node is a leaf, so it contains values rather than
- * child pointers.
- */
- rtree_val_write(rtree, &node[subkey], val);
- return (false);
- }
- assert(i + 1 < rtree->height);
- child = rtree_child_read(rtree, &node[subkey], i, false);
- if (child == NULL)
- return (true);
+JEMALLOC_ALWAYS_INLINE bool
+rtree_szind_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+ uintptr_t key, bool dependent, szind_t *r_szind, bool *r_slab) {
+ rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
+ dependent);
+ if (!dependent && elm == NULL) {
+ return true;
}
- not_reached();
+ *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
+ *r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, dependent);
+ return false;
+}
+
+static inline void
+rtree_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+ uintptr_t key, szind_t szind, bool slab) {
+ assert(!slab || szind < NBINS);
+
+ rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
+ rtree_leaf_elm_szind_slab_update(tsdn, rtree, elm, szind, slab);
+}
+
+static inline void
+rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+ uintptr_t key) {
+ rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
+ assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) !=
+ NULL);
+ rtree_leaf_elm_write(tsdn, rtree, elm, NULL, NSIZES, false);
}
-#endif
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
+#endif /* JEMALLOC_INTERNAL_RTREE_H */
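When RTREE_LEAF_COMPACT is defined, the leaf accessors above keep the size-class index, the extent pointer, and the slab flag in a single word: the index occupies the bits above LG_VADDR, the pointer the low LG_VADDR bits, and the slab flag bit 0 of the (suitably aligned) pointer. The standalone sketch below illustrates that encoding; LG_VADDR_SKETCH, fake_extent_t, and the pack/unpack helpers are assumptions made for the example, and the sketch ignores the sign-extension of high address bits that the real accessors may need to restore.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LG_VADDR_SKETCH	48	/* Assumed useful virtual address bits. */
#define VADDR_MASK	((((uintptr_t)1) << LG_VADDR_SKETCH) - 1)

typedef struct { int dummy; } fake_extent_t;	/* Stand-in for extent_t. */

static uintptr_t
pack(unsigned szind, fake_extent_t *extent, bool slab) {
	/* Pointer must be at least 2-byte aligned so bit 0 can hold slab. */
	assert(((uintptr_t)extent & 1) == 0);
	return ((uintptr_t)szind << LG_VADDR_SKETCH) |
	    ((uintptr_t)extent & VADDR_MASK) | (uintptr_t)slab;
}

static unsigned
unpack_szind(uintptr_t bits) {
	return (unsigned)(bits >> LG_VADDR_SKETCH);
}

static fake_extent_t *
unpack_extent(uintptr_t bits) {
	/* Clear the szind high bits and the slab bit. */
	return (fake_extent_t *)(bits & VADDR_MASK & ~(uintptr_t)1);
}

static bool
unpack_slab(uintptr_t bits) {
	return (bool)(bits & 1);
}

int
main(void) {
	fake_extent_t e;
	uintptr_t bits = pack(39, &e, true);

	assert(unpack_szind(bits) == 39);
	assert(unpack_extent(bits) == &e);
	assert(unpack_slab(bits));
	printf("packed word: %#lx\n", (unsigned long)bits);
	return 0;
}

The payoff of this layout is that rtree_leaf_elm_write() and rtree_extent_szind_read() touch a single atomic word instead of three separate fields.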
diff --git a/contrib/jemalloc/include/jemalloc/internal/rtree_tsd.h b/contrib/jemalloc/include/jemalloc/internal/rtree_tsd.h
new file mode 100644
index 000000000000..3cdc8625487c
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/rtree_tsd.h
@@ -0,0 +1,50 @@
+#ifndef JEMALLOC_INTERNAL_RTREE_CTX_H
+#define JEMALLOC_INTERNAL_RTREE_CTX_H
+
+/*
+ * Number of leafkey/leaf pairs to cache at L1 and L2 levels, respectively. Each
+ * entry supports an entire leaf, so the cache hit rate is typically high even
+ * with a small number of entries. In rare cases extent activity will straddle
+ * the boundary between two leaf nodes. Furthermore, an arena may use a
+ * combination of dss and mmap. Note that as memory usage grows past the amount
+ * that this cache can directly cover, the cache will become less effective if
+ * locality of reference is low, but the consequence is merely cache misses
+ * while traversing the tree nodes.
+ *
+ * The L1 direct-mapped cache offers a consistent, low cost on a cache hit.
+ * However, collisions can reduce the hit rate. This is mitigated by backing it
+ * with an L2 LRU cache, which requires a linear search and re-ordering on
+ * access but suffers no collisions. Note that the cache will itself suffer
+ * cache misses if made overly large, in addition to the cost of the linear
+ * search in the LRU cache.
+ */
+#define RTREE_CTX_LG_NCACHE 4
+#define RTREE_CTX_NCACHE (1 << RTREE_CTX_LG_NCACHE)
+#define RTREE_CTX_NCACHE_L2 8
+
+/*
+ * Zero initializer required for tsd initialization only. Proper initialization
+ * done via rtree_ctx_data_init().
+ */
+#define RTREE_CTX_ZERO_INITIALIZER {{{0}}}
+
+
+typedef struct rtree_leaf_elm_s rtree_leaf_elm_t;
+
+typedef struct rtree_ctx_cache_elm_s rtree_ctx_cache_elm_t;
+struct rtree_ctx_cache_elm_s {
+ uintptr_t leafkey;
+ rtree_leaf_elm_t *leaf;
+};
+
+typedef struct rtree_ctx_s rtree_ctx_t;
+struct rtree_ctx_s {
+ /* Direct mapped cache. */
+ rtree_ctx_cache_elm_t cache[RTREE_CTX_NCACHE];
+ /* L2 LRU cache. */
+ rtree_ctx_cache_elm_t l2_cache[RTREE_CTX_NCACHE_L2];
+};
+
+void rtree_ctx_data_init(rtree_ctx_t *ctx);
+
+#endif /* JEMALLOC_INTERNAL_RTREE_CTX_H */
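The comment above describes a two-level lookup cache: a direct-mapped L1 indexed by a slot derived from the key, backed by a small L2 array that is searched linearly and kept in rough LRU order by promoting a hit one position toward the front (the same policy implemented by RTREE_CACHE_CHECK_L2() in rtree.h). The sketch below models that policy in isolation; two_level_cache_t, cache_elm_t, and cache_lookup() are hypothetical names, and the miss path simply returns NULL where the real code falls back to rtree_leaf_elm_lookup_hard().

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define L1_SLOTS	16
#define L2_SLOTS	8

typedef struct {
	uintptr_t key;	/* 0 means the slot is empty. */
	void *value;
} cache_elm_t;

typedef struct {
	cache_elm_t l1[L1_SLOTS];	/* Direct-mapped. */
	cache_elm_t l2[L2_SLOTS];	/* LRU, searched linearly. */
} two_level_cache_t;

static size_t
l1_slot(uintptr_t key) {
	/* Direct map: low bits of the key choose the slot. */
	return (size_t)(key & (L1_SLOTS - 1));
}

static void *
cache_lookup(two_level_cache_t *c, uintptr_t key) {
	size_t slot = l1_slot(key);

	/* Fast path: L1 direct-mapped hit. */
	if (c->l1[slot].key == key) {
		return c->l1[slot].value;
	}

	/* Slow path: linear scan of the L2 LRU array. */
	for (size_t i = 0; i < L2_SLOTS; i++) {
		if (c->l2[i].key != key) {
			continue;
		}
		cache_elm_t hit = c->l2[i];
		/*
		 * Promote: bubble the hit up by one within L2, move the
		 * displaced L1 entry into L2, and install the hit in L1.
		 */
		size_t dst = (i > 0) ? i - 1 : 0;
		c->l2[i] = c->l2[dst];
		c->l2[dst] = c->l1[slot];
		c->l1[slot] = hit;
		return hit.value;
	}
	return NULL;	/* Miss: the real code falls back to a tree walk. */
}

int
main(void) {
	two_level_cache_t c = {{{0}}};
	int payload = 42;

	c.l2[3].key = 0x1000;
	c.l2[3].value = &payload;
	printf("first lookup:  %p\n", cache_lookup(&c, 0x1000));	/* L2 hit. */
	printf("second lookup: %p\n", cache_lookup(&c, 0x1000));	/* L1 hit. */
	return 0;
}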
diff --git a/contrib/jemalloc/include/jemalloc/internal/size_classes.h b/contrib/jemalloc/include/jemalloc/internal/size_classes.h
index b12d262bb06b..9fe4f93cbd8b 100644
--- a/contrib/jemalloc/include/jemalloc/internal/size_classes.h
+++ b/contrib/jemalloc/include/jemalloc/internal/size_classes.h
@@ -1,5490 +1,5544 @@
+#ifndef JEMALLOC_INTERNAL_SIZE_CLASSES_H
+#define JEMALLOC_INTERNAL_SIZE_CLASSES_H
+
/* This file was automatically generated by size_classes.sh. */
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
+
+#include "jemalloc/internal/jemalloc_internal_types.h"
/*
- * This header requires LG_SIZEOF_PTR, LG_TINY_MIN, LG_QUANTUM, and LG_PAGE to
- * be defined prior to inclusion, and it in turn defines:
+ * This header file defines:
*
* LG_SIZE_CLASS_GROUP: Lg of size class count for each size doubling.
+ * LG_TINY_MIN: Lg of minimum size class to support.
* SIZE_CLASSES: Complete table of SC(index, lg_grp, lg_delta, ndelta, psz,
- * bin, lg_delta_lookup) tuples.
+ * bin, pgs, lg_delta_lookup) tuples.
* index: Size class index.
* lg_grp: Lg group base size (no deltas added).
* lg_delta: Lg delta to previous size class.
* ndelta: Delta multiplier. size == 1<<lg_grp + ndelta<<lg_delta
* psz: 'yes' if a multiple of the page size, 'no' otherwise.
* bin: 'yes' if a small bin size class, 'no' otherwise.
+ * pgs: Slab page count if a small bin size class, 0 otherwise.
* lg_delta_lookup: Same as lg_delta if a lookup table size class, 'no'
* otherwise.
* NTBINS: Number of tiny bins.
* NLBINS: Number of bins supported by the lookup table.
* NBINS: Number of small size class bins.
* NSIZES: Number of size classes.
+ * LG_CEIL_NSIZES: Number of bits required to store NSIZES.
* NPSIZES: Number of size classes that are a multiple of (1U << LG_PAGE).
* LG_TINY_MAXCLASS: Lg of maximum tiny size class.
* LOOKUP_MAXCLASS: Maximum size class included in lookup table.
* SMALL_MAXCLASS: Maximum small size class.
* LG_LARGE_MINCLASS: Lg of minimum large size class.
- * HUGE_MAXCLASS: Maximum (huge) size class.
+ * LARGE_MAXCLASS: Maximum (large) size class.
*/
-#define LG_SIZE_CLASS_GROUP 2
+#define LG_SIZE_CLASS_GROUP 2
+#define LG_TINY_MIN 3
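Each SC() row encodes its class size as (1 << lg_grp) + (ndelta << lg_delta), per the header comment above. The short program below evaluates that formula for a few rows hand-copied from the first (LG_PAGE == 12) table as a sanity check; it does not include size_classes.h, and the selected rows are merely examples.

#include <stddef.h>
#include <stdio.h>

/* Rows copied by hand from the LG_PAGE == 12 table above. */
static const struct {
	int index, lg_grp, lg_delta, ndelta;
} sc_example[] = {
	{  0,  3,  3, 0 },	/* 8 bytes. */
	{ 35, 12, 10, 4 },	/* 4096 + 4*1024 = 8192 bytes. */
	{ 39, 13, 11, 4 },	/* 8192 + 4*2048 = 16384 bytes. */
};

int
main(void) {
	for (size_t i = 0; i < sizeof(sc_example) / sizeof(sc_example[0]);
	    i++) {
		size_t size = ((size_t)1 << sc_example[i].lg_grp) +
		    ((size_t)sc_example[i].ndelta << sc_example[i].lg_delta);
		printf("index %3d -> %zu bytes\n", sc_example[i].index, size);
	}
	return 0;
}

Note how index 39 in that table works out to 1 << LG_LARGE_MINCLASS for the LG_PAGE == 12 configuration, i.e. the first size class past SMALL_MAXCLASS.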
#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 12)
-#define SIZE_CLASSES \
- /* index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup */ \
- SC( 0, 3, 3, 0, no, yes, 3) \
- SC( 1, 3, 3, 1, no, yes, 3) \
- SC( 2, 3, 3, 2, no, yes, 3) \
- SC( 3, 3, 3, 3, no, yes, 3) \
- \
- SC( 4, 5, 3, 1, no, yes, 3) \
- SC( 5, 5, 3, 2, no, yes, 3) \
- SC( 6, 5, 3, 3, no, yes, 3) \
- SC( 7, 5, 3, 4, no, yes, 3) \
- \
- SC( 8, 6, 4, 1, no, yes, 4) \
- SC( 9, 6, 4, 2, no, yes, 4) \
- SC( 10, 6, 4, 3, no, yes, 4) \
- SC( 11, 6, 4, 4, no, yes, 4) \
- \
- SC( 12, 7, 5, 1, no, yes, 5) \
- SC( 13, 7, 5, 2, no, yes, 5) \
- SC( 14, 7, 5, 3, no, yes, 5) \
- SC( 15, 7, 5, 4, no, yes, 5) \
- \
- SC( 16, 8, 6, 1, no, yes, 6) \
- SC( 17, 8, 6, 2, no, yes, 6) \
- SC( 18, 8, 6, 3, no, yes, 6) \
- SC( 19, 8, 6, 4, no, yes, 6) \
- \
- SC( 20, 9, 7, 1, no, yes, 7) \
- SC( 21, 9, 7, 2, no, yes, 7) \
- SC( 22, 9, 7, 3, no, yes, 7) \
- SC( 23, 9, 7, 4, no, yes, 7) \
- \
- SC( 24, 10, 8, 1, no, yes, 8) \
- SC( 25, 10, 8, 2, no, yes, 8) \
- SC( 26, 10, 8, 3, no, yes, 8) \
- SC( 27, 10, 8, 4, no, yes, 8) \
- \
- SC( 28, 11, 9, 1, no, yes, 9) \
- SC( 29, 11, 9, 2, no, yes, 9) \
- SC( 30, 11, 9, 3, no, yes, 9) \
- SC( 31, 11, 9, 4, yes, yes, 9) \
- \
- SC( 32, 12, 10, 1, no, yes, no) \
- SC( 33, 12, 10, 2, no, yes, no) \
- SC( 34, 12, 10, 3, no, yes, no) \
- SC( 35, 12, 10, 4, yes, yes, no) \
- \
- SC( 36, 13, 11, 1, no, yes, no) \
- SC( 37, 13, 11, 2, yes, yes, no) \
- SC( 38, 13, 11, 3, no, yes, no) \
- SC( 39, 13, 11, 4, yes, no, no) \
- \
- SC( 40, 14, 12, 1, yes, no, no) \
- SC( 41, 14, 12, 2, yes, no, no) \
- SC( 42, 14, 12, 3, yes, no, no) \
- SC( 43, 14, 12, 4, yes, no, no) \
- \
- SC( 44, 15, 13, 1, yes, no, no) \
- SC( 45, 15, 13, 2, yes, no, no) \
- SC( 46, 15, 13, 3, yes, no, no) \
- SC( 47, 15, 13, 4, yes, no, no) \
- \
- SC( 48, 16, 14, 1, yes, no, no) \
- SC( 49, 16, 14, 2, yes, no, no) \
- SC( 50, 16, 14, 3, yes, no, no) \
- SC( 51, 16, 14, 4, yes, no, no) \
- \
- SC( 52, 17, 15, 1, yes, no, no) \
- SC( 53, 17, 15, 2, yes, no, no) \
- SC( 54, 17, 15, 3, yes, no, no) \
- SC( 55, 17, 15, 4, yes, no, no) \
- \
- SC( 56, 18, 16, 1, yes, no, no) \
- SC( 57, 18, 16, 2, yes, no, no) \
- SC( 58, 18, 16, 3, yes, no, no) \
- SC( 59, 18, 16, 4, yes, no, no) \
- \
- SC( 60, 19, 17, 1, yes, no, no) \
- SC( 61, 19, 17, 2, yes, no, no) \
- SC( 62, 19, 17, 3, yes, no, no) \
- SC( 63, 19, 17, 4, yes, no, no) \
- \
- SC( 64, 20, 18, 1, yes, no, no) \
- SC( 65, 20, 18, 2, yes, no, no) \
- SC( 66, 20, 18, 3, yes, no, no) \
- SC( 67, 20, 18, 4, yes, no, no) \
- \
- SC( 68, 21, 19, 1, yes, no, no) \
- SC( 69, 21, 19, 2, yes, no, no) \
- SC( 70, 21, 19, 3, yes, no, no) \
- SC( 71, 21, 19, 4, yes, no, no) \
- \
- SC( 72, 22, 20, 1, yes, no, no) \
- SC( 73, 22, 20, 2, yes, no, no) \
- SC( 74, 22, 20, 3, yes, no, no) \
- SC( 75, 22, 20, 4, yes, no, no) \
- \
- SC( 76, 23, 21, 1, yes, no, no) \
- SC( 77, 23, 21, 2, yes, no, no) \
- SC( 78, 23, 21, 3, yes, no, no) \
- SC( 79, 23, 21, 4, yes, no, no) \
- \
- SC( 80, 24, 22, 1, yes, no, no) \
- SC( 81, 24, 22, 2, yes, no, no) \
- SC( 82, 24, 22, 3, yes, no, no) \
- SC( 83, 24, 22, 4, yes, no, no) \
- \
- SC( 84, 25, 23, 1, yes, no, no) \
- SC( 85, 25, 23, 2, yes, no, no) \
- SC( 86, 25, 23, 3, yes, no, no) \
- SC( 87, 25, 23, 4, yes, no, no) \
- \
- SC( 88, 26, 24, 1, yes, no, no) \
- SC( 89, 26, 24, 2, yes, no, no) \
- SC( 90, 26, 24, 3, yes, no, no) \
- SC( 91, 26, 24, 4, yes, no, no) \
- \
- SC( 92, 27, 25, 1, yes, no, no) \
- SC( 93, 27, 25, 2, yes, no, no) \
- SC( 94, 27, 25, 3, yes, no, no) \
- SC( 95, 27, 25, 4, yes, no, no) \
- \
- SC( 96, 28, 26, 1, yes, no, no) \
- SC( 97, 28, 26, 2, yes, no, no) \
- SC( 98, 28, 26, 3, yes, no, no) \
- SC( 99, 28, 26, 4, yes, no, no) \
- \
- SC(100, 29, 27, 1, yes, no, no) \
- SC(101, 29, 27, 2, yes, no, no) \
- SC(102, 29, 27, 3, yes, no, no) \
- SC(103, 29, 27, 4, yes, no, no) \
- \
- SC(104, 30, 28, 1, yes, no, no) \
- SC(105, 30, 28, 2, yes, no, no) \
- SC(106, 30, 28, 3, yes, no, no) \
+#define SIZE_CLASSES \
+ /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \
+ SC( 0, 3, 3, 0, no, yes, 1, 3) \
+ SC( 1, 3, 3, 1, no, yes, 1, 3) \
+ SC( 2, 3, 3, 2, no, yes, 3, 3) \
+ SC( 3, 3, 3, 3, no, yes, 1, 3) \
+ \
+ SC( 4, 5, 3, 1, no, yes, 5, 3) \
+ SC( 5, 5, 3, 2, no, yes, 3, 3) \
+ SC( 6, 5, 3, 3, no, yes, 7, 3) \
+ SC( 7, 5, 3, 4, no, yes, 1, 3) \
+ \
+ SC( 8, 6, 4, 1, no, yes, 5, 4) \
+ SC( 9, 6, 4, 2, no, yes, 3, 4) \
+ SC( 10, 6, 4, 3, no, yes, 7, 4) \
+ SC( 11, 6, 4, 4, no, yes, 1, 4) \
+ \
+ SC( 12, 7, 5, 1, no, yes, 5, 5) \
+ SC( 13, 7, 5, 2, no, yes, 3, 5) \
+ SC( 14, 7, 5, 3, no, yes, 7, 5) \
+ SC( 15, 7, 5, 4, no, yes, 1, 5) \
+ \
+ SC( 16, 8, 6, 1, no, yes, 5, 6) \
+ SC( 17, 8, 6, 2, no, yes, 3, 6) \
+ SC( 18, 8, 6, 3, no, yes, 7, 6) \
+ SC( 19, 8, 6, 4, no, yes, 1, 6) \
+ \
+ SC( 20, 9, 7, 1, no, yes, 5, 7) \
+ SC( 21, 9, 7, 2, no, yes, 3, 7) \
+ SC( 22, 9, 7, 3, no, yes, 7, 7) \
+ SC( 23, 9, 7, 4, no, yes, 1, 7) \
+ \
+ SC( 24, 10, 8, 1, no, yes, 5, 8) \
+ SC( 25, 10, 8, 2, no, yes, 3, 8) \
+ SC( 26, 10, 8, 3, no, yes, 7, 8) \
+ SC( 27, 10, 8, 4, no, yes, 1, 8) \
+ \
+ SC( 28, 11, 9, 1, no, yes, 5, 9) \
+ SC( 29, 11, 9, 2, no, yes, 3, 9) \
+ SC( 30, 11, 9, 3, no, yes, 7, 9) \
+ SC( 31, 11, 9, 4, yes, yes, 1, 9) \
+ \
+ SC( 32, 12, 10, 1, no, yes, 5, no) \
+ SC( 33, 12, 10, 2, no, yes, 3, no) \
+ SC( 34, 12, 10, 3, no, yes, 7, no) \
+ SC( 35, 12, 10, 4, yes, yes, 2, no) \
+ \
+ SC( 36, 13, 11, 1, no, yes, 5, no) \
+ SC( 37, 13, 11, 2, yes, yes, 3, no) \
+ SC( 38, 13, 11, 3, no, yes, 7, no) \
+ SC( 39, 13, 11, 4, yes, no, 0, no) \
+ \
+ SC( 40, 14, 12, 1, yes, no, 0, no) \
+ SC( 41, 14, 12, 2, yes, no, 0, no) \
+ SC( 42, 14, 12, 3, yes, no, 0, no) \
+ SC( 43, 14, 12, 4, yes, no, 0, no) \
+ \
+ SC( 44, 15, 13, 1, yes, no, 0, no) \
+ SC( 45, 15, 13, 2, yes, no, 0, no) \
+ SC( 46, 15, 13, 3, yes, no, 0, no) \
+ SC( 47, 15, 13, 4, yes, no, 0, no) \
+ \
+ SC( 48, 16, 14, 1, yes, no, 0, no) \
+ SC( 49, 16, 14, 2, yes, no, 0, no) \
+ SC( 50, 16, 14, 3, yes, no, 0, no) \
+ SC( 51, 16, 14, 4, yes, no, 0, no) \
+ \
+ SC( 52, 17, 15, 1, yes, no, 0, no) \
+ SC( 53, 17, 15, 2, yes, no, 0, no) \
+ SC( 54, 17, 15, 3, yes, no, 0, no) \
+ SC( 55, 17, 15, 4, yes, no, 0, no) \
+ \
+ SC( 56, 18, 16, 1, yes, no, 0, no) \
+ SC( 57, 18, 16, 2, yes, no, 0, no) \
+ SC( 58, 18, 16, 3, yes, no, 0, no) \
+ SC( 59, 18, 16, 4, yes, no, 0, no) \
+ \
+ SC( 60, 19, 17, 1, yes, no, 0, no) \
+ SC( 61, 19, 17, 2, yes, no, 0, no) \
+ SC( 62, 19, 17, 3, yes, no, 0, no) \
+ SC( 63, 19, 17, 4, yes, no, 0, no) \
+ \
+ SC( 64, 20, 18, 1, yes, no, 0, no) \
+ SC( 65, 20, 18, 2, yes, no, 0, no) \
+ SC( 66, 20, 18, 3, yes, no, 0, no) \
+ SC( 67, 20, 18, 4, yes, no, 0, no) \
+ \
+ SC( 68, 21, 19, 1, yes, no, 0, no) \
+ SC( 69, 21, 19, 2, yes, no, 0, no) \
+ SC( 70, 21, 19, 3, yes, no, 0, no) \
+ SC( 71, 21, 19, 4, yes, no, 0, no) \
+ \
+ SC( 72, 22, 20, 1, yes, no, 0, no) \
+ SC( 73, 22, 20, 2, yes, no, 0, no) \
+ SC( 74, 22, 20, 3, yes, no, 0, no) \
+ SC( 75, 22, 20, 4, yes, no, 0, no) \
+ \
+ SC( 76, 23, 21, 1, yes, no, 0, no) \
+ SC( 77, 23, 21, 2, yes, no, 0, no) \
+ SC( 78, 23, 21, 3, yes, no, 0, no) \
+ SC( 79, 23, 21, 4, yes, no, 0, no) \
+ \
+ SC( 80, 24, 22, 1, yes, no, 0, no) \
+ SC( 81, 24, 22, 2, yes, no, 0, no) \
+ SC( 82, 24, 22, 3, yes, no, 0, no) \
+ SC( 83, 24, 22, 4, yes, no, 0, no) \
+ \
+ SC( 84, 25, 23, 1, yes, no, 0, no) \
+ SC( 85, 25, 23, 2, yes, no, 0, no) \
+ SC( 86, 25, 23, 3, yes, no, 0, no) \
+ SC( 87, 25, 23, 4, yes, no, 0, no) \
+ \
+ SC( 88, 26, 24, 1, yes, no, 0, no) \
+ SC( 89, 26, 24, 2, yes, no, 0, no) \
+ SC( 90, 26, 24, 3, yes, no, 0, no) \
+ SC( 91, 26, 24, 4, yes, no, 0, no) \
+ \
+ SC( 92, 27, 25, 1, yes, no, 0, no) \
+ SC( 93, 27, 25, 2, yes, no, 0, no) \
+ SC( 94, 27, 25, 3, yes, no, 0, no) \
+ SC( 95, 27, 25, 4, yes, no, 0, no) \
+ \
+ SC( 96, 28, 26, 1, yes, no, 0, no) \
+ SC( 97, 28, 26, 2, yes, no, 0, no) \
+ SC( 98, 28, 26, 3, yes, no, 0, no) \
+ SC( 99, 28, 26, 4, yes, no, 0, no) \
+ \
+ SC(100, 29, 27, 1, yes, no, 0, no) \
+ SC(101, 29, 27, 2, yes, no, 0, no) \
+ SC(102, 29, 27, 3, yes, no, 0, no) \
+ SC(103, 29, 27, 4, yes, no, 0, no) \
+ \
+ SC(104, 30, 28, 1, yes, no, 0, no) \
+ SC(105, 30, 28, 2, yes, no, 0, no) \
+ SC(106, 30, 28, 3, yes, no, 0, no) \
-#define SIZE_CLASSES_DEFINED
-#define NTBINS 0
-#define NLBINS 32
-#define NBINS 39
-#define NSIZES 107
-#define NPSIZES 71
-#define LG_TINY_MAXCLASS "NA"
-#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
-#define SMALL_MAXCLASS ((((size_t)1) << 13) + (((size_t)3) << 11))
-#define LG_LARGE_MINCLASS 14
-#define HUGE_MAXCLASS ((((size_t)1) << 30) + (((size_t)3) << 28))
+#define SIZE_CLASSES_DEFINED
+#define NTBINS 0
+#define NLBINS 32
+#define NBINS 39
+#define NSIZES 107
+#define LG_CEIL_NSIZES 7
+#define NPSIZES 71
+#define LG_TINY_MAXCLASS "NA"
+#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
+#define SMALL_MAXCLASS ((((size_t)1) << 13) + (((size_t)3) << 11))
+#define LG_LARGE_MINCLASS 14
+#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)
+#define LARGE_MAXCLASS ((((size_t)1) << 30) + (((size_t)3) << 28))
#endif
#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 13)
-#define SIZE_CLASSES \
- /* index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup */ \
- SC( 0, 3, 3, 0, no, yes, 3) \
- SC( 1, 3, 3, 1, no, yes, 3) \
- SC( 2, 3, 3, 2, no, yes, 3) \
- SC( 3, 3, 3, 3, no, yes, 3) \
- \
- SC( 4, 5, 3, 1, no, yes, 3) \
- SC( 5, 5, 3, 2, no, yes, 3) \
- SC( 6, 5, 3, 3, no, yes, 3) \
- SC( 7, 5, 3, 4, no, yes, 3) \
- \
- SC( 8, 6, 4, 1, no, yes, 4) \
- SC( 9, 6, 4, 2, no, yes, 4) \
- SC( 10, 6, 4, 3, no, yes, 4) \
- SC( 11, 6, 4, 4, no, yes, 4) \
- \
- SC( 12, 7, 5, 1, no, yes, 5) \
- SC( 13, 7, 5, 2, no, yes, 5) \
- SC( 14, 7, 5, 3, no, yes, 5) \
- SC( 15, 7, 5, 4, no, yes, 5) \
- \
- SC( 16, 8, 6, 1, no, yes, 6) \
- SC( 17, 8, 6, 2, no, yes, 6) \
- SC( 18, 8, 6, 3, no, yes, 6) \
- SC( 19, 8, 6, 4, no, yes, 6) \
- \
- SC( 20, 9, 7, 1, no, yes, 7) \
- SC( 21, 9, 7, 2, no, yes, 7) \
- SC( 22, 9, 7, 3, no, yes, 7) \
- SC( 23, 9, 7, 4, no, yes, 7) \
- \
- SC( 24, 10, 8, 1, no, yes, 8) \
- SC( 25, 10, 8, 2, no, yes, 8) \
- SC( 26, 10, 8, 3, no, yes, 8) \
- SC( 27, 10, 8, 4, no, yes, 8) \
- \
- SC( 28, 11, 9, 1, no, yes, 9) \
- SC( 29, 11, 9, 2, no, yes, 9) \
- SC( 30, 11, 9, 3, no, yes, 9) \
- SC( 31, 11, 9, 4, no, yes, 9) \
- \
- SC( 32, 12, 10, 1, no, yes, no) \
- SC( 33, 12, 10, 2, no, yes, no) \
- SC( 34, 12, 10, 3, no, yes, no) \
- SC( 35, 12, 10, 4, yes, yes, no) \
- \
- SC( 36, 13, 11, 1, no, yes, no) \
- SC( 37, 13, 11, 2, no, yes, no) \
- SC( 38, 13, 11, 3, no, yes, no) \
- SC( 39, 13, 11, 4, yes, yes, no) \
- \
- SC( 40, 14, 12, 1, no, yes, no) \
- SC( 41, 14, 12, 2, yes, yes, no) \
- SC( 42, 14, 12, 3, no, yes, no) \
- SC( 43, 14, 12, 4, yes, no, no) \
- \
- SC( 44, 15, 13, 1, yes, no, no) \
- SC( 45, 15, 13, 2, yes, no, no) \
- SC( 46, 15, 13, 3, yes, no, no) \
- SC( 47, 15, 13, 4, yes, no, no) \
- \
- SC( 48, 16, 14, 1, yes, no, no) \
- SC( 49, 16, 14, 2, yes, no, no) \
- SC( 50, 16, 14, 3, yes, no, no) \
- SC( 51, 16, 14, 4, yes, no, no) \
- \
- SC( 52, 17, 15, 1, yes, no, no) \
- SC( 53, 17, 15, 2, yes, no, no) \
- SC( 54, 17, 15, 3, yes, no, no) \
- SC( 55, 17, 15, 4, yes, no, no) \
- \
- SC( 56, 18, 16, 1, yes, no, no) \
- SC( 57, 18, 16, 2, yes, no, no) \
- SC( 58, 18, 16, 3, yes, no, no) \
- SC( 59, 18, 16, 4, yes, no, no) \
- \
- SC( 60, 19, 17, 1, yes, no, no) \
- SC( 61, 19, 17, 2, yes, no, no) \
- SC( 62, 19, 17, 3, yes, no, no) \
- SC( 63, 19, 17, 4, yes, no, no) \
- \
- SC( 64, 20, 18, 1, yes, no, no) \
- SC( 65, 20, 18, 2, yes, no, no) \
- SC( 66, 20, 18, 3, yes, no, no) \
- SC( 67, 20, 18, 4, yes, no, no) \
- \
- SC( 68, 21, 19, 1, yes, no, no) \
- SC( 69, 21, 19, 2, yes, no, no) \
- SC( 70, 21, 19, 3, yes, no, no) \
- SC( 71, 21, 19, 4, yes, no, no) \
- \
- SC( 72, 22, 20, 1, yes, no, no) \
- SC( 73, 22, 20, 2, yes, no, no) \
- SC( 74, 22, 20, 3, yes, no, no) \
- SC( 75, 22, 20, 4, yes, no, no) \
- \
- SC( 76, 23, 21, 1, yes, no, no) \
- SC( 77, 23, 21, 2, yes, no, no) \
- SC( 78, 23, 21, 3, yes, no, no) \
- SC( 79, 23, 21, 4, yes, no, no) \
- \
- SC( 80, 24, 22, 1, yes, no, no) \
- SC( 81, 24, 22, 2, yes, no, no) \
- SC( 82, 24, 22, 3, yes, no, no) \
- SC( 83, 24, 22, 4, yes, no, no) \
- \
- SC( 84, 25, 23, 1, yes, no, no) \
- SC( 85, 25, 23, 2, yes, no, no) \
- SC( 86, 25, 23, 3, yes, no, no) \
- SC( 87, 25, 23, 4, yes, no, no) \
- \
- SC( 88, 26, 24, 1, yes, no, no) \
- SC( 89, 26, 24, 2, yes, no, no) \
- SC( 90, 26, 24, 3, yes, no, no) \
- SC( 91, 26, 24, 4, yes, no, no) \
- \
- SC( 92, 27, 25, 1, yes, no, no) \
- SC( 93, 27, 25, 2, yes, no, no) \
- SC( 94, 27, 25, 3, yes, no, no) \
- SC( 95, 27, 25, 4, yes, no, no) \
- \
- SC( 96, 28, 26, 1, yes, no, no) \
- SC( 97, 28, 26, 2, yes, no, no) \
- SC( 98, 28, 26, 3, yes, no, no) \
- SC( 99, 28, 26, 4, yes, no, no) \
- \
- SC(100, 29, 27, 1, yes, no, no) \
- SC(101, 29, 27, 2, yes, no, no) \
- SC(102, 29, 27, 3, yes, no, no) \
- SC(103, 29, 27, 4, yes, no, no) \
- \
- SC(104, 30, 28, 1, yes, no, no) \
- SC(105, 30, 28, 2, yes, no, no) \
- SC(106, 30, 28, 3, yes, no, no) \
+#define SIZE_CLASSES \
+ /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \
+ SC( 0, 3, 3, 0, no, yes, 1, 3) \
+ SC( 1, 3, 3, 1, no, yes, 1, 3) \
+ SC( 2, 3, 3, 2, no, yes, 3, 3) \
+ SC( 3, 3, 3, 3, no, yes, 1, 3) \
+ \
+ SC( 4, 5, 3, 1, no, yes, 5, 3) \
+ SC( 5, 5, 3, 2, no, yes, 3, 3) \
+ SC( 6, 5, 3, 3, no, yes, 7, 3) \
+ SC( 7, 5, 3, 4, no, yes, 1, 3) \
+ \
+ SC( 8, 6, 4, 1, no, yes, 5, 4) \
+ SC( 9, 6, 4, 2, no, yes, 3, 4) \
+ SC( 10, 6, 4, 3, no, yes, 7, 4) \
+ SC( 11, 6, 4, 4, no, yes, 1, 4) \
+ \
+ SC( 12, 7, 5, 1, no, yes, 5, 5) \
+ SC( 13, 7, 5, 2, no, yes, 3, 5) \
+ SC( 14, 7, 5, 3, no, yes, 7, 5) \
+ SC( 15, 7, 5, 4, no, yes, 1, 5) \
+ \
+ SC( 16, 8, 6, 1, no, yes, 5, 6) \
+ SC( 17, 8, 6, 2, no, yes, 3, 6) \
+ SC( 18, 8, 6, 3, no, yes, 7, 6) \
+ SC( 19, 8, 6, 4, no, yes, 1, 6) \
+ \
+ SC( 20, 9, 7, 1, no, yes, 5, 7) \
+ SC( 21, 9, 7, 2, no, yes, 3, 7) \
+ SC( 22, 9, 7, 3, no, yes, 7, 7) \
+ SC( 23, 9, 7, 4, no, yes, 1, 7) \
+ \
+ SC( 24, 10, 8, 1, no, yes, 5, 8) \
+ SC( 25, 10, 8, 2, no, yes, 3, 8) \
+ SC( 26, 10, 8, 3, no, yes, 7, 8) \
+ SC( 27, 10, 8, 4, no, yes, 1, 8) \
+ \
+ SC( 28, 11, 9, 1, no, yes, 5, 9) \
+ SC( 29, 11, 9, 2, no, yes, 3, 9) \
+ SC( 30, 11, 9, 3, no, yes, 7, 9) \
+ SC( 31, 11, 9, 4, no, yes, 1, 9) \
+ \
+ SC( 32, 12, 10, 1, no, yes, 5, no) \
+ SC( 33, 12, 10, 2, no, yes, 3, no) \
+ SC( 34, 12, 10, 3, no, yes, 7, no) \
+ SC( 35, 12, 10, 4, yes, yes, 1, no) \
+ \
+ SC( 36, 13, 11, 1, no, yes, 5, no) \
+ SC( 37, 13, 11, 2, no, yes, 3, no) \
+ SC( 38, 13, 11, 3, no, yes, 7, no) \
+ SC( 39, 13, 11, 4, yes, yes, 2, no) \
+ \
+ SC( 40, 14, 12, 1, no, yes, 5, no) \
+ SC( 41, 14, 12, 2, yes, yes, 3, no) \
+ SC( 42, 14, 12, 3, no, yes, 7, no) \
+ SC( 43, 14, 12, 4, yes, no, 0, no) \
+ \
+ SC( 44, 15, 13, 1, yes, no, 0, no) \
+ SC( 45, 15, 13, 2, yes, no, 0, no) \
+ SC( 46, 15, 13, 3, yes, no, 0, no) \
+ SC( 47, 15, 13, 4, yes, no, 0, no) \
+ \
+ SC( 48, 16, 14, 1, yes, no, 0, no) \
+ SC( 49, 16, 14, 2, yes, no, 0, no) \
+ SC( 50, 16, 14, 3, yes, no, 0, no) \
+ SC( 51, 16, 14, 4, yes, no, 0, no) \
+ \
+ SC( 52, 17, 15, 1, yes, no, 0, no) \
+ SC( 53, 17, 15, 2, yes, no, 0, no) \
+ SC( 54, 17, 15, 3, yes, no, 0, no) \
+ SC( 55, 17, 15, 4, yes, no, 0, no) \
+ \
+ SC( 56, 18, 16, 1, yes, no, 0, no) \
+ SC( 57, 18, 16, 2, yes, no, 0, no) \
+ SC( 58, 18, 16, 3, yes, no, 0, no) \
+ SC( 59, 18, 16, 4, yes, no, 0, no) \
+ \
+ SC( 60, 19, 17, 1, yes, no, 0, no) \
+ SC( 61, 19, 17, 2, yes, no, 0, no) \
+ SC( 62, 19, 17, 3, yes, no, 0, no) \
+ SC( 63, 19, 17, 4, yes, no, 0, no) \
+ \
+ SC( 64, 20, 18, 1, yes, no, 0, no) \
+ SC( 65, 20, 18, 2, yes, no, 0, no) \
+ SC( 66, 20, 18, 3, yes, no, 0, no) \
+ SC( 67, 20, 18, 4, yes, no, 0, no) \
+ \
+ SC( 68, 21, 19, 1, yes, no, 0, no) \
+ SC( 69, 21, 19, 2, yes, no, 0, no) \
+ SC( 70, 21, 19, 3, yes, no, 0, no) \
+ SC( 71, 21, 19, 4, yes, no, 0, no) \
+ \
+ SC( 72, 22, 20, 1, yes, no, 0, no) \
+ SC( 73, 22, 20, 2, yes, no, 0, no) \
+ SC( 74, 22, 20, 3, yes, no, 0, no) \
+ SC( 75, 22, 20, 4, yes, no, 0, no) \
+ \
+ SC( 76, 23, 21, 1, yes, no, 0, no) \
+ SC( 77, 23, 21, 2, yes, no, 0, no) \
+ SC( 78, 23, 21, 3, yes, no, 0, no) \
+ SC( 79, 23, 21, 4, yes, no, 0, no) \
+ \
+ SC( 80, 24, 22, 1, yes, no, 0, no) \
+ SC( 81, 24, 22, 2, yes, no, 0, no) \
+ SC( 82, 24, 22, 3, yes, no, 0, no) \
+ SC( 83, 24, 22, 4, yes, no, 0, no) \
+ \
+ SC( 84, 25, 23, 1, yes, no, 0, no) \
+ SC( 85, 25, 23, 2, yes, no, 0, no) \
+ SC( 86, 25, 23, 3, yes, no, 0, no) \
+ SC( 87, 25, 23, 4, yes, no, 0, no) \
+ \
+ SC( 88, 26, 24, 1, yes, no, 0, no) \
+ SC( 89, 26, 24, 2, yes, no, 0, no) \
+ SC( 90, 26, 24, 3, yes, no, 0, no) \
+ SC( 91, 26, 24, 4, yes, no, 0, no) \
+ \
+ SC( 92, 27, 25, 1, yes, no, 0, no) \
+ SC( 93, 27, 25, 2, yes, no, 0, no) \
+ SC( 94, 27, 25, 3, yes, no, 0, no) \
+ SC( 95, 27, 25, 4, yes, no, 0, no) \
+ \
+ SC( 96, 28, 26, 1, yes, no, 0, no) \
+ SC( 97, 28, 26, 2, yes, no, 0, no) \
+ SC( 98, 28, 26, 3, yes, no, 0, no) \
+ SC( 99, 28, 26, 4, yes, no, 0, no) \
+ \
+ SC(100, 29, 27, 1, yes, no, 0, no) \
+ SC(101, 29, 27, 2, yes, no, 0, no) \
+ SC(102, 29, 27, 3, yes, no, 0, no) \
+ SC(103, 29, 27, 4, yes, no, 0, no) \
+ \
+ SC(104, 30, 28, 1, yes, no, 0, no) \
+ SC(105, 30, 28, 2, yes, no, 0, no) \
+ SC(106, 30, 28, 3, yes, no, 0, no) \
-#define SIZE_CLASSES_DEFINED
-#define NTBINS 0
-#define NLBINS 32
-#define NBINS 43
-#define NSIZES 107
-#define NPSIZES 67
-#define LG_TINY_MAXCLASS "NA"
-#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
-#define SMALL_MAXCLASS ((((size_t)1) << 14) + (((size_t)3) << 12))
-#define LG_LARGE_MINCLASS 15
-#define HUGE_MAXCLASS ((((size_t)1) << 30) + (((size_t)3) << 28))
+#define SIZE_CLASSES_DEFINED
+#define NTBINS 0
+#define NLBINS 32
+#define NBINS 43
+#define NSIZES 107
+#define LG_CEIL_NSIZES 7
+#define NPSIZES 67
+#define LG_TINY_MAXCLASS "NA"
+#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
+#define SMALL_MAXCLASS ((((size_t)1) << 14) + (((size_t)3) << 12))
+#define LG_LARGE_MINCLASS 15
+#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)
+#define LARGE_MAXCLASS ((((size_t)1) << 30) + (((size_t)3) << 28))
#endif
#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 14)
-#define SIZE_CLASSES \
- /* index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup */ \
- SC( 0, 3, 3, 0, no, yes, 3) \
- SC( 1, 3, 3, 1, no, yes, 3) \
- SC( 2, 3, 3, 2, no, yes, 3) \
- SC( 3, 3, 3, 3, no, yes, 3) \
- \
- SC( 4, 5, 3, 1, no, yes, 3) \
- SC( 5, 5, 3, 2, no, yes, 3) \
- SC( 6, 5, 3, 3, no, yes, 3) \
- SC( 7, 5, 3, 4, no, yes, 3) \
- \
- SC( 8, 6, 4, 1, no, yes, 4) \
- SC( 9, 6, 4, 2, no, yes, 4) \
- SC( 10, 6, 4, 3, no, yes, 4) \
- SC( 11, 6, 4, 4, no, yes, 4) \
- \
- SC( 12, 7, 5, 1, no, yes, 5) \
- SC( 13, 7, 5, 2, no, yes, 5) \
- SC( 14, 7, 5, 3, no, yes, 5) \
- SC( 15, 7, 5, 4, no, yes, 5) \
- \
- SC( 16, 8, 6, 1, no, yes, 6) \
- SC( 17, 8, 6, 2, no, yes, 6) \
- SC( 18, 8, 6, 3, no, yes, 6) \
- SC( 19, 8, 6, 4, no, yes, 6) \
- \
- SC( 20, 9, 7, 1, no, yes, 7) \
- SC( 21, 9, 7, 2, no, yes, 7) \
- SC( 22, 9, 7, 3, no, yes, 7) \
- SC( 23, 9, 7, 4, no, yes, 7) \
- \
- SC( 24, 10, 8, 1, no, yes, 8) \
- SC( 25, 10, 8, 2, no, yes, 8) \
- SC( 26, 10, 8, 3, no, yes, 8) \
- SC( 27, 10, 8, 4, no, yes, 8) \
- \
- SC( 28, 11, 9, 1, no, yes, 9) \
- SC( 29, 11, 9, 2, no, yes, 9) \
- SC( 30, 11, 9, 3, no, yes, 9) \
- SC( 31, 11, 9, 4, no, yes, 9) \
- \
- SC( 32, 12, 10, 1, no, yes, no) \
- SC( 33, 12, 10, 2, no, yes, no) \
- SC( 34, 12, 10, 3, no, yes, no) \
- SC( 35, 12, 10, 4, no, yes, no) \
- \
- SC( 36, 13, 11, 1, no, yes, no) \
- SC( 37, 13, 11, 2, no, yes, no) \
- SC( 38, 13, 11, 3, no, yes, no) \
- SC( 39, 13, 11, 4, yes, yes, no) \
- \
- SC( 40, 14, 12, 1, no, yes, no) \
- SC( 41, 14, 12, 2, no, yes, no) \
- SC( 42, 14, 12, 3, no, yes, no) \
- SC( 43, 14, 12, 4, yes, yes, no) \
- \
- SC( 44, 15, 13, 1, no, yes, no) \
- SC( 45, 15, 13, 2, yes, yes, no) \
- SC( 46, 15, 13, 3, no, yes, no) \
- SC( 47, 15, 13, 4, yes, no, no) \
- \
- SC( 48, 16, 14, 1, yes, no, no) \
- SC( 49, 16, 14, 2, yes, no, no) \
- SC( 50, 16, 14, 3, yes, no, no) \
- SC( 51, 16, 14, 4, yes, no, no) \
- \
- SC( 52, 17, 15, 1, yes, no, no) \
- SC( 53, 17, 15, 2, yes, no, no) \
- SC( 54, 17, 15, 3, yes, no, no) \
- SC( 55, 17, 15, 4, yes, no, no) \
- \
- SC( 56, 18, 16, 1, yes, no, no) \
- SC( 57, 18, 16, 2, yes, no, no) \
- SC( 58, 18, 16, 3, yes, no, no) \
- SC( 59, 18, 16, 4, yes, no, no) \
- \
- SC( 60, 19, 17, 1, yes, no, no) \
- SC( 61, 19, 17, 2, yes, no, no) \
- SC( 62, 19, 17, 3, yes, no, no) \
- SC( 63, 19, 17, 4, yes, no, no) \
- \
- SC( 64, 20, 18, 1, yes, no, no) \
- SC( 65, 20, 18, 2, yes, no, no) \
- SC( 66, 20, 18, 3, yes, no, no) \
- SC( 67, 20, 18, 4, yes, no, no) \
- \
- SC( 68, 21, 19, 1, yes, no, no) \
- SC( 69, 21, 19, 2, yes, no, no) \
- SC( 70, 21, 19, 3, yes, no, no) \
- SC( 71, 21, 19, 4, yes, no, no) \
- \
- SC( 72, 22, 20, 1, yes, no, no) \
- SC( 73, 22, 20, 2, yes, no, no) \
- SC( 74, 22, 20, 3, yes, no, no) \
- SC( 75, 22, 20, 4, yes, no, no) \
- \
- SC( 76, 23, 21, 1, yes, no, no) \
- SC( 77, 23, 21, 2, yes, no, no) \
- SC( 78, 23, 21, 3, yes, no, no) \
- SC( 79, 23, 21, 4, yes, no, no) \
- \
- SC( 80, 24, 22, 1, yes, no, no) \
- SC( 81, 24, 22, 2, yes, no, no) \
- SC( 82, 24, 22, 3, yes, no, no) \
- SC( 83, 24, 22, 4, yes, no, no) \
- \
- SC( 84, 25, 23, 1, yes, no, no) \
- SC( 85, 25, 23, 2, yes, no, no) \
- SC( 86, 25, 23, 3, yes, no, no) \
- SC( 87, 25, 23, 4, yes, no, no) \
- \
- SC( 88, 26, 24, 1, yes, no, no) \
- SC( 89, 26, 24, 2, yes, no, no) \
- SC( 90, 26, 24, 3, yes, no, no) \
- SC( 91, 26, 24, 4, yes, no, no) \
- \
- SC( 92, 27, 25, 1, yes, no, no) \
- SC( 93, 27, 25, 2, yes, no, no) \
- SC( 94, 27, 25, 3, yes, no, no) \
- SC( 95, 27, 25, 4, yes, no, no) \
- \
- SC( 96, 28, 26, 1, yes, no, no) \
- SC( 97, 28, 26, 2, yes, no, no) \
- SC( 98, 28, 26, 3, yes, no, no) \
- SC( 99, 28, 26, 4, yes, no, no) \
- \
- SC(100, 29, 27, 1, yes, no, no) \
- SC(101, 29, 27, 2, yes, no, no) \
- SC(102, 29, 27, 3, yes, no, no) \
- SC(103, 29, 27, 4, yes, no, no) \
- \
- SC(104, 30, 28, 1, yes, no, no) \
- SC(105, 30, 28, 2, yes, no, no) \
- SC(106, 30, 28, 3, yes, no, no) \
+#define SIZE_CLASSES \
+ /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \
+ SC( 0, 3, 3, 0, no, yes, 1, 3) \
+ SC( 1, 3, 3, 1, no, yes, 1, 3) \
+ SC( 2, 3, 3, 2, no, yes, 3, 3) \
+ SC( 3, 3, 3, 3, no, yes, 1, 3) \
+ \
+ SC( 4, 5, 3, 1, no, yes, 5, 3) \
+ SC( 5, 5, 3, 2, no, yes, 3, 3) \
+ SC( 6, 5, 3, 3, no, yes, 7, 3) \
+ SC( 7, 5, 3, 4, no, yes, 1, 3) \
+ \
+ SC( 8, 6, 4, 1, no, yes, 5, 4) \
+ SC( 9, 6, 4, 2, no, yes, 3, 4) \
+ SC( 10, 6, 4, 3, no, yes, 7, 4) \
+ SC( 11, 6, 4, 4, no, yes, 1, 4) \
+ \
+ SC( 12, 7, 5, 1, no, yes, 5, 5) \
+ SC( 13, 7, 5, 2, no, yes, 3, 5) \
+ SC( 14, 7, 5, 3, no, yes, 7, 5) \
+ SC( 15, 7, 5, 4, no, yes, 1, 5) \
+ \
+ SC( 16, 8, 6, 1, no, yes, 5, 6) \
+ SC( 17, 8, 6, 2, no, yes, 3, 6) \
+ SC( 18, 8, 6, 3, no, yes, 7, 6) \
+ SC( 19, 8, 6, 4, no, yes, 1, 6) \
+ \
+ SC( 20, 9, 7, 1, no, yes, 5, 7) \
+ SC( 21, 9, 7, 2, no, yes, 3, 7) \
+ SC( 22, 9, 7, 3, no, yes, 7, 7) \
+ SC( 23, 9, 7, 4, no, yes, 1, 7) \
+ \
+ SC( 24, 10, 8, 1, no, yes, 5, 8) \
+ SC( 25, 10, 8, 2, no, yes, 3, 8) \
+ SC( 26, 10, 8, 3, no, yes, 7, 8) \
+ SC( 27, 10, 8, 4, no, yes, 1, 8) \
+ \
+ SC( 28, 11, 9, 1, no, yes, 5, 9) \
+ SC( 29, 11, 9, 2, no, yes, 3, 9) \
+ SC( 30, 11, 9, 3, no, yes, 7, 9) \
+ SC( 31, 11, 9, 4, no, yes, 1, 9) \
+ \
+ SC( 32, 12, 10, 1, no, yes, 5, no) \
+ SC( 33, 12, 10, 2, no, yes, 3, no) \
+ SC( 34, 12, 10, 3, no, yes, 7, no) \
+ SC( 35, 12, 10, 4, no, yes, 1, no) \
+ \
+ SC( 36, 13, 11, 1, no, yes, 5, no) \
+ SC( 37, 13, 11, 2, no, yes, 3, no) \
+ SC( 38, 13, 11, 3, no, yes, 7, no) \
+ SC( 39, 13, 11, 4, yes, yes, 1, no) \
+ \
+ SC( 40, 14, 12, 1, no, yes, 5, no) \
+ SC( 41, 14, 12, 2, no, yes, 3, no) \
+ SC( 42, 14, 12, 3, no, yes, 7, no) \
+ SC( 43, 14, 12, 4, yes, yes, 2, no) \
+ \
+ SC( 44, 15, 13, 1, no, yes, 5, no) \
+ SC( 45, 15, 13, 2, yes, yes, 3, no) \
+ SC( 46, 15, 13, 3, no, yes, 7, no) \
+ SC( 47, 15, 13, 4, yes, no, 0, no) \
+ \
+ SC( 48, 16, 14, 1, yes, no, 0, no) \
+ SC( 49, 16, 14, 2, yes, no, 0, no) \
+ SC( 50, 16, 14, 3, yes, no, 0, no) \
+ SC( 51, 16, 14, 4, yes, no, 0, no) \
+ \
+ SC( 52, 17, 15, 1, yes, no, 0, no) \
+ SC( 53, 17, 15, 2, yes, no, 0, no) \
+ SC( 54, 17, 15, 3, yes, no, 0, no) \
+ SC( 55, 17, 15, 4, yes, no, 0, no) \
+ \
+ SC( 56, 18, 16, 1, yes, no, 0, no) \
+ SC( 57, 18, 16, 2, yes, no, 0, no) \
+ SC( 58, 18, 16, 3, yes, no, 0, no) \
+ SC( 59, 18, 16, 4, yes, no, 0, no) \
+ \
+ SC( 60, 19, 17, 1, yes, no, 0, no) \
+ SC( 61, 19, 17, 2, yes, no, 0, no) \
+ SC( 62, 19, 17, 3, yes, no, 0, no) \
+ SC( 63, 19, 17, 4, yes, no, 0, no) \
+ \
+ SC( 64, 20, 18, 1, yes, no, 0, no) \
+ SC( 65, 20, 18, 2, yes, no, 0, no) \
+ SC( 66, 20, 18, 3, yes, no, 0, no) \
+ SC( 67, 20, 18, 4, yes, no, 0, no) \
+ \
+ SC( 68, 21, 19, 1, yes, no, 0, no) \
+ SC( 69, 21, 19, 2, yes, no, 0, no) \
+ SC( 70, 21, 19, 3, yes, no, 0, no) \
+ SC( 71, 21, 19, 4, yes, no, 0, no) \
+ \
+ SC( 72, 22, 20, 1, yes, no, 0, no) \
+ SC( 73, 22, 20, 2, yes, no, 0, no) \
+ SC( 74, 22, 20, 3, yes, no, 0, no) \
+ SC( 75, 22, 20, 4, yes, no, 0, no) \
+ \
+ SC( 76, 23, 21, 1, yes, no, 0, no) \
+ SC( 77, 23, 21, 2, yes, no, 0, no) \
+ SC( 78, 23, 21, 3, yes, no, 0, no) \
+ SC( 79, 23, 21, 4, yes, no, 0, no) \
+ \
+ SC( 80, 24, 22, 1, yes, no, 0, no) \
+ SC( 81, 24, 22, 2, yes, no, 0, no) \
+ SC( 82, 24, 22, 3, yes, no, 0, no) \
+ SC( 83, 24, 22, 4, yes, no, 0, no) \
+ \
+ SC( 84, 25, 23, 1, yes, no, 0, no) \
+ SC( 85, 25, 23, 2, yes, no, 0, no) \
+ SC( 86, 25, 23, 3, yes, no, 0, no) \
+ SC( 87, 25, 23, 4, yes, no, 0, no) \
+ \
+ SC( 88, 26, 24, 1, yes, no, 0, no) \
+ SC( 89, 26, 24, 2, yes, no, 0, no) \
+ SC( 90, 26, 24, 3, yes, no, 0, no) \
+ SC( 91, 26, 24, 4, yes, no, 0, no) \
+ \
+ SC( 92, 27, 25, 1, yes, no, 0, no) \
+ SC( 93, 27, 25, 2, yes, no, 0, no) \
+ SC( 94, 27, 25, 3, yes, no, 0, no) \
+ SC( 95, 27, 25, 4, yes, no, 0, no) \
+ \
+ SC( 96, 28, 26, 1, yes, no, 0, no) \
+ SC( 97, 28, 26, 2, yes, no, 0, no) \
+ SC( 98, 28, 26, 3, yes, no, 0, no) \
+ SC( 99, 28, 26, 4, yes, no, 0, no) \
+ \
+ SC(100, 29, 27, 1, yes, no, 0, no) \
+ SC(101, 29, 27, 2, yes, no, 0, no) \
+ SC(102, 29, 27, 3, yes, no, 0, no) \
+ SC(103, 29, 27, 4, yes, no, 0, no) \
+ \
+ SC(104, 30, 28, 1, yes, no, 0, no) \
+ SC(105, 30, 28, 2, yes, no, 0, no) \
+ SC(106, 30, 28, 3, yes, no, 0, no) \
-#define SIZE_CLASSES_DEFINED
-#define NTBINS 0
-#define NLBINS 32
-#define NBINS 47
-#define NSIZES 107
-#define NPSIZES 63
-#define LG_TINY_MAXCLASS "NA"
-#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
-#define SMALL_MAXCLASS ((((size_t)1) << 15) + (((size_t)3) << 13))
-#define LG_LARGE_MINCLASS 16
-#define HUGE_MAXCLASS ((((size_t)1) << 30) + (((size_t)3) << 28))
+#define SIZE_CLASSES_DEFINED
+#define NTBINS 0
+#define NLBINS 32
+#define NBINS 47
+#define NSIZES 107
+#define LG_CEIL_NSIZES 7
+#define NPSIZES 63
+#define LG_TINY_MAXCLASS "NA"
+#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
+#define SMALL_MAXCLASS ((((size_t)1) << 15) + (((size_t)3) << 13))
+#define LG_LARGE_MINCLASS 16
+#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)
+#define LARGE_MAXCLASS ((((size_t)1) << 30) + (((size_t)3) << 28))
#endif
#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 16)
-#define SIZE_CLASSES \
- /* index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup */ \
- SC( 0, 3, 3, 0, no, yes, 3) \
- SC( 1, 3, 3, 1, no, yes, 3) \
- SC( 2, 3, 3, 2, no, yes, 3) \
- SC( 3, 3, 3, 3, no, yes, 3) \
- \
- SC( 4, 5, 3, 1, no, yes, 3) \
- SC( 5, 5, 3, 2, no, yes, 3) \
- SC( 6, 5, 3, 3, no, yes, 3) \
- SC( 7, 5, 3, 4, no, yes, 3) \
- \
- SC( 8, 6, 4, 1, no, yes, 4) \
- SC( 9, 6, 4, 2, no, yes, 4) \
- SC( 10, 6, 4, 3, no, yes, 4) \
- SC( 11, 6, 4, 4, no, yes, 4) \
- \
- SC( 12, 7, 5, 1, no, yes, 5) \
- SC( 13, 7, 5, 2, no, yes, 5) \
- SC( 14, 7, 5, 3, no, yes, 5) \
- SC( 15, 7, 5, 4, no, yes, 5) \
- \
- SC( 16, 8, 6, 1, no, yes, 6) \
- SC( 17, 8, 6, 2, no, yes, 6) \
- SC( 18, 8, 6, 3, no, yes, 6) \
- SC( 19, 8, 6, 4, no, yes, 6) \
- \
- SC( 20, 9, 7, 1, no, yes, 7) \
- SC( 21, 9, 7, 2, no, yes, 7) \
- SC( 22, 9, 7, 3, no, yes, 7) \
- SC( 23, 9, 7, 4, no, yes, 7) \
- \
- SC( 24, 10, 8, 1, no, yes, 8) \
- SC( 25, 10, 8, 2, no, yes, 8) \
- SC( 26, 10, 8, 3, no, yes, 8) \
- SC( 27, 10, 8, 4, no, yes, 8) \
- \
- SC( 28, 11, 9, 1, no, yes, 9) \
- SC( 29, 11, 9, 2, no, yes, 9) \
- SC( 30, 11, 9, 3, no, yes, 9) \
- SC( 31, 11, 9, 4, no, yes, 9) \
- \
- SC( 32, 12, 10, 1, no, yes, no) \
- SC( 33, 12, 10, 2, no, yes, no) \
- SC( 34, 12, 10, 3, no, yes, no) \
- SC( 35, 12, 10, 4, no, yes, no) \
- \
- SC( 36, 13, 11, 1, no, yes, no) \
- SC( 37, 13, 11, 2, no, yes, no) \
- SC( 38, 13, 11, 3, no, yes, no) \
- SC( 39, 13, 11, 4, no, yes, no) \
- \
- SC( 40, 14, 12, 1, no, yes, no) \
- SC( 41, 14, 12, 2, no, yes, no) \
- SC( 42, 14, 12, 3, no, yes, no) \
- SC( 43, 14, 12, 4, no, yes, no) \
- \
- SC( 44, 15, 13, 1, no, yes, no) \
- SC( 45, 15, 13, 2, no, yes, no) \
- SC( 46, 15, 13, 3, no, yes, no) \
- SC( 47, 15, 13, 4, yes, yes, no) \
- \
- SC( 48, 16, 14, 1, no, yes, no) \
- SC( 49, 16, 14, 2, no, yes, no) \
- SC( 50, 16, 14, 3, no, yes, no) \
- SC( 51, 16, 14, 4, yes, yes, no) \
- \
- SC( 52, 17, 15, 1, no, yes, no) \
- SC( 53, 17, 15, 2, yes, yes, no) \
- SC( 54, 17, 15, 3, no, yes, no) \
- SC( 55, 17, 15, 4, yes, no, no) \
- \
- SC( 56, 18, 16, 1, yes, no, no) \
- SC( 57, 18, 16, 2, yes, no, no) \
- SC( 58, 18, 16, 3, yes, no, no) \
- SC( 59, 18, 16, 4, yes, no, no) \
- \
- SC( 60, 19, 17, 1, yes, no, no) \
- SC( 61, 19, 17, 2, yes, no, no) \
- SC( 62, 19, 17, 3, yes, no, no) \
- SC( 63, 19, 17, 4, yes, no, no) \
- \
- SC( 64, 20, 18, 1, yes, no, no) \
- SC( 65, 20, 18, 2, yes, no, no) \
- SC( 66, 20, 18, 3, yes, no, no) \
- SC( 67, 20, 18, 4, yes, no, no) \
- \
- SC( 68, 21, 19, 1, yes, no, no) \
- SC( 69, 21, 19, 2, yes, no, no) \
- SC( 70, 21, 19, 3, yes, no, no) \
- SC( 71, 21, 19, 4, yes, no, no) \
- \
- SC( 72, 22, 20, 1, yes, no, no) \
- SC( 73, 22, 20, 2, yes, no, no) \
- SC( 74, 22, 20, 3, yes, no, no) \
- SC( 75, 22, 20, 4, yes, no, no) \
- \
- SC( 76, 23, 21, 1, yes, no, no) \
- SC( 77, 23, 21, 2, yes, no, no) \
- SC( 78, 23, 21, 3, yes, no, no) \
- SC( 79, 23, 21, 4, yes, no, no) \
- \
- SC( 80, 24, 22, 1, yes, no, no) \
- SC( 81, 24, 22, 2, yes, no, no) \
- SC( 82, 24, 22, 3, yes, no, no) \
- SC( 83, 24, 22, 4, yes, no, no) \
- \
- SC( 84, 25, 23, 1, yes, no, no) \
- SC( 85, 25, 23, 2, yes, no, no) \
- SC( 86, 25, 23, 3, yes, no, no) \
- SC( 87, 25, 23, 4, yes, no, no) \
- \
- SC( 88, 26, 24, 1, yes, no, no) \
- SC( 89, 26, 24, 2, yes, no, no) \
- SC( 90, 26, 24, 3, yes, no, no) \
- SC( 91, 26, 24, 4, yes, no, no) \
- \
- SC( 92, 27, 25, 1, yes, no, no) \
- SC( 93, 27, 25, 2, yes, no, no) \
- SC( 94, 27, 25, 3, yes, no, no) \
- SC( 95, 27, 25, 4, yes, no, no) \
- \
- SC( 96, 28, 26, 1, yes, no, no) \
- SC( 97, 28, 26, 2, yes, no, no) \
- SC( 98, 28, 26, 3, yes, no, no) \
- SC( 99, 28, 26, 4, yes, no, no) \
- \
- SC(100, 29, 27, 1, yes, no, no) \
- SC(101, 29, 27, 2, yes, no, no) \
- SC(102, 29, 27, 3, yes, no, no) \
- SC(103, 29, 27, 4, yes, no, no) \
- \
- SC(104, 30, 28, 1, yes, no, no) \
- SC(105, 30, 28, 2, yes, no, no) \
- SC(106, 30, 28, 3, yes, no, no) \
+#define SIZE_CLASSES \
+ /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \
+ SC( 0, 3, 3, 0, no, yes, 1, 3) \
+ SC( 1, 3, 3, 1, no, yes, 1, 3) \
+ SC( 2, 3, 3, 2, no, yes, 3, 3) \
+ SC( 3, 3, 3, 3, no, yes, 1, 3) \
+ \
+ SC( 4, 5, 3, 1, no, yes, 5, 3) \
+ SC( 5, 5, 3, 2, no, yes, 3, 3) \
+ SC( 6, 5, 3, 3, no, yes, 7, 3) \
+ SC( 7, 5, 3, 4, no, yes, 1, 3) \
+ \
+ SC( 8, 6, 4, 1, no, yes, 5, 4) \
+ SC( 9, 6, 4, 2, no, yes, 3, 4) \
+ SC( 10, 6, 4, 3, no, yes, 7, 4) \
+ SC( 11, 6, 4, 4, no, yes, 1, 4) \
+ \
+ SC( 12, 7, 5, 1, no, yes, 5, 5) \
+ SC( 13, 7, 5, 2, no, yes, 3, 5) \
+ SC( 14, 7, 5, 3, no, yes, 7, 5) \
+ SC( 15, 7, 5, 4, no, yes, 1, 5) \
+ \
+ SC( 16, 8, 6, 1, no, yes, 5, 6) \
+ SC( 17, 8, 6, 2, no, yes, 3, 6) \
+ SC( 18, 8, 6, 3, no, yes, 7, 6) \
+ SC( 19, 8, 6, 4, no, yes, 1, 6) \
+ \
+ SC( 20, 9, 7, 1, no, yes, 5, 7) \
+ SC( 21, 9, 7, 2, no, yes, 3, 7) \
+ SC( 22, 9, 7, 3, no, yes, 7, 7) \
+ SC( 23, 9, 7, 4, no, yes, 1, 7) \
+ \
+ SC( 24, 10, 8, 1, no, yes, 5, 8) \
+ SC( 25, 10, 8, 2, no, yes, 3, 8) \
+ SC( 26, 10, 8, 3, no, yes, 7, 8) \
+ SC( 27, 10, 8, 4, no, yes, 1, 8) \
+ \
+ SC( 28, 11, 9, 1, no, yes, 5, 9) \
+ SC( 29, 11, 9, 2, no, yes, 3, 9) \
+ SC( 30, 11, 9, 3, no, yes, 7, 9) \
+ SC( 31, 11, 9, 4, no, yes, 1, 9) \
+ \
+ SC( 32, 12, 10, 1, no, yes, 5, no) \
+ SC( 33, 12, 10, 2, no, yes, 3, no) \
+ SC( 34, 12, 10, 3, no, yes, 7, no) \
+ SC( 35, 12, 10, 4, no, yes, 1, no) \
+ \
+ SC( 36, 13, 11, 1, no, yes, 5, no) \
+ SC( 37, 13, 11, 2, no, yes, 3, no) \
+ SC( 38, 13, 11, 3, no, yes, 7, no) \
+ SC( 39, 13, 11, 4, no, yes, 1, no) \
+ \
+ SC( 40, 14, 12, 1, no, yes, 5, no) \
+ SC( 41, 14, 12, 2, no, yes, 3, no) \
+ SC( 42, 14, 12, 3, no, yes, 7, no) \
+ SC( 43, 14, 12, 4, no, yes, 1, no) \
+ \
+ SC( 44, 15, 13, 1, no, yes, 5, no) \
+ SC( 45, 15, 13, 2, no, yes, 3, no) \
+ SC( 46, 15, 13, 3, no, yes, 7, no) \
+ SC( 47, 15, 13, 4, yes, yes, 1, no) \
+ \
+ SC( 48, 16, 14, 1, no, yes, 5, no) \
+ SC( 49, 16, 14, 2, no, yes, 3, no) \
+ SC( 50, 16, 14, 3, no, yes, 7, no) \
+ SC( 51, 16, 14, 4, yes, yes, 2, no) \
+ \
+ SC( 52, 17, 15, 1, no, yes, 5, no) \
+ SC( 53, 17, 15, 2, yes, yes, 3, no) \
+ SC( 54, 17, 15, 3, no, yes, 7, no) \
+ SC( 55, 17, 15, 4, yes, no, 0, no) \
+ \
+ SC( 56, 18, 16, 1, yes, no, 0, no) \
+ SC( 57, 18, 16, 2, yes, no, 0, no) \
+ SC( 58, 18, 16, 3, yes, no, 0, no) \
+ SC( 59, 18, 16, 4, yes, no, 0, no) \
+ \
+ SC( 60, 19, 17, 1, yes, no, 0, no) \
+ SC( 61, 19, 17, 2, yes, no, 0, no) \
+ SC( 62, 19, 17, 3, yes, no, 0, no) \
+ SC( 63, 19, 17, 4, yes, no, 0, no) \
+ \
+ SC( 64, 20, 18, 1, yes, no, 0, no) \
+ SC( 65, 20, 18, 2, yes, no, 0, no) \
+ SC( 66, 20, 18, 3, yes, no, 0, no) \
+ SC( 67, 20, 18, 4, yes, no, 0, no) \
+ \
+ SC( 68, 21, 19, 1, yes, no, 0, no) \
+ SC( 69, 21, 19, 2, yes, no, 0, no) \
+ SC( 70, 21, 19, 3, yes, no, 0, no) \
+ SC( 71, 21, 19, 4, yes, no, 0, no) \
+ \
+ SC( 72, 22, 20, 1, yes, no, 0, no) \
+ SC( 73, 22, 20, 2, yes, no, 0, no) \
+ SC( 74, 22, 20, 3, yes, no, 0, no) \
+ SC( 75, 22, 20, 4, yes, no, 0, no) \
+ \
+ SC( 76, 23, 21, 1, yes, no, 0, no) \
+ SC( 77, 23, 21, 2, yes, no, 0, no) \
+ SC( 78, 23, 21, 3, yes, no, 0, no) \
+ SC( 79, 23, 21, 4, yes, no, 0, no) \
+ \
+ SC( 80, 24, 22, 1, yes, no, 0, no) \
+ SC( 81, 24, 22, 2, yes, no, 0, no) \
+ SC( 82, 24, 22, 3, yes, no, 0, no) \
+ SC( 83, 24, 22, 4, yes, no, 0, no) \
+ \
+ SC( 84, 25, 23, 1, yes, no, 0, no) \
+ SC( 85, 25, 23, 2, yes, no, 0, no) \
+ SC( 86, 25, 23, 3, yes, no, 0, no) \
+ SC( 87, 25, 23, 4, yes, no, 0, no) \
+ \
+ SC( 88, 26, 24, 1, yes, no, 0, no) \
+ SC( 89, 26, 24, 2, yes, no, 0, no) \
+ SC( 90, 26, 24, 3, yes, no, 0, no) \
+ SC( 91, 26, 24, 4, yes, no, 0, no) \
+ \
+ SC( 92, 27, 25, 1, yes, no, 0, no) \
+ SC( 93, 27, 25, 2, yes, no, 0, no) \
+ SC( 94, 27, 25, 3, yes, no, 0, no) \
+ SC( 95, 27, 25, 4, yes, no, 0, no) \
+ \
+ SC( 96, 28, 26, 1, yes, no, 0, no) \
+ SC( 97, 28, 26, 2, yes, no, 0, no) \
+ SC( 98, 28, 26, 3, yes, no, 0, no) \
+ SC( 99, 28, 26, 4, yes, no, 0, no) \
+ \
+ SC(100, 29, 27, 1, yes, no, 0, no) \
+ SC(101, 29, 27, 2, yes, no, 0, no) \
+ SC(102, 29, 27, 3, yes, no, 0, no) \
+ SC(103, 29, 27, 4, yes, no, 0, no) \
+ \
+ SC(104, 30, 28, 1, yes, no, 0, no) \
+ SC(105, 30, 28, 2, yes, no, 0, no) \
+ SC(106, 30, 28, 3, yes, no, 0, no) \
-#define SIZE_CLASSES_DEFINED
-#define NTBINS 0
-#define NLBINS 32
-#define NBINS 55
-#define NSIZES 107
-#define NPSIZES 55
-#define LG_TINY_MAXCLASS "NA"
-#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
-#define SMALL_MAXCLASS ((((size_t)1) << 17) + (((size_t)3) << 15))
-#define LG_LARGE_MINCLASS 18
-#define HUGE_MAXCLASS ((((size_t)1) << 30) + (((size_t)3) << 28))
+#define SIZE_CLASSES_DEFINED
+#define NTBINS 0
+#define NLBINS 32
+#define NBINS 55
+#define NSIZES 107
+#define LG_CEIL_NSIZES 7
+#define NPSIZES 55
+#define LG_TINY_MAXCLASS "NA"
+#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
+#define SMALL_MAXCLASS ((((size_t)1) << 17) + (((size_t)3) << 15))
+#define LG_LARGE_MINCLASS 18
+#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)
+#define LARGE_MAXCLASS ((((size_t)1) << 30) + (((size_t)3) << 28))
#endif
#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 12)
-#define SIZE_CLASSES \
- /* index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup */ \
- SC( 0, 3, 3, 0, no, yes, 3) \
- \
- SC( 1, 3, 3, 1, no, yes, 3) \
- SC( 2, 4, 4, 1, no, yes, 4) \
- SC( 3, 4, 4, 2, no, yes, 4) \
- SC( 4, 4, 4, 3, no, yes, 4) \
- \
- SC( 5, 6, 4, 1, no, yes, 4) \
- SC( 6, 6, 4, 2, no, yes, 4) \
- SC( 7, 6, 4, 3, no, yes, 4) \
- SC( 8, 6, 4, 4, no, yes, 4) \
- \
- SC( 9, 7, 5, 1, no, yes, 5) \
- SC( 10, 7, 5, 2, no, yes, 5) \
- SC( 11, 7, 5, 3, no, yes, 5) \
- SC( 12, 7, 5, 4, no, yes, 5) \
- \
- SC( 13, 8, 6, 1, no, yes, 6) \
- SC( 14, 8, 6, 2, no, yes, 6) \
- SC( 15, 8, 6, 3, no, yes, 6) \
- SC( 16, 8, 6, 4, no, yes, 6) \
- \
- SC( 17, 9, 7, 1, no, yes, 7) \
- SC( 18, 9, 7, 2, no, yes, 7) \
- SC( 19, 9, 7, 3, no, yes, 7) \
- SC( 20, 9, 7, 4, no, yes, 7) \
- \
- SC( 21, 10, 8, 1, no, yes, 8) \
- SC( 22, 10, 8, 2, no, yes, 8) \
- SC( 23, 10, 8, 3, no, yes, 8) \
- SC( 24, 10, 8, 4, no, yes, 8) \
- \
- SC( 25, 11, 9, 1, no, yes, 9) \
- SC( 26, 11, 9, 2, no, yes, 9) \
- SC( 27, 11, 9, 3, no, yes, 9) \
- SC( 28, 11, 9, 4, yes, yes, 9) \
- \
- SC( 29, 12, 10, 1, no, yes, no) \
- SC( 30, 12, 10, 2, no, yes, no) \
- SC( 31, 12, 10, 3, no, yes, no) \
- SC( 32, 12, 10, 4, yes, yes, no) \
- \
- SC( 33, 13, 11, 1, no, yes, no) \
- SC( 34, 13, 11, 2, yes, yes, no) \
- SC( 35, 13, 11, 3, no, yes, no) \
- SC( 36, 13, 11, 4, yes, no, no) \
- \
- SC( 37, 14, 12, 1, yes, no, no) \
- SC( 38, 14, 12, 2, yes, no, no) \
- SC( 39, 14, 12, 3, yes, no, no) \
- SC( 40, 14, 12, 4, yes, no, no) \
- \
- SC( 41, 15, 13, 1, yes, no, no) \
- SC( 42, 15, 13, 2, yes, no, no) \
- SC( 43, 15, 13, 3, yes, no, no) \
- SC( 44, 15, 13, 4, yes, no, no) \
- \
- SC( 45, 16, 14, 1, yes, no, no) \
- SC( 46, 16, 14, 2, yes, no, no) \
- SC( 47, 16, 14, 3, yes, no, no) \
- SC( 48, 16, 14, 4, yes, no, no) \
- \
- SC( 49, 17, 15, 1, yes, no, no) \
- SC( 50, 17, 15, 2, yes, no, no) \
- SC( 51, 17, 15, 3, yes, no, no) \
- SC( 52, 17, 15, 4, yes, no, no) \
- \
- SC( 53, 18, 16, 1, yes, no, no) \
- SC( 54, 18, 16, 2, yes, no, no) \
- SC( 55, 18, 16, 3, yes, no, no) \
- SC( 56, 18, 16, 4, yes, no, no) \
- \
- SC( 57, 19, 17, 1, yes, no, no) \
- SC( 58, 19, 17, 2, yes, no, no) \
- SC( 59, 19, 17, 3, yes, no, no) \
- SC( 60, 19, 17, 4, yes, no, no) \
- \
- SC( 61, 20, 18, 1, yes, no, no) \
- SC( 62, 20, 18, 2, yes, no, no) \
- SC( 63, 20, 18, 3, yes, no, no) \
- SC( 64, 20, 18, 4, yes, no, no) \
- \
- SC( 65, 21, 19, 1, yes, no, no) \
- SC( 66, 21, 19, 2, yes, no, no) \
- SC( 67, 21, 19, 3, yes, no, no) \
- SC( 68, 21, 19, 4, yes, no, no) \
- \
- SC( 69, 22, 20, 1, yes, no, no) \
- SC( 70, 22, 20, 2, yes, no, no) \
- SC( 71, 22, 20, 3, yes, no, no) \
- SC( 72, 22, 20, 4, yes, no, no) \
- \
- SC( 73, 23, 21, 1, yes, no, no) \
- SC( 74, 23, 21, 2, yes, no, no) \
- SC( 75, 23, 21, 3, yes, no, no) \
- SC( 76, 23, 21, 4, yes, no, no) \
- \
- SC( 77, 24, 22, 1, yes, no, no) \
- SC( 78, 24, 22, 2, yes, no, no) \
- SC( 79, 24, 22, 3, yes, no, no) \
- SC( 80, 24, 22, 4, yes, no, no) \
- \
- SC( 81, 25, 23, 1, yes, no, no) \
- SC( 82, 25, 23, 2, yes, no, no) \
- SC( 83, 25, 23, 3, yes, no, no) \
- SC( 84, 25, 23, 4, yes, no, no) \
- \
- SC( 85, 26, 24, 1, yes, no, no) \
- SC( 86, 26, 24, 2, yes, no, no) \
- SC( 87, 26, 24, 3, yes, no, no) \
- SC( 88, 26, 24, 4, yes, no, no) \
- \
- SC( 89, 27, 25, 1, yes, no, no) \
- SC( 90, 27, 25, 2, yes, no, no) \
- SC( 91, 27, 25, 3, yes, no, no) \
- SC( 92, 27, 25, 4, yes, no, no) \
- \
- SC( 93, 28, 26, 1, yes, no, no) \
- SC( 94, 28, 26, 2, yes, no, no) \
- SC( 95, 28, 26, 3, yes, no, no) \
- SC( 96, 28, 26, 4, yes, no, no) \
- \
- SC( 97, 29, 27, 1, yes, no, no) \
- SC( 98, 29, 27, 2, yes, no, no) \
- SC( 99, 29, 27, 3, yes, no, no) \
- SC(100, 29, 27, 4, yes, no, no) \
- \
- SC(101, 30, 28, 1, yes, no, no) \
- SC(102, 30, 28, 2, yes, no, no) \
- SC(103, 30, 28, 3, yes, no, no) \
+#define SIZE_CLASSES \
+ /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \
+ SC( 0, 3, 3, 0, no, yes, 1, 3) \
+ \
+ SC( 1, 3, 3, 1, no, yes, 1, 3) \
+ SC( 2, 4, 4, 1, no, yes, 1, 4) \
+ SC( 3, 4, 4, 2, no, yes, 3, 4) \
+ SC( 4, 4, 4, 3, no, yes, 1, 4) \
+ \
+ SC( 5, 6, 4, 1, no, yes, 5, 4) \
+ SC( 6, 6, 4, 2, no, yes, 3, 4) \
+ SC( 7, 6, 4, 3, no, yes, 7, 4) \
+ SC( 8, 6, 4, 4, no, yes, 1, 4) \
+ \
+ SC( 9, 7, 5, 1, no, yes, 5, 5) \
+ SC( 10, 7, 5, 2, no, yes, 3, 5) \
+ SC( 11, 7, 5, 3, no, yes, 7, 5) \
+ SC( 12, 7, 5, 4, no, yes, 1, 5) \
+ \
+ SC( 13, 8, 6, 1, no, yes, 5, 6) \
+ SC( 14, 8, 6, 2, no, yes, 3, 6) \
+ SC( 15, 8, 6, 3, no, yes, 7, 6) \
+ SC( 16, 8, 6, 4, no, yes, 1, 6) \
+ \
+ SC( 17, 9, 7, 1, no, yes, 5, 7) \
+ SC( 18, 9, 7, 2, no, yes, 3, 7) \
+ SC( 19, 9, 7, 3, no, yes, 7, 7) \
+ SC( 20, 9, 7, 4, no, yes, 1, 7) \
+ \
+ SC( 21, 10, 8, 1, no, yes, 5, 8) \
+ SC( 22, 10, 8, 2, no, yes, 3, 8) \
+ SC( 23, 10, 8, 3, no, yes, 7, 8) \
+ SC( 24, 10, 8, 4, no, yes, 1, 8) \
+ \
+ SC( 25, 11, 9, 1, no, yes, 5, 9) \
+ SC( 26, 11, 9, 2, no, yes, 3, 9) \
+ SC( 27, 11, 9, 3, no, yes, 7, 9) \
+ SC( 28, 11, 9, 4, yes, yes, 1, 9) \
+ \
+ SC( 29, 12, 10, 1, no, yes, 5, no) \
+ SC( 30, 12, 10, 2, no, yes, 3, no) \
+ SC( 31, 12, 10, 3, no, yes, 7, no) \
+ SC( 32, 12, 10, 4, yes, yes, 2, no) \
+ \
+ SC( 33, 13, 11, 1, no, yes, 5, no) \
+ SC( 34, 13, 11, 2, yes, yes, 3, no) \
+ SC( 35, 13, 11, 3, no, yes, 7, no) \
+ SC( 36, 13, 11, 4, yes, no, 0, no) \
+ \
+ SC( 37, 14, 12, 1, yes, no, 0, no) \
+ SC( 38, 14, 12, 2, yes, no, 0, no) \
+ SC( 39, 14, 12, 3, yes, no, 0, no) \
+ SC( 40, 14, 12, 4, yes, no, 0, no) \
+ \
+ SC( 41, 15, 13, 1, yes, no, 0, no) \
+ SC( 42, 15, 13, 2, yes, no, 0, no) \
+ SC( 43, 15, 13, 3, yes, no, 0, no) \
+ SC( 44, 15, 13, 4, yes, no, 0, no) \
+ \
+ SC( 45, 16, 14, 1, yes, no, 0, no) \
+ SC( 46, 16, 14, 2, yes, no, 0, no) \
+ SC( 47, 16, 14, 3, yes, no, 0, no) \
+ SC( 48, 16, 14, 4, yes, no, 0, no) \
+ \
+ SC( 49, 17, 15, 1, yes, no, 0, no) \
+ SC( 50, 17, 15, 2, yes, no, 0, no) \
+ SC( 51, 17, 15, 3, yes, no, 0, no) \
+ SC( 52, 17, 15, 4, yes, no, 0, no) \
+ \
+ SC( 53, 18, 16, 1, yes, no, 0, no) \
+ SC( 54, 18, 16, 2, yes, no, 0, no) \
+ SC( 55, 18, 16, 3, yes, no, 0, no) \
+ SC( 56, 18, 16, 4, yes, no, 0, no) \
+ \
+ SC( 57, 19, 17, 1, yes, no, 0, no) \
+ SC( 58, 19, 17, 2, yes, no, 0, no) \
+ SC( 59, 19, 17, 3, yes, no, 0, no) \
+ SC( 60, 19, 17, 4, yes, no, 0, no) \
+ \
+ SC( 61, 20, 18, 1, yes, no, 0, no) \
+ SC( 62, 20, 18, 2, yes, no, 0, no) \
+ SC( 63, 20, 18, 3, yes, no, 0, no) \
+ SC( 64, 20, 18, 4, yes, no, 0, no) \
+ \
+ SC( 65, 21, 19, 1, yes, no, 0, no) \
+ SC( 66, 21, 19, 2, yes, no, 0, no) \
+ SC( 67, 21, 19, 3, yes, no, 0, no) \
+ SC( 68, 21, 19, 4, yes, no, 0, no) \
+ \
+ SC( 69, 22, 20, 1, yes, no, 0, no) \
+ SC( 70, 22, 20, 2, yes, no, 0, no) \
+ SC( 71, 22, 20, 3, yes, no, 0, no) \
+ SC( 72, 22, 20, 4, yes, no, 0, no) \
+ \
+ SC( 73, 23, 21, 1, yes, no, 0, no) \
+ SC( 74, 23, 21, 2, yes, no, 0, no) \
+ SC( 75, 23, 21, 3, yes, no, 0, no) \
+ SC( 76, 23, 21, 4, yes, no, 0, no) \
+ \
+ SC( 77, 24, 22, 1, yes, no, 0, no) \
+ SC( 78, 24, 22, 2, yes, no, 0, no) \
+ SC( 79, 24, 22, 3, yes, no, 0, no) \
+ SC( 80, 24, 22, 4, yes, no, 0, no) \
+ \
+ SC( 81, 25, 23, 1, yes, no, 0, no) \
+ SC( 82, 25, 23, 2, yes, no, 0, no) \
+ SC( 83, 25, 23, 3, yes, no, 0, no) \
+ SC( 84, 25, 23, 4, yes, no, 0, no) \
+ \
+ SC( 85, 26, 24, 1, yes, no, 0, no) \
+ SC( 86, 26, 24, 2, yes, no, 0, no) \
+ SC( 87, 26, 24, 3, yes, no, 0, no) \
+ SC( 88, 26, 24, 4, yes, no, 0, no) \
+ \
+ SC( 89, 27, 25, 1, yes, no, 0, no) \
+ SC( 90, 27, 25, 2, yes, no, 0, no) \
+ SC( 91, 27, 25, 3, yes, no, 0, no) \
+ SC( 92, 27, 25, 4, yes, no, 0, no) \
+ \
+ SC( 93, 28, 26, 1, yes, no, 0, no) \
+ SC( 94, 28, 26, 2, yes, no, 0, no) \
+ SC( 95, 28, 26, 3, yes, no, 0, no) \
+ SC( 96, 28, 26, 4, yes, no, 0, no) \
+ \
+ SC( 97, 29, 27, 1, yes, no, 0, no) \
+ SC( 98, 29, 27, 2, yes, no, 0, no) \
+ SC( 99, 29, 27, 3, yes, no, 0, no) \
+ SC(100, 29, 27, 4, yes, no, 0, no) \
+ \
+ SC(101, 30, 28, 1, yes, no, 0, no) \
+ SC(102, 30, 28, 2, yes, no, 0, no) \
+ SC(103, 30, 28, 3, yes, no, 0, no) \
-#define SIZE_CLASSES_DEFINED
-#define NTBINS 1
-#define NLBINS 29
-#define NBINS 36
-#define NSIZES 104
-#define NPSIZES 71
-#define LG_TINY_MAXCLASS 3
-#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
-#define SMALL_MAXCLASS ((((size_t)1) << 13) + (((size_t)3) << 11))
-#define LG_LARGE_MINCLASS 14
-#define HUGE_MAXCLASS ((((size_t)1) << 30) + (((size_t)3) << 28))
+#define SIZE_CLASSES_DEFINED
+#define NTBINS 1
+#define NLBINS 29
+#define NBINS 36
+#define NSIZES 104
+#define LG_CEIL_NSIZES 7
+#define NPSIZES 71
+#define LG_TINY_MAXCLASS 3
+#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
+#define SMALL_MAXCLASS ((((size_t)1) << 13) + (((size_t)3) << 11))
+#define LG_LARGE_MINCLASS 14
+#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)
+#define LARGE_MAXCLASS ((((size_t)1) << 30) + (((size_t)3) << 28))
#endif
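Each SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) row above encodes a size class of (1 << lg_grp) + (ndelta << lg_delta) bytes, and the derived constants (SMALL_MAXCLASS, LARGE_MINCLASS, LARGE_MAXCLASS) are that formula applied to particular rows. The short standalone sketch below checks the mapping for the LG_PAGE == 12 table just above; the sc_size helper is illustrative only and assumes the usual jemalloc size-class formula rather than anything defined in this diff.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Byte size of one SC(...) row: (1 << lg_grp) + (ndelta << lg_delta). */
static size_t
sc_size(unsigned lg_grp, unsigned lg_delta, unsigned ndelta) {
    return ((size_t)1 << lg_grp) + ((size_t)ndelta << lg_delta);
}

int
main(void) {
    /* SC( 35, 13, 11, 3, ...) is the last bin (NBINS == 36, so bins are
     * indices 0..35); it matches SMALL_MAXCLASS == (1 << 13) + (3 << 11). */
    assert(sc_size(13, 11, 3) == (((size_t)1 << 13) + ((size_t)3 << 11)));
    /* SC( 36, 13, 11, 4, ...) is the first non-bin class: 16384 ==
     * 1 << LG_LARGE_MINCLASS (14), i.e. LARGE_MINCLASS. */
    assert(sc_size(13, 11, 4) == ((size_t)1 << 14));
    /* SC(103, 30, 28, 3, ...) is the final class, LARGE_MAXCLASS. */
    assert(sc_size(30, 28, 3) == (((size_t)1 << 30) + ((size_t)3 << 28)));
    printf("SMALL_MAXCLASS=%zu LARGE_MINCLASS=%zu LARGE_MAXCLASS=%zu\n",
        sc_size(13, 11, 3), sc_size(13, 11, 4), sc_size(30, 28, 3));
    return 0;
}

The same check applies to every configuration block that follows; only the row at which bin flips from yes to no (and hence SMALL_MAXCLASS and LG_LARGE_MINCLASS) moves as LG_PAGE and LG_TINY_MIN change.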
#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 13)
-#define SIZE_CLASSES \
- /* index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup */ \
- SC( 0, 3, 3, 0, no, yes, 3) \
- \
- SC( 1, 3, 3, 1, no, yes, 3) \
- SC( 2, 4, 4, 1, no, yes, 4) \
- SC( 3, 4, 4, 2, no, yes, 4) \
- SC( 4, 4, 4, 3, no, yes, 4) \
- \
- SC( 5, 6, 4, 1, no, yes, 4) \
- SC( 6, 6, 4, 2, no, yes, 4) \
- SC( 7, 6, 4, 3, no, yes, 4) \
- SC( 8, 6, 4, 4, no, yes, 4) \
- \
- SC( 9, 7, 5, 1, no, yes, 5) \
- SC( 10, 7, 5, 2, no, yes, 5) \
- SC( 11, 7, 5, 3, no, yes, 5) \
- SC( 12, 7, 5, 4, no, yes, 5) \
- \
- SC( 13, 8, 6, 1, no, yes, 6) \
- SC( 14, 8, 6, 2, no, yes, 6) \
- SC( 15, 8, 6, 3, no, yes, 6) \
- SC( 16, 8, 6, 4, no, yes, 6) \
- \
- SC( 17, 9, 7, 1, no, yes, 7) \
- SC( 18, 9, 7, 2, no, yes, 7) \
- SC( 19, 9, 7, 3, no, yes, 7) \
- SC( 20, 9, 7, 4, no, yes, 7) \
- \
- SC( 21, 10, 8, 1, no, yes, 8) \
- SC( 22, 10, 8, 2, no, yes, 8) \
- SC( 23, 10, 8, 3, no, yes, 8) \
- SC( 24, 10, 8, 4, no, yes, 8) \
- \
- SC( 25, 11, 9, 1, no, yes, 9) \
- SC( 26, 11, 9, 2, no, yes, 9) \
- SC( 27, 11, 9, 3, no, yes, 9) \
- SC( 28, 11, 9, 4, no, yes, 9) \
- \
- SC( 29, 12, 10, 1, no, yes, no) \
- SC( 30, 12, 10, 2, no, yes, no) \
- SC( 31, 12, 10, 3, no, yes, no) \
- SC( 32, 12, 10, 4, yes, yes, no) \
- \
- SC( 33, 13, 11, 1, no, yes, no) \
- SC( 34, 13, 11, 2, no, yes, no) \
- SC( 35, 13, 11, 3, no, yes, no) \
- SC( 36, 13, 11, 4, yes, yes, no) \
- \
- SC( 37, 14, 12, 1, no, yes, no) \
- SC( 38, 14, 12, 2, yes, yes, no) \
- SC( 39, 14, 12, 3, no, yes, no) \
- SC( 40, 14, 12, 4, yes, no, no) \
- \
- SC( 41, 15, 13, 1, yes, no, no) \
- SC( 42, 15, 13, 2, yes, no, no) \
- SC( 43, 15, 13, 3, yes, no, no) \
- SC( 44, 15, 13, 4, yes, no, no) \
- \
- SC( 45, 16, 14, 1, yes, no, no) \
- SC( 46, 16, 14, 2, yes, no, no) \
- SC( 47, 16, 14, 3, yes, no, no) \
- SC( 48, 16, 14, 4, yes, no, no) \
- \
- SC( 49, 17, 15, 1, yes, no, no) \
- SC( 50, 17, 15, 2, yes, no, no) \
- SC( 51, 17, 15, 3, yes, no, no) \
- SC( 52, 17, 15, 4, yes, no, no) \
- \
- SC( 53, 18, 16, 1, yes, no, no) \
- SC( 54, 18, 16, 2, yes, no, no) \
- SC( 55, 18, 16, 3, yes, no, no) \
- SC( 56, 18, 16, 4, yes, no, no) \
- \
- SC( 57, 19, 17, 1, yes, no, no) \
- SC( 58, 19, 17, 2, yes, no, no) \
- SC( 59, 19, 17, 3, yes, no, no) \
- SC( 60, 19, 17, 4, yes, no, no) \
- \
- SC( 61, 20, 18, 1, yes, no, no) \
- SC( 62, 20, 18, 2, yes, no, no) \
- SC( 63, 20, 18, 3, yes, no, no) \
- SC( 64, 20, 18, 4, yes, no, no) \
- \
- SC( 65, 21, 19, 1, yes, no, no) \
- SC( 66, 21, 19, 2, yes, no, no) \
- SC( 67, 21, 19, 3, yes, no, no) \
- SC( 68, 21, 19, 4, yes, no, no) \
- \
- SC( 69, 22, 20, 1, yes, no, no) \
- SC( 70, 22, 20, 2, yes, no, no) \
- SC( 71, 22, 20, 3, yes, no, no) \
- SC( 72, 22, 20, 4, yes, no, no) \
- \
- SC( 73, 23, 21, 1, yes, no, no) \
- SC( 74, 23, 21, 2, yes, no, no) \
- SC( 75, 23, 21, 3, yes, no, no) \
- SC( 76, 23, 21, 4, yes, no, no) \
- \
- SC( 77, 24, 22, 1, yes, no, no) \
- SC( 78, 24, 22, 2, yes, no, no) \
- SC( 79, 24, 22, 3, yes, no, no) \
- SC( 80, 24, 22, 4, yes, no, no) \
- \
- SC( 81, 25, 23, 1, yes, no, no) \
- SC( 82, 25, 23, 2, yes, no, no) \
- SC( 83, 25, 23, 3, yes, no, no) \
- SC( 84, 25, 23, 4, yes, no, no) \
- \
- SC( 85, 26, 24, 1, yes, no, no) \
- SC( 86, 26, 24, 2, yes, no, no) \
- SC( 87, 26, 24, 3, yes, no, no) \
- SC( 88, 26, 24, 4, yes, no, no) \
- \
- SC( 89, 27, 25, 1, yes, no, no) \
- SC( 90, 27, 25, 2, yes, no, no) \
- SC( 91, 27, 25, 3, yes, no, no) \
- SC( 92, 27, 25, 4, yes, no, no) \
- \
- SC( 93, 28, 26, 1, yes, no, no) \
- SC( 94, 28, 26, 2, yes, no, no) \
- SC( 95, 28, 26, 3, yes, no, no) \
- SC( 96, 28, 26, 4, yes, no, no) \
- \
- SC( 97, 29, 27, 1, yes, no, no) \
- SC( 98, 29, 27, 2, yes, no, no) \
- SC( 99, 29, 27, 3, yes, no, no) \
- SC(100, 29, 27, 4, yes, no, no) \
- \
- SC(101, 30, 28, 1, yes, no, no) \
- SC(102, 30, 28, 2, yes, no, no) \
- SC(103, 30, 28, 3, yes, no, no) \
+#define SIZE_CLASSES \
+ /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \
+ SC( 0, 3, 3, 0, no, yes, 1, 3) \
+ \
+ SC( 1, 3, 3, 1, no, yes, 1, 3) \
+ SC( 2, 4, 4, 1, no, yes, 1, 4) \
+ SC( 3, 4, 4, 2, no, yes, 3, 4) \
+ SC( 4, 4, 4, 3, no, yes, 1, 4) \
+ \
+ SC( 5, 6, 4, 1, no, yes, 5, 4) \
+ SC( 6, 6, 4, 2, no, yes, 3, 4) \
+ SC( 7, 6, 4, 3, no, yes, 7, 4) \
+ SC( 8, 6, 4, 4, no, yes, 1, 4) \
+ \
+ SC( 9, 7, 5, 1, no, yes, 5, 5) \
+ SC( 10, 7, 5, 2, no, yes, 3, 5) \
+ SC( 11, 7, 5, 3, no, yes, 7, 5) \
+ SC( 12, 7, 5, 4, no, yes, 1, 5) \
+ \
+ SC( 13, 8, 6, 1, no, yes, 5, 6) \
+ SC( 14, 8, 6, 2, no, yes, 3, 6) \
+ SC( 15, 8, 6, 3, no, yes, 7, 6) \
+ SC( 16, 8, 6, 4, no, yes, 1, 6) \
+ \
+ SC( 17, 9, 7, 1, no, yes, 5, 7) \
+ SC( 18, 9, 7, 2, no, yes, 3, 7) \
+ SC( 19, 9, 7, 3, no, yes, 7, 7) \
+ SC( 20, 9, 7, 4, no, yes, 1, 7) \
+ \
+ SC( 21, 10, 8, 1, no, yes, 5, 8) \
+ SC( 22, 10, 8, 2, no, yes, 3, 8) \
+ SC( 23, 10, 8, 3, no, yes, 7, 8) \
+ SC( 24, 10, 8, 4, no, yes, 1, 8) \
+ \
+ SC( 25, 11, 9, 1, no, yes, 5, 9) \
+ SC( 26, 11, 9, 2, no, yes, 3, 9) \
+ SC( 27, 11, 9, 3, no, yes, 7, 9) \
+ SC( 28, 11, 9, 4, no, yes, 1, 9) \
+ \
+ SC( 29, 12, 10, 1, no, yes, 5, no) \
+ SC( 30, 12, 10, 2, no, yes, 3, no) \
+ SC( 31, 12, 10, 3, no, yes, 7, no) \
+ SC( 32, 12, 10, 4, yes, yes, 1, no) \
+ \
+ SC( 33, 13, 11, 1, no, yes, 5, no) \
+ SC( 34, 13, 11, 2, no, yes, 3, no) \
+ SC( 35, 13, 11, 3, no, yes, 7, no) \
+ SC( 36, 13, 11, 4, yes, yes, 2, no) \
+ \
+ SC( 37, 14, 12, 1, no, yes, 5, no) \
+ SC( 38, 14, 12, 2, yes, yes, 3, no) \
+ SC( 39, 14, 12, 3, no, yes, 7, no) \
+ SC( 40, 14, 12, 4, yes, no, 0, no) \
+ \
+ SC( 41, 15, 13, 1, yes, no, 0, no) \
+ SC( 42, 15, 13, 2, yes, no, 0, no) \
+ SC( 43, 15, 13, 3, yes, no, 0, no) \
+ SC( 44, 15, 13, 4, yes, no, 0, no) \
+ \
+ SC( 45, 16, 14, 1, yes, no, 0, no) \
+ SC( 46, 16, 14, 2, yes, no, 0, no) \
+ SC( 47, 16, 14, 3, yes, no, 0, no) \
+ SC( 48, 16, 14, 4, yes, no, 0, no) \
+ \
+ SC( 49, 17, 15, 1, yes, no, 0, no) \
+ SC( 50, 17, 15, 2, yes, no, 0, no) \
+ SC( 51, 17, 15, 3, yes, no, 0, no) \
+ SC( 52, 17, 15, 4, yes, no, 0, no) \
+ \
+ SC( 53, 18, 16, 1, yes, no, 0, no) \
+ SC( 54, 18, 16, 2, yes, no, 0, no) \
+ SC( 55, 18, 16, 3, yes, no, 0, no) \
+ SC( 56, 18, 16, 4, yes, no, 0, no) \
+ \
+ SC( 57, 19, 17, 1, yes, no, 0, no) \
+ SC( 58, 19, 17, 2, yes, no, 0, no) \
+ SC( 59, 19, 17, 3, yes, no, 0, no) \
+ SC( 60, 19, 17, 4, yes, no, 0, no) \
+ \
+ SC( 61, 20, 18, 1, yes, no, 0, no) \
+ SC( 62, 20, 18, 2, yes, no, 0, no) \
+ SC( 63, 20, 18, 3, yes, no, 0, no) \
+ SC( 64, 20, 18, 4, yes, no, 0, no) \
+ \
+ SC( 65, 21, 19, 1, yes, no, 0, no) \
+ SC( 66, 21, 19, 2, yes, no, 0, no) \
+ SC( 67, 21, 19, 3, yes, no, 0, no) \
+ SC( 68, 21, 19, 4, yes, no, 0, no) \
+ \
+ SC( 69, 22, 20, 1, yes, no, 0, no) \
+ SC( 70, 22, 20, 2, yes, no, 0, no) \
+ SC( 71, 22, 20, 3, yes, no, 0, no) \
+ SC( 72, 22, 20, 4, yes, no, 0, no) \
+ \
+ SC( 73, 23, 21, 1, yes, no, 0, no) \
+ SC( 74, 23, 21, 2, yes, no, 0, no) \
+ SC( 75, 23, 21, 3, yes, no, 0, no) \
+ SC( 76, 23, 21, 4, yes, no, 0, no) \
+ \
+ SC( 77, 24, 22, 1, yes, no, 0, no) \
+ SC( 78, 24, 22, 2, yes, no, 0, no) \
+ SC( 79, 24, 22, 3, yes, no, 0, no) \
+ SC( 80, 24, 22, 4, yes, no, 0, no) \
+ \
+ SC( 81, 25, 23, 1, yes, no, 0, no) \
+ SC( 82, 25, 23, 2, yes, no, 0, no) \
+ SC( 83, 25, 23, 3, yes, no, 0, no) \
+ SC( 84, 25, 23, 4, yes, no, 0, no) \
+ \
+ SC( 85, 26, 24, 1, yes, no, 0, no) \
+ SC( 86, 26, 24, 2, yes, no, 0, no) \
+ SC( 87, 26, 24, 3, yes, no, 0, no) \
+ SC( 88, 26, 24, 4, yes, no, 0, no) \
+ \
+ SC( 89, 27, 25, 1, yes, no, 0, no) \
+ SC( 90, 27, 25, 2, yes, no, 0, no) \
+ SC( 91, 27, 25, 3, yes, no, 0, no) \
+ SC( 92, 27, 25, 4, yes, no, 0, no) \
+ \
+ SC( 93, 28, 26, 1, yes, no, 0, no) \
+ SC( 94, 28, 26, 2, yes, no, 0, no) \
+ SC( 95, 28, 26, 3, yes, no, 0, no) \
+ SC( 96, 28, 26, 4, yes, no, 0, no) \
+ \
+ SC( 97, 29, 27, 1, yes, no, 0, no) \
+ SC( 98, 29, 27, 2, yes, no, 0, no) \
+ SC( 99, 29, 27, 3, yes, no, 0, no) \
+ SC(100, 29, 27, 4, yes, no, 0, no) \
+ \
+ SC(101, 30, 28, 1, yes, no, 0, no) \
+ SC(102, 30, 28, 2, yes, no, 0, no) \
+ SC(103, 30, 28, 3, yes, no, 0, no) \
-#define SIZE_CLASSES_DEFINED
-#define NTBINS 1
-#define NLBINS 29
-#define NBINS 40
-#define NSIZES 104
-#define NPSIZES 67
-#define LG_TINY_MAXCLASS 3
-#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
-#define SMALL_MAXCLASS ((((size_t)1) << 14) + (((size_t)3) << 12))
-#define LG_LARGE_MINCLASS 15
-#define HUGE_MAXCLASS ((((size_t)1) << 30) + (((size_t)3) << 28))
+#define SIZE_CLASSES_DEFINED
+#define NTBINS 1
+#define NLBINS 29
+#define NBINS 40
+#define NSIZES 104
+#define LG_CEIL_NSIZES 7
+#define NPSIZES 67
+#define LG_TINY_MAXCLASS 3
+#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
+#define SMALL_MAXCLASS ((((size_t)1) << 14) + (((size_t)3) << 12))
+#define LG_LARGE_MINCLASS 15
+#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)
+#define LARGE_MAXCLASS ((((size_t)1) << 30) + (((size_t)3) << 28))
#endif
#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 14)
-#define SIZE_CLASSES \
- /* index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup */ \
- SC( 0, 3, 3, 0, no, yes, 3) \
- \
- SC( 1, 3, 3, 1, no, yes, 3) \
- SC( 2, 4, 4, 1, no, yes, 4) \
- SC( 3, 4, 4, 2, no, yes, 4) \
- SC( 4, 4, 4, 3, no, yes, 4) \
- \
- SC( 5, 6, 4, 1, no, yes, 4) \
- SC( 6, 6, 4, 2, no, yes, 4) \
- SC( 7, 6, 4, 3, no, yes, 4) \
- SC( 8, 6, 4, 4, no, yes, 4) \
- \
- SC( 9, 7, 5, 1, no, yes, 5) \
- SC( 10, 7, 5, 2, no, yes, 5) \
- SC( 11, 7, 5, 3, no, yes, 5) \
- SC( 12, 7, 5, 4, no, yes, 5) \
- \
- SC( 13, 8, 6, 1, no, yes, 6) \
- SC( 14, 8, 6, 2, no, yes, 6) \
- SC( 15, 8, 6, 3, no, yes, 6) \
- SC( 16, 8, 6, 4, no, yes, 6) \
- \
- SC( 17, 9, 7, 1, no, yes, 7) \
- SC( 18, 9, 7, 2, no, yes, 7) \
- SC( 19, 9, 7, 3, no, yes, 7) \
- SC( 20, 9, 7, 4, no, yes, 7) \
- \
- SC( 21, 10, 8, 1, no, yes, 8) \
- SC( 22, 10, 8, 2, no, yes, 8) \
- SC( 23, 10, 8, 3, no, yes, 8) \
- SC( 24, 10, 8, 4, no, yes, 8) \
- \
- SC( 25, 11, 9, 1, no, yes, 9) \
- SC( 26, 11, 9, 2, no, yes, 9) \
- SC( 27, 11, 9, 3, no, yes, 9) \
- SC( 28, 11, 9, 4, no, yes, 9) \
- \
- SC( 29, 12, 10, 1, no, yes, no) \
- SC( 30, 12, 10, 2, no, yes, no) \
- SC( 31, 12, 10, 3, no, yes, no) \
- SC( 32, 12, 10, 4, no, yes, no) \
- \
- SC( 33, 13, 11, 1, no, yes, no) \
- SC( 34, 13, 11, 2, no, yes, no) \
- SC( 35, 13, 11, 3, no, yes, no) \
- SC( 36, 13, 11, 4, yes, yes, no) \
- \
- SC( 37, 14, 12, 1, no, yes, no) \
- SC( 38, 14, 12, 2, no, yes, no) \
- SC( 39, 14, 12, 3, no, yes, no) \
- SC( 40, 14, 12, 4, yes, yes, no) \
- \
- SC( 41, 15, 13, 1, no, yes, no) \
- SC( 42, 15, 13, 2, yes, yes, no) \
- SC( 43, 15, 13, 3, no, yes, no) \
- SC( 44, 15, 13, 4, yes, no, no) \
- \
- SC( 45, 16, 14, 1, yes, no, no) \
- SC( 46, 16, 14, 2, yes, no, no) \
- SC( 47, 16, 14, 3, yes, no, no) \
- SC( 48, 16, 14, 4, yes, no, no) \
- \
- SC( 49, 17, 15, 1, yes, no, no) \
- SC( 50, 17, 15, 2, yes, no, no) \
- SC( 51, 17, 15, 3, yes, no, no) \
- SC( 52, 17, 15, 4, yes, no, no) \
- \
- SC( 53, 18, 16, 1, yes, no, no) \
- SC( 54, 18, 16, 2, yes, no, no) \
- SC( 55, 18, 16, 3, yes, no, no) \
- SC( 56, 18, 16, 4, yes, no, no) \
- \
- SC( 57, 19, 17, 1, yes, no, no) \
- SC( 58, 19, 17, 2, yes, no, no) \
- SC( 59, 19, 17, 3, yes, no, no) \
- SC( 60, 19, 17, 4, yes, no, no) \
- \
- SC( 61, 20, 18, 1, yes, no, no) \
- SC( 62, 20, 18, 2, yes, no, no) \
- SC( 63, 20, 18, 3, yes, no, no) \
- SC( 64, 20, 18, 4, yes, no, no) \
- \
- SC( 65, 21, 19, 1, yes, no, no) \
- SC( 66, 21, 19, 2, yes, no, no) \
- SC( 67, 21, 19, 3, yes, no, no) \
- SC( 68, 21, 19, 4, yes, no, no) \
- \
- SC( 69, 22, 20, 1, yes, no, no) \
- SC( 70, 22, 20, 2, yes, no, no) \
- SC( 71, 22, 20, 3, yes, no, no) \
- SC( 72, 22, 20, 4, yes, no, no) \
- \
- SC( 73, 23, 21, 1, yes, no, no) \
- SC( 74, 23, 21, 2, yes, no, no) \
- SC( 75, 23, 21, 3, yes, no, no) \
- SC( 76, 23, 21, 4, yes, no, no) \
- \
- SC( 77, 24, 22, 1, yes, no, no) \
- SC( 78, 24, 22, 2, yes, no, no) \
- SC( 79, 24, 22, 3, yes, no, no) \
- SC( 80, 24, 22, 4, yes, no, no) \
- \
- SC( 81, 25, 23, 1, yes, no, no) \
- SC( 82, 25, 23, 2, yes, no, no) \
- SC( 83, 25, 23, 3, yes, no, no) \
- SC( 84, 25, 23, 4, yes, no, no) \
- \
- SC( 85, 26, 24, 1, yes, no, no) \
- SC( 86, 26, 24, 2, yes, no, no) \
- SC( 87, 26, 24, 3, yes, no, no) \
- SC( 88, 26, 24, 4, yes, no, no) \
- \
- SC( 89, 27, 25, 1, yes, no, no) \
- SC( 90, 27, 25, 2, yes, no, no) \
- SC( 91, 27, 25, 3, yes, no, no) \
- SC( 92, 27, 25, 4, yes, no, no) \
- \
- SC( 93, 28, 26, 1, yes, no, no) \
- SC( 94, 28, 26, 2, yes, no, no) \
- SC( 95, 28, 26, 3, yes, no, no) \
- SC( 96, 28, 26, 4, yes, no, no) \
- \
- SC( 97, 29, 27, 1, yes, no, no) \
- SC( 98, 29, 27, 2, yes, no, no) \
- SC( 99, 29, 27, 3, yes, no, no) \
- SC(100, 29, 27, 4, yes, no, no) \
- \
- SC(101, 30, 28, 1, yes, no, no) \
- SC(102, 30, 28, 2, yes, no, no) \
- SC(103, 30, 28, 3, yes, no, no) \
+#define SIZE_CLASSES \
+ /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \
+ SC( 0, 3, 3, 0, no, yes, 1, 3) \
+ \
+ SC( 1, 3, 3, 1, no, yes, 1, 3) \
+ SC( 2, 4, 4, 1, no, yes, 1, 4) \
+ SC( 3, 4, 4, 2, no, yes, 3, 4) \
+ SC( 4, 4, 4, 3, no, yes, 1, 4) \
+ \
+ SC( 5, 6, 4, 1, no, yes, 5, 4) \
+ SC( 6, 6, 4, 2, no, yes, 3, 4) \
+ SC( 7, 6, 4, 3, no, yes, 7, 4) \
+ SC( 8, 6, 4, 4, no, yes, 1, 4) \
+ \
+ SC( 9, 7, 5, 1, no, yes, 5, 5) \
+ SC( 10, 7, 5, 2, no, yes, 3, 5) \
+ SC( 11, 7, 5, 3, no, yes, 7, 5) \
+ SC( 12, 7, 5, 4, no, yes, 1, 5) \
+ \
+ SC( 13, 8, 6, 1, no, yes, 5, 6) \
+ SC( 14, 8, 6, 2, no, yes, 3, 6) \
+ SC( 15, 8, 6, 3, no, yes, 7, 6) \
+ SC( 16, 8, 6, 4, no, yes, 1, 6) \
+ \
+ SC( 17, 9, 7, 1, no, yes, 5, 7) \
+ SC( 18, 9, 7, 2, no, yes, 3, 7) \
+ SC( 19, 9, 7, 3, no, yes, 7, 7) \
+ SC( 20, 9, 7, 4, no, yes, 1, 7) \
+ \
+ SC( 21, 10, 8, 1, no, yes, 5, 8) \
+ SC( 22, 10, 8, 2, no, yes, 3, 8) \
+ SC( 23, 10, 8, 3, no, yes, 7, 8) \
+ SC( 24, 10, 8, 4, no, yes, 1, 8) \
+ \
+ SC( 25, 11, 9, 1, no, yes, 5, 9) \
+ SC( 26, 11, 9, 2, no, yes, 3, 9) \
+ SC( 27, 11, 9, 3, no, yes, 7, 9) \
+ SC( 28, 11, 9, 4, no, yes, 1, 9) \
+ \
+ SC( 29, 12, 10, 1, no, yes, 5, no) \
+ SC( 30, 12, 10, 2, no, yes, 3, no) \
+ SC( 31, 12, 10, 3, no, yes, 7, no) \
+ SC( 32, 12, 10, 4, no, yes, 1, no) \
+ \
+ SC( 33, 13, 11, 1, no, yes, 5, no) \
+ SC( 34, 13, 11, 2, no, yes, 3, no) \
+ SC( 35, 13, 11, 3, no, yes, 7, no) \
+ SC( 36, 13, 11, 4, yes, yes, 1, no) \
+ \
+ SC( 37, 14, 12, 1, no, yes, 5, no) \
+ SC( 38, 14, 12, 2, no, yes, 3, no) \
+ SC( 39, 14, 12, 3, no, yes, 7, no) \
+ SC( 40, 14, 12, 4, yes, yes, 2, no) \
+ \
+ SC( 41, 15, 13, 1, no, yes, 5, no) \
+ SC( 42, 15, 13, 2, yes, yes, 3, no) \
+ SC( 43, 15, 13, 3, no, yes, 7, no) \
+ SC( 44, 15, 13, 4, yes, no, 0, no) \
+ \
+ SC( 45, 16, 14, 1, yes, no, 0, no) \
+ SC( 46, 16, 14, 2, yes, no, 0, no) \
+ SC( 47, 16, 14, 3, yes, no, 0, no) \
+ SC( 48, 16, 14, 4, yes, no, 0, no) \
+ \
+ SC( 49, 17, 15, 1, yes, no, 0, no) \
+ SC( 50, 17, 15, 2, yes, no, 0, no) \
+ SC( 51, 17, 15, 3, yes, no, 0, no) \
+ SC( 52, 17, 15, 4, yes, no, 0, no) \
+ \
+ SC( 53, 18, 16, 1, yes, no, 0, no) \
+ SC( 54, 18, 16, 2, yes, no, 0, no) \
+ SC( 55, 18, 16, 3, yes, no, 0, no) \
+ SC( 56, 18, 16, 4, yes, no, 0, no) \
+ \
+ SC( 57, 19, 17, 1, yes, no, 0, no) \
+ SC( 58, 19, 17, 2, yes, no, 0, no) \
+ SC( 59, 19, 17, 3, yes, no, 0, no) \
+ SC( 60, 19, 17, 4, yes, no, 0, no) \
+ \
+ SC( 61, 20, 18, 1, yes, no, 0, no) \
+ SC( 62, 20, 18, 2, yes, no, 0, no) \
+ SC( 63, 20, 18, 3, yes, no, 0, no) \
+ SC( 64, 20, 18, 4, yes, no, 0, no) \
+ \
+ SC( 65, 21, 19, 1, yes, no, 0, no) \
+ SC( 66, 21, 19, 2, yes, no, 0, no) \
+ SC( 67, 21, 19, 3, yes, no, 0, no) \
+ SC( 68, 21, 19, 4, yes, no, 0, no) \
+ \
+ SC( 69, 22, 20, 1, yes, no, 0, no) \
+ SC( 70, 22, 20, 2, yes, no, 0, no) \
+ SC( 71, 22, 20, 3, yes, no, 0, no) \
+ SC( 72, 22, 20, 4, yes, no, 0, no) \
+ \
+ SC( 73, 23, 21, 1, yes, no, 0, no) \
+ SC( 74, 23, 21, 2, yes, no, 0, no) \
+ SC( 75, 23, 21, 3, yes, no, 0, no) \
+ SC( 76, 23, 21, 4, yes, no, 0, no) \
+ \
+ SC( 77, 24, 22, 1, yes, no, 0, no) \
+ SC( 78, 24, 22, 2, yes, no, 0, no) \
+ SC( 79, 24, 22, 3, yes, no, 0, no) \
+ SC( 80, 24, 22, 4, yes, no, 0, no) \
+ \
+ SC( 81, 25, 23, 1, yes, no, 0, no) \
+ SC( 82, 25, 23, 2, yes, no, 0, no) \
+ SC( 83, 25, 23, 3, yes, no, 0, no) \
+ SC( 84, 25, 23, 4, yes, no, 0, no) \
+ \
+ SC( 85, 26, 24, 1, yes, no, 0, no) \
+ SC( 86, 26, 24, 2, yes, no, 0, no) \
+ SC( 87, 26, 24, 3, yes, no, 0, no) \
+ SC( 88, 26, 24, 4, yes, no, 0, no) \
+ \
+ SC( 89, 27, 25, 1, yes, no, 0, no) \
+ SC( 90, 27, 25, 2, yes, no, 0, no) \
+ SC( 91, 27, 25, 3, yes, no, 0, no) \
+ SC( 92, 27, 25, 4, yes, no, 0, no) \
+ \
+ SC( 93, 28, 26, 1, yes, no, 0, no) \
+ SC( 94, 28, 26, 2, yes, no, 0, no) \
+ SC( 95, 28, 26, 3, yes, no, 0, no) \
+ SC( 96, 28, 26, 4, yes, no, 0, no) \
+ \
+ SC( 97, 29, 27, 1, yes, no, 0, no) \
+ SC( 98, 29, 27, 2, yes, no, 0, no) \
+ SC( 99, 29, 27, 3, yes, no, 0, no) \
+ SC(100, 29, 27, 4, yes, no, 0, no) \
+ \
+ SC(101, 30, 28, 1, yes, no, 0, no) \
+ SC(102, 30, 28, 2, yes, no, 0, no) \
+ SC(103, 30, 28, 3, yes, no, 0, no) \
-#define SIZE_CLASSES_DEFINED
-#define NTBINS 1
-#define NLBINS 29
-#define NBINS 44
-#define NSIZES 104
-#define NPSIZES 63
-#define LG_TINY_MAXCLASS 3
-#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
-#define SMALL_MAXCLASS ((((size_t)1) << 15) + (((size_t)3) << 13))
-#define LG_LARGE_MINCLASS 16
-#define HUGE_MAXCLASS ((((size_t)1) << 30) + (((size_t)3) << 28))
+#define SIZE_CLASSES_DEFINED
+#define NTBINS 1
+#define NLBINS 29
+#define NBINS 44
+#define NSIZES 104
+#define LG_CEIL_NSIZES 7
+#define NPSIZES 63
+#define LG_TINY_MAXCLASS 3
+#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
+#define SMALL_MAXCLASS ((((size_t)1) << 15) + (((size_t)3) << 13))
+#define LG_LARGE_MINCLASS 16
+#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)
+#define LARGE_MAXCLASS ((((size_t)1) << 30) + (((size_t)3) << 28))
#endif
#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 16)
-#define SIZE_CLASSES \
- /* index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup */ \
- SC( 0, 3, 3, 0, no, yes, 3) \
- \
- SC( 1, 3, 3, 1, no, yes, 3) \
- SC( 2, 4, 4, 1, no, yes, 4) \
- SC( 3, 4, 4, 2, no, yes, 4) \
- SC( 4, 4, 4, 3, no, yes, 4) \
- \
- SC( 5, 6, 4, 1, no, yes, 4) \
- SC( 6, 6, 4, 2, no, yes, 4) \
- SC( 7, 6, 4, 3, no, yes, 4) \
- SC( 8, 6, 4, 4, no, yes, 4) \
- \
- SC( 9, 7, 5, 1, no, yes, 5) \
- SC( 10, 7, 5, 2, no, yes, 5) \
- SC( 11, 7, 5, 3, no, yes, 5) \
- SC( 12, 7, 5, 4, no, yes, 5) \
- \
- SC( 13, 8, 6, 1, no, yes, 6) \
- SC( 14, 8, 6, 2, no, yes, 6) \
- SC( 15, 8, 6, 3, no, yes, 6) \
- SC( 16, 8, 6, 4, no, yes, 6) \
- \
- SC( 17, 9, 7, 1, no, yes, 7) \
- SC( 18, 9, 7, 2, no, yes, 7) \
- SC( 19, 9, 7, 3, no, yes, 7) \
- SC( 20, 9, 7, 4, no, yes, 7) \
- \
- SC( 21, 10, 8, 1, no, yes, 8) \
- SC( 22, 10, 8, 2, no, yes, 8) \
- SC( 23, 10, 8, 3, no, yes, 8) \
- SC( 24, 10, 8, 4, no, yes, 8) \
- \
- SC( 25, 11, 9, 1, no, yes, 9) \
- SC( 26, 11, 9, 2, no, yes, 9) \
- SC( 27, 11, 9, 3, no, yes, 9) \
- SC( 28, 11, 9, 4, no, yes, 9) \
- \
- SC( 29, 12, 10, 1, no, yes, no) \
- SC( 30, 12, 10, 2, no, yes, no) \
- SC( 31, 12, 10, 3, no, yes, no) \
- SC( 32, 12, 10, 4, no, yes, no) \
- \
- SC( 33, 13, 11, 1, no, yes, no) \
- SC( 34, 13, 11, 2, no, yes, no) \
- SC( 35, 13, 11, 3, no, yes, no) \
- SC( 36, 13, 11, 4, no, yes, no) \
- \
- SC( 37, 14, 12, 1, no, yes, no) \
- SC( 38, 14, 12, 2, no, yes, no) \
- SC( 39, 14, 12, 3, no, yes, no) \
- SC( 40, 14, 12, 4, no, yes, no) \
- \
- SC( 41, 15, 13, 1, no, yes, no) \
- SC( 42, 15, 13, 2, no, yes, no) \
- SC( 43, 15, 13, 3, no, yes, no) \
- SC( 44, 15, 13, 4, yes, yes, no) \
- \
- SC( 45, 16, 14, 1, no, yes, no) \
- SC( 46, 16, 14, 2, no, yes, no) \
- SC( 47, 16, 14, 3, no, yes, no) \
- SC( 48, 16, 14, 4, yes, yes, no) \
- \
- SC( 49, 17, 15, 1, no, yes, no) \
- SC( 50, 17, 15, 2, yes, yes, no) \
- SC( 51, 17, 15, 3, no, yes, no) \
- SC( 52, 17, 15, 4, yes, no, no) \
- \
- SC( 53, 18, 16, 1, yes, no, no) \
- SC( 54, 18, 16, 2, yes, no, no) \
- SC( 55, 18, 16, 3, yes, no, no) \
- SC( 56, 18, 16, 4, yes, no, no) \
- \
- SC( 57, 19, 17, 1, yes, no, no) \
- SC( 58, 19, 17, 2, yes, no, no) \
- SC( 59, 19, 17, 3, yes, no, no) \
- SC( 60, 19, 17, 4, yes, no, no) \
- \
- SC( 61, 20, 18, 1, yes, no, no) \
- SC( 62, 20, 18, 2, yes, no, no) \
- SC( 63, 20, 18, 3, yes, no, no) \
- SC( 64, 20, 18, 4, yes, no, no) \
- \
- SC( 65, 21, 19, 1, yes, no, no) \
- SC( 66, 21, 19, 2, yes, no, no) \
- SC( 67, 21, 19, 3, yes, no, no) \
- SC( 68, 21, 19, 4, yes, no, no) \
- \
- SC( 69, 22, 20, 1, yes, no, no) \
- SC( 70, 22, 20, 2, yes, no, no) \
- SC( 71, 22, 20, 3, yes, no, no) \
- SC( 72, 22, 20, 4, yes, no, no) \
- \
- SC( 73, 23, 21, 1, yes, no, no) \
- SC( 74, 23, 21, 2, yes, no, no) \
- SC( 75, 23, 21, 3, yes, no, no) \
- SC( 76, 23, 21, 4, yes, no, no) \
- \
- SC( 77, 24, 22, 1, yes, no, no) \
- SC( 78, 24, 22, 2, yes, no, no) \
- SC( 79, 24, 22, 3, yes, no, no) \
- SC( 80, 24, 22, 4, yes, no, no) \
- \
- SC( 81, 25, 23, 1, yes, no, no) \
- SC( 82, 25, 23, 2, yes, no, no) \
- SC( 83, 25, 23, 3, yes, no, no) \
- SC( 84, 25, 23, 4, yes, no, no) \
- \
- SC( 85, 26, 24, 1, yes, no, no) \
- SC( 86, 26, 24, 2, yes, no, no) \
- SC( 87, 26, 24, 3, yes, no, no) \
- SC( 88, 26, 24, 4, yes, no, no) \
- \
- SC( 89, 27, 25, 1, yes, no, no) \
- SC( 90, 27, 25, 2, yes, no, no) \
- SC( 91, 27, 25, 3, yes, no, no) \
- SC( 92, 27, 25, 4, yes, no, no) \
- \
- SC( 93, 28, 26, 1, yes, no, no) \
- SC( 94, 28, 26, 2, yes, no, no) \
- SC( 95, 28, 26, 3, yes, no, no) \
- SC( 96, 28, 26, 4, yes, no, no) \
- \
- SC( 97, 29, 27, 1, yes, no, no) \
- SC( 98, 29, 27, 2, yes, no, no) \
- SC( 99, 29, 27, 3, yes, no, no) \
- SC(100, 29, 27, 4, yes, no, no) \
- \
- SC(101, 30, 28, 1, yes, no, no) \
- SC(102, 30, 28, 2, yes, no, no) \
- SC(103, 30, 28, 3, yes, no, no) \
+#define SIZE_CLASSES \
+ /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \
+ SC( 0, 3, 3, 0, no, yes, 1, 3) \
+ \
+ SC( 1, 3, 3, 1, no, yes, 1, 3) \
+ SC( 2, 4, 4, 1, no, yes, 1, 4) \
+ SC( 3, 4, 4, 2, no, yes, 3, 4) \
+ SC( 4, 4, 4, 3, no, yes, 1, 4) \
+ \
+ SC( 5, 6, 4, 1, no, yes, 5, 4) \
+ SC( 6, 6, 4, 2, no, yes, 3, 4) \
+ SC( 7, 6, 4, 3, no, yes, 7, 4) \
+ SC( 8, 6, 4, 4, no, yes, 1, 4) \
+ \
+ SC( 9, 7, 5, 1, no, yes, 5, 5) \
+ SC( 10, 7, 5, 2, no, yes, 3, 5) \
+ SC( 11, 7, 5, 3, no, yes, 7, 5) \
+ SC( 12, 7, 5, 4, no, yes, 1, 5) \
+ \
+ SC( 13, 8, 6, 1, no, yes, 5, 6) \
+ SC( 14, 8, 6, 2, no, yes, 3, 6) \
+ SC( 15, 8, 6, 3, no, yes, 7, 6) \
+ SC( 16, 8, 6, 4, no, yes, 1, 6) \
+ \
+ SC( 17, 9, 7, 1, no, yes, 5, 7) \
+ SC( 18, 9, 7, 2, no, yes, 3, 7) \
+ SC( 19, 9, 7, 3, no, yes, 7, 7) \
+ SC( 20, 9, 7, 4, no, yes, 1, 7) \
+ \
+ SC( 21, 10, 8, 1, no, yes, 5, 8) \
+ SC( 22, 10, 8, 2, no, yes, 3, 8) \
+ SC( 23, 10, 8, 3, no, yes, 7, 8) \
+ SC( 24, 10, 8, 4, no, yes, 1, 8) \
+ \
+ SC( 25, 11, 9, 1, no, yes, 5, 9) \
+ SC( 26, 11, 9, 2, no, yes, 3, 9) \
+ SC( 27, 11, 9, 3, no, yes, 7, 9) \
+ SC( 28, 11, 9, 4, no, yes, 1, 9) \
+ \
+ SC( 29, 12, 10, 1, no, yes, 5, no) \
+ SC( 30, 12, 10, 2, no, yes, 3, no) \
+ SC( 31, 12, 10, 3, no, yes, 7, no) \
+ SC( 32, 12, 10, 4, no, yes, 1, no) \
+ \
+ SC( 33, 13, 11, 1, no, yes, 5, no) \
+ SC( 34, 13, 11, 2, no, yes, 3, no) \
+ SC( 35, 13, 11, 3, no, yes, 7, no) \
+ SC( 36, 13, 11, 4, no, yes, 1, no) \
+ \
+ SC( 37, 14, 12, 1, no, yes, 5, no) \
+ SC( 38, 14, 12, 2, no, yes, 3, no) \
+ SC( 39, 14, 12, 3, no, yes, 7, no) \
+ SC( 40, 14, 12, 4, no, yes, 1, no) \
+ \
+ SC( 41, 15, 13, 1, no, yes, 5, no) \
+ SC( 42, 15, 13, 2, no, yes, 3, no) \
+ SC( 43, 15, 13, 3, no, yes, 7, no) \
+ SC( 44, 15, 13, 4, yes, yes, 1, no) \
+ \
+ SC( 45, 16, 14, 1, no, yes, 5, no) \
+ SC( 46, 16, 14, 2, no, yes, 3, no) \
+ SC( 47, 16, 14, 3, no, yes, 7, no) \
+ SC( 48, 16, 14, 4, yes, yes, 2, no) \
+ \
+ SC( 49, 17, 15, 1, no, yes, 5, no) \
+ SC( 50, 17, 15, 2, yes, yes, 3, no) \
+ SC( 51, 17, 15, 3, no, yes, 7, no) \
+ SC( 52, 17, 15, 4, yes, no, 0, no) \
+ \
+ SC( 53, 18, 16, 1, yes, no, 0, no) \
+ SC( 54, 18, 16, 2, yes, no, 0, no) \
+ SC( 55, 18, 16, 3, yes, no, 0, no) \
+ SC( 56, 18, 16, 4, yes, no, 0, no) \
+ \
+ SC( 57, 19, 17, 1, yes, no, 0, no) \
+ SC( 58, 19, 17, 2, yes, no, 0, no) \
+ SC( 59, 19, 17, 3, yes, no, 0, no) \
+ SC( 60, 19, 17, 4, yes, no, 0, no) \
+ \
+ SC( 61, 20, 18, 1, yes, no, 0, no) \
+ SC( 62, 20, 18, 2, yes, no, 0, no) \
+ SC( 63, 20, 18, 3, yes, no, 0, no) \
+ SC( 64, 20, 18, 4, yes, no, 0, no) \
+ \
+ SC( 65, 21, 19, 1, yes, no, 0, no) \
+ SC( 66, 21, 19, 2, yes, no, 0, no) \
+ SC( 67, 21, 19, 3, yes, no, 0, no) \
+ SC( 68, 21, 19, 4, yes, no, 0, no) \
+ \
+ SC( 69, 22, 20, 1, yes, no, 0, no) \
+ SC( 70, 22, 20, 2, yes, no, 0, no) \
+ SC( 71, 22, 20, 3, yes, no, 0, no) \
+ SC( 72, 22, 20, 4, yes, no, 0, no) \
+ \
+ SC( 73, 23, 21, 1, yes, no, 0, no) \
+ SC( 74, 23, 21, 2, yes, no, 0, no) \
+ SC( 75, 23, 21, 3, yes, no, 0, no) \
+ SC( 76, 23, 21, 4, yes, no, 0, no) \
+ \
+ SC( 77, 24, 22, 1, yes, no, 0, no) \
+ SC( 78, 24, 22, 2, yes, no, 0, no) \
+ SC( 79, 24, 22, 3, yes, no, 0, no) \
+ SC( 80, 24, 22, 4, yes, no, 0, no) \
+ \
+ SC( 81, 25, 23, 1, yes, no, 0, no) \
+ SC( 82, 25, 23, 2, yes, no, 0, no) \
+ SC( 83, 25, 23, 3, yes, no, 0, no) \
+ SC( 84, 25, 23, 4, yes, no, 0, no) \
+ \
+ SC( 85, 26, 24, 1, yes, no, 0, no) \
+ SC( 86, 26, 24, 2, yes, no, 0, no) \
+ SC( 87, 26, 24, 3, yes, no, 0, no) \
+ SC( 88, 26, 24, 4, yes, no, 0, no) \
+ \
+ SC( 89, 27, 25, 1, yes, no, 0, no) \
+ SC( 90, 27, 25, 2, yes, no, 0, no) \
+ SC( 91, 27, 25, 3, yes, no, 0, no) \
+ SC( 92, 27, 25, 4, yes, no, 0, no) \
+ \
+ SC( 93, 28, 26, 1, yes, no, 0, no) \
+ SC( 94, 28, 26, 2, yes, no, 0, no) \
+ SC( 95, 28, 26, 3, yes, no, 0, no) \
+ SC( 96, 28, 26, 4, yes, no, 0, no) \
+ \
+ SC( 97, 29, 27, 1, yes, no, 0, no) \
+ SC( 98, 29, 27, 2, yes, no, 0, no) \
+ SC( 99, 29, 27, 3, yes, no, 0, no) \
+ SC(100, 29, 27, 4, yes, no, 0, no) \
+ \
+ SC(101, 30, 28, 1, yes, no, 0, no) \
+ SC(102, 30, 28, 2, yes, no, 0, no) \
+ SC(103, 30, 28, 3, yes, no, 0, no) \
-#define SIZE_CLASSES_DEFINED
-#define NTBINS 1
-#define NLBINS 29
-#define NBINS 52
-#define NSIZES 104
-#define NPSIZES 55
-#define LG_TINY_MAXCLASS 3
-#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
-#define SMALL_MAXCLASS ((((size_t)1) << 17) + (((size_t)3) << 15))
-#define LG_LARGE_MINCLASS 18
-#define HUGE_MAXCLASS ((((size_t)1) << 30) + (((size_t)3) << 28))
+#define SIZE_CLASSES_DEFINED
+#define NTBINS 1
+#define NLBINS 29
+#define NBINS 52
+#define NSIZES 104
+#define LG_CEIL_NSIZES 7
+#define NPSIZES 55
+#define LG_TINY_MAXCLASS 3
+#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
+#define SMALL_MAXCLASS ((((size_t)1) << 17) + (((size_t)3) << 15))
+#define LG_LARGE_MINCLASS 18
+#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)
+#define LARGE_MAXCLASS ((((size_t)1) << 30) + (((size_t)3) << 28))
#endif
#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 12)
-#define SIZE_CLASSES \
- /* index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup */ \
- SC( 0, 4, 4, 0, no, yes, 4) \
- SC( 1, 4, 4, 1, no, yes, 4) \
- SC( 2, 4, 4, 2, no, yes, 4) \
- SC( 3, 4, 4, 3, no, yes, 4) \
- \
- SC( 4, 6, 4, 1, no, yes, 4) \
- SC( 5, 6, 4, 2, no, yes, 4) \
- SC( 6, 6, 4, 3, no, yes, 4) \
- SC( 7, 6, 4, 4, no, yes, 4) \
- \
- SC( 8, 7, 5, 1, no, yes, 5) \
- SC( 9, 7, 5, 2, no, yes, 5) \
- SC( 10, 7, 5, 3, no, yes, 5) \
- SC( 11, 7, 5, 4, no, yes, 5) \
- \
- SC( 12, 8, 6, 1, no, yes, 6) \
- SC( 13, 8, 6, 2, no, yes, 6) \
- SC( 14, 8, 6, 3, no, yes, 6) \
- SC( 15, 8, 6, 4, no, yes, 6) \
- \
- SC( 16, 9, 7, 1, no, yes, 7) \
- SC( 17, 9, 7, 2, no, yes, 7) \
- SC( 18, 9, 7, 3, no, yes, 7) \
- SC( 19, 9, 7, 4, no, yes, 7) \
- \
- SC( 20, 10, 8, 1, no, yes, 8) \
- SC( 21, 10, 8, 2, no, yes, 8) \
- SC( 22, 10, 8, 3, no, yes, 8) \
- SC( 23, 10, 8, 4, no, yes, 8) \
- \
- SC( 24, 11, 9, 1, no, yes, 9) \
- SC( 25, 11, 9, 2, no, yes, 9) \
- SC( 26, 11, 9, 3, no, yes, 9) \
- SC( 27, 11, 9, 4, yes, yes, 9) \
- \
- SC( 28, 12, 10, 1, no, yes, no) \
- SC( 29, 12, 10, 2, no, yes, no) \
- SC( 30, 12, 10, 3, no, yes, no) \
- SC( 31, 12, 10, 4, yes, yes, no) \
- \
- SC( 32, 13, 11, 1, no, yes, no) \
- SC( 33, 13, 11, 2, yes, yes, no) \
- SC( 34, 13, 11, 3, no, yes, no) \
- SC( 35, 13, 11, 4, yes, no, no) \
- \
- SC( 36, 14, 12, 1, yes, no, no) \
- SC( 37, 14, 12, 2, yes, no, no) \
- SC( 38, 14, 12, 3, yes, no, no) \
- SC( 39, 14, 12, 4, yes, no, no) \
- \
- SC( 40, 15, 13, 1, yes, no, no) \
- SC( 41, 15, 13, 2, yes, no, no) \
- SC( 42, 15, 13, 3, yes, no, no) \
- SC( 43, 15, 13, 4, yes, no, no) \
- \
- SC( 44, 16, 14, 1, yes, no, no) \
- SC( 45, 16, 14, 2, yes, no, no) \
- SC( 46, 16, 14, 3, yes, no, no) \
- SC( 47, 16, 14, 4, yes, no, no) \
- \
- SC( 48, 17, 15, 1, yes, no, no) \
- SC( 49, 17, 15, 2, yes, no, no) \
- SC( 50, 17, 15, 3, yes, no, no) \
- SC( 51, 17, 15, 4, yes, no, no) \
- \
- SC( 52, 18, 16, 1, yes, no, no) \
- SC( 53, 18, 16, 2, yes, no, no) \
- SC( 54, 18, 16, 3, yes, no, no) \
- SC( 55, 18, 16, 4, yes, no, no) \
- \
- SC( 56, 19, 17, 1, yes, no, no) \
- SC( 57, 19, 17, 2, yes, no, no) \
- SC( 58, 19, 17, 3, yes, no, no) \
- SC( 59, 19, 17, 4, yes, no, no) \
- \
- SC( 60, 20, 18, 1, yes, no, no) \
- SC( 61, 20, 18, 2, yes, no, no) \
- SC( 62, 20, 18, 3, yes, no, no) \
- SC( 63, 20, 18, 4, yes, no, no) \
- \
- SC( 64, 21, 19, 1, yes, no, no) \
- SC( 65, 21, 19, 2, yes, no, no) \
- SC( 66, 21, 19, 3, yes, no, no) \
- SC( 67, 21, 19, 4, yes, no, no) \
- \
- SC( 68, 22, 20, 1, yes, no, no) \
- SC( 69, 22, 20, 2, yes, no, no) \
- SC( 70, 22, 20, 3, yes, no, no) \
- SC( 71, 22, 20, 4, yes, no, no) \
- \
- SC( 72, 23, 21, 1, yes, no, no) \
- SC( 73, 23, 21, 2, yes, no, no) \
- SC( 74, 23, 21, 3, yes, no, no) \
- SC( 75, 23, 21, 4, yes, no, no) \
- \
- SC( 76, 24, 22, 1, yes, no, no) \
- SC( 77, 24, 22, 2, yes, no, no) \
- SC( 78, 24, 22, 3, yes, no, no) \
- SC( 79, 24, 22, 4, yes, no, no) \
- \
- SC( 80, 25, 23, 1, yes, no, no) \
- SC( 81, 25, 23, 2, yes, no, no) \
- SC( 82, 25, 23, 3, yes, no, no) \
- SC( 83, 25, 23, 4, yes, no, no) \
- \
- SC( 84, 26, 24, 1, yes, no, no) \
- SC( 85, 26, 24, 2, yes, no, no) \
- SC( 86, 26, 24, 3, yes, no, no) \
- SC( 87, 26, 24, 4, yes, no, no) \
- \
- SC( 88, 27, 25, 1, yes, no, no) \
- SC( 89, 27, 25, 2, yes, no, no) \
- SC( 90, 27, 25, 3, yes, no, no) \
- SC( 91, 27, 25, 4, yes, no, no) \
- \
- SC( 92, 28, 26, 1, yes, no, no) \
- SC( 93, 28, 26, 2, yes, no, no) \
- SC( 94, 28, 26, 3, yes, no, no) \
- SC( 95, 28, 26, 4, yes, no, no) \
- \
- SC( 96, 29, 27, 1, yes, no, no) \
- SC( 97, 29, 27, 2, yes, no, no) \
- SC( 98, 29, 27, 3, yes, no, no) \
- SC( 99, 29, 27, 4, yes, no, no) \
- \
- SC(100, 30, 28, 1, yes, no, no) \
- SC(101, 30, 28, 2, yes, no, no) \
- SC(102, 30, 28, 3, yes, no, no) \
+#define SIZE_CLASSES \
+ /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \
+ SC( 0, 4, 4, 0, no, yes, 1, 4) \
+ SC( 1, 4, 4, 1, no, yes, 1, 4) \
+ SC( 2, 4, 4, 2, no, yes, 3, 4) \
+ SC( 3, 4, 4, 3, no, yes, 1, 4) \
+ \
+ SC( 4, 6, 4, 1, no, yes, 5, 4) \
+ SC( 5, 6, 4, 2, no, yes, 3, 4) \
+ SC( 6, 6, 4, 3, no, yes, 7, 4) \
+ SC( 7, 6, 4, 4, no, yes, 1, 4) \
+ \
+ SC( 8, 7, 5, 1, no, yes, 5, 5) \
+ SC( 9, 7, 5, 2, no, yes, 3, 5) \
+ SC( 10, 7, 5, 3, no, yes, 7, 5) \
+ SC( 11, 7, 5, 4, no, yes, 1, 5) \
+ \
+ SC( 12, 8, 6, 1, no, yes, 5, 6) \
+ SC( 13, 8, 6, 2, no, yes, 3, 6) \
+ SC( 14, 8, 6, 3, no, yes, 7, 6) \
+ SC( 15, 8, 6, 4, no, yes, 1, 6) \
+ \
+ SC( 16, 9, 7, 1, no, yes, 5, 7) \
+ SC( 17, 9, 7, 2, no, yes, 3, 7) \
+ SC( 18, 9, 7, 3, no, yes, 7, 7) \
+ SC( 19, 9, 7, 4, no, yes, 1, 7) \
+ \
+ SC( 20, 10, 8, 1, no, yes, 5, 8) \
+ SC( 21, 10, 8, 2, no, yes, 3, 8) \
+ SC( 22, 10, 8, 3, no, yes, 7, 8) \
+ SC( 23, 10, 8, 4, no, yes, 1, 8) \
+ \
+ SC( 24, 11, 9, 1, no, yes, 5, 9) \
+ SC( 25, 11, 9, 2, no, yes, 3, 9) \
+ SC( 26, 11, 9, 3, no, yes, 7, 9) \
+ SC( 27, 11, 9, 4, yes, yes, 1, 9) \
+ \
+ SC( 28, 12, 10, 1, no, yes, 5, no) \
+ SC( 29, 12, 10, 2, no, yes, 3, no) \
+ SC( 30, 12, 10, 3, no, yes, 7, no) \
+ SC( 31, 12, 10, 4, yes, yes, 2, no) \
+ \
+ SC( 32, 13, 11, 1, no, yes, 5, no) \
+ SC( 33, 13, 11, 2, yes, yes, 3, no) \
+ SC( 34, 13, 11, 3, no, yes, 7, no) \
+ SC( 35, 13, 11, 4, yes, no, 0, no) \
+ \
+ SC( 36, 14, 12, 1, yes, no, 0, no) \
+ SC( 37, 14, 12, 2, yes, no, 0, no) \
+ SC( 38, 14, 12, 3, yes, no, 0, no) \
+ SC( 39, 14, 12, 4, yes, no, 0, no) \
+ \
+ SC( 40, 15, 13, 1, yes, no, 0, no) \
+ SC( 41, 15, 13, 2, yes, no, 0, no) \
+ SC( 42, 15, 13, 3, yes, no, 0, no) \
+ SC( 43, 15, 13, 4, yes, no, 0, no) \
+ \
+ SC( 44, 16, 14, 1, yes, no, 0, no) \
+ SC( 45, 16, 14, 2, yes, no, 0, no) \
+ SC( 46, 16, 14, 3, yes, no, 0, no) \
+ SC( 47, 16, 14, 4, yes, no, 0, no) \
+ \
+ SC( 48, 17, 15, 1, yes, no, 0, no) \
+ SC( 49, 17, 15, 2, yes, no, 0, no) \
+ SC( 50, 17, 15, 3, yes, no, 0, no) \
+ SC( 51, 17, 15, 4, yes, no, 0, no) \
+ \
+ SC( 52, 18, 16, 1, yes, no, 0, no) \
+ SC( 53, 18, 16, 2, yes, no, 0, no) \
+ SC( 54, 18, 16, 3, yes, no, 0, no) \
+ SC( 55, 18, 16, 4, yes, no, 0, no) \
+ \
+ SC( 56, 19, 17, 1, yes, no, 0, no) \
+ SC( 57, 19, 17, 2, yes, no, 0, no) \
+ SC( 58, 19, 17, 3, yes, no, 0, no) \
+ SC( 59, 19, 17, 4, yes, no, 0, no) \
+ \
+ SC( 60, 20, 18, 1, yes, no, 0, no) \
+ SC( 61, 20, 18, 2, yes, no, 0, no) \
+ SC( 62, 20, 18, 3, yes, no, 0, no) \
+ SC( 63, 20, 18, 4, yes, no, 0, no) \
+ \
+ SC( 64, 21, 19, 1, yes, no, 0, no) \
+ SC( 65, 21, 19, 2, yes, no, 0, no) \
+ SC( 66, 21, 19, 3, yes, no, 0, no) \
+ SC( 67, 21, 19, 4, yes, no, 0, no) \
+ \
+ SC( 68, 22, 20, 1, yes, no, 0, no) \
+ SC( 69, 22, 20, 2, yes, no, 0, no) \
+ SC( 70, 22, 20, 3, yes, no, 0, no) \
+ SC( 71, 22, 20, 4, yes, no, 0, no) \
+ \
+ SC( 72, 23, 21, 1, yes, no, 0, no) \
+ SC( 73, 23, 21, 2, yes, no, 0, no) \
+ SC( 74, 23, 21, 3, yes, no, 0, no) \
+ SC( 75, 23, 21, 4, yes, no, 0, no) \
+ \
+ SC( 76, 24, 22, 1, yes, no, 0, no) \
+ SC( 77, 24, 22, 2, yes, no, 0, no) \
+ SC( 78, 24, 22, 3, yes, no, 0, no) \
+ SC( 79, 24, 22, 4, yes, no, 0, no) \
+ \
+ SC( 80, 25, 23, 1, yes, no, 0, no) \
+ SC( 81, 25, 23, 2, yes, no, 0, no) \
+ SC( 82, 25, 23, 3, yes, no, 0, no) \
+ SC( 83, 25, 23, 4, yes, no, 0, no) \
+ \
+ SC( 84, 26, 24, 1, yes, no, 0, no) \
+ SC( 85, 26, 24, 2, yes, no, 0, no) \
+ SC( 86, 26, 24, 3, yes, no, 0, no) \
+ SC( 87, 26, 24, 4, yes, no, 0, no) \
+ \
+ SC( 88, 27, 25, 1, yes, no, 0, no) \
+ SC( 89, 27, 25, 2, yes, no, 0, no) \
+ SC( 90, 27, 25, 3, yes, no, 0, no) \
+ SC( 91, 27, 25, 4, yes, no, 0, no) \
+ \
+ SC( 92, 28, 26, 1, yes, no, 0, no) \
+ SC( 93, 28, 26, 2, yes, no, 0, no) \
+ SC( 94, 28, 26, 3, yes, no, 0, no) \
+ SC( 95, 28, 26, 4, yes, no, 0, no) \
+ \
+ SC( 96, 29, 27, 1, yes, no, 0, no) \
+ SC( 97, 29, 27, 2, yes, no, 0, no) \
+ SC( 98, 29, 27, 3, yes, no, 0, no) \
+ SC( 99, 29, 27, 4, yes, no, 0, no) \
+ \
+ SC(100, 30, 28, 1, yes, no, 0, no) \
+ SC(101, 30, 28, 2, yes, no, 0, no) \
+ SC(102, 30, 28, 3, yes, no, 0, no) \
-#define SIZE_CLASSES_DEFINED
-#define NTBINS 0
-#define NLBINS 28
-#define NBINS 35
-#define NSIZES 103
-#define NPSIZES 71
-#define LG_TINY_MAXCLASS "NA"
-#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
-#define SMALL_MAXCLASS ((((size_t)1) << 13) + (((size_t)3) << 11))
-#define LG_LARGE_MINCLASS 14
-#define HUGE_MAXCLASS ((((size_t)1) << 30) + (((size_t)3) << 28))
+#define SIZE_CLASSES_DEFINED
+#define NTBINS 0
+#define NLBINS 28
+#define NBINS 35
+#define NSIZES 103
+#define LG_CEIL_NSIZES 7
+#define NPSIZES 71
+#define LG_TINY_MAXCLASS "NA"
+#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
+#define SMALL_MAXCLASS ((((size_t)1) << 13) + (((size_t)3) << 11))
+#define LG_LARGE_MINCLASS 14
+#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)
+#define LARGE_MAXCLASS ((((size_t)1) << 30) + (((size_t)3) << 28))
#endif
#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 13)
-#define SIZE_CLASSES \
- /* index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup */ \
- SC( 0, 4, 4, 0, no, yes, 4) \
- SC( 1, 4, 4, 1, no, yes, 4) \
- SC( 2, 4, 4, 2, no, yes, 4) \
- SC( 3, 4, 4, 3, no, yes, 4) \
- \
- SC( 4, 6, 4, 1, no, yes, 4) \
- SC( 5, 6, 4, 2, no, yes, 4) \
- SC( 6, 6, 4, 3, no, yes, 4) \
- SC( 7, 6, 4, 4, no, yes, 4) \
- \
- SC( 8, 7, 5, 1, no, yes, 5) \
- SC( 9, 7, 5, 2, no, yes, 5) \
- SC( 10, 7, 5, 3, no, yes, 5) \
- SC( 11, 7, 5, 4, no, yes, 5) \
- \
- SC( 12, 8, 6, 1, no, yes, 6) \
- SC( 13, 8, 6, 2, no, yes, 6) \
- SC( 14, 8, 6, 3, no, yes, 6) \
- SC( 15, 8, 6, 4, no, yes, 6) \
- \
- SC( 16, 9, 7, 1, no, yes, 7) \
- SC( 17, 9, 7, 2, no, yes, 7) \
- SC( 18, 9, 7, 3, no, yes, 7) \
- SC( 19, 9, 7, 4, no, yes, 7) \
- \
- SC( 20, 10, 8, 1, no, yes, 8) \
- SC( 21, 10, 8, 2, no, yes, 8) \
- SC( 22, 10, 8, 3, no, yes, 8) \
- SC( 23, 10, 8, 4, no, yes, 8) \
- \
- SC( 24, 11, 9, 1, no, yes, 9) \
- SC( 25, 11, 9, 2, no, yes, 9) \
- SC( 26, 11, 9, 3, no, yes, 9) \
- SC( 27, 11, 9, 4, no, yes, 9) \
- \
- SC( 28, 12, 10, 1, no, yes, no) \
- SC( 29, 12, 10, 2, no, yes, no) \
- SC( 30, 12, 10, 3, no, yes, no) \
- SC( 31, 12, 10, 4, yes, yes, no) \
- \
- SC( 32, 13, 11, 1, no, yes, no) \
- SC( 33, 13, 11, 2, no, yes, no) \
- SC( 34, 13, 11, 3, no, yes, no) \
- SC( 35, 13, 11, 4, yes, yes, no) \
- \
- SC( 36, 14, 12, 1, no, yes, no) \
- SC( 37, 14, 12, 2, yes, yes, no) \
- SC( 38, 14, 12, 3, no, yes, no) \
- SC( 39, 14, 12, 4, yes, no, no) \
- \
- SC( 40, 15, 13, 1, yes, no, no) \
- SC( 41, 15, 13, 2, yes, no, no) \
- SC( 42, 15, 13, 3, yes, no, no) \
- SC( 43, 15, 13, 4, yes, no, no) \
- \
- SC( 44, 16, 14, 1, yes, no, no) \
- SC( 45, 16, 14, 2, yes, no, no) \
- SC( 46, 16, 14, 3, yes, no, no) \
- SC( 47, 16, 14, 4, yes, no, no) \
- \
- SC( 48, 17, 15, 1, yes, no, no) \
- SC( 49, 17, 15, 2, yes, no, no) \
- SC( 50, 17, 15, 3, yes, no, no) \
- SC( 51, 17, 15, 4, yes, no, no) \
- \
- SC( 52, 18, 16, 1, yes, no, no) \
- SC( 53, 18, 16, 2, yes, no, no) \
- SC( 54, 18, 16, 3, yes, no, no) \
- SC( 55, 18, 16, 4, yes, no, no) \
- \
- SC( 56, 19, 17, 1, yes, no, no) \
- SC( 57, 19, 17, 2, yes, no, no) \
- SC( 58, 19, 17, 3, yes, no, no) \
- SC( 59, 19, 17, 4, yes, no, no) \
- \
- SC( 60, 20, 18, 1, yes, no, no) \
- SC( 61, 20, 18, 2, yes, no, no) \
- SC( 62, 20, 18, 3, yes, no, no) \
- SC( 63, 20, 18, 4, yes, no, no) \
- \
- SC( 64, 21, 19, 1, yes, no, no) \
- SC( 65, 21, 19, 2, yes, no, no) \
- SC( 66, 21, 19, 3, yes, no, no) \
- SC( 67, 21, 19, 4, yes, no, no) \
- \
- SC( 68, 22, 20, 1, yes, no, no) \
- SC( 69, 22, 20, 2, yes, no, no) \
- SC( 70, 22, 20, 3, yes, no, no) \
- SC( 71, 22, 20, 4, yes, no, no) \
- \
- SC( 72, 23, 21, 1, yes, no, no) \
- SC( 73, 23, 21, 2, yes, no, no) \
- SC( 74, 23, 21, 3, yes, no, no) \
- SC( 75, 23, 21, 4, yes, no, no) \
- \
- SC( 76, 24, 22, 1, yes, no, no) \
- SC( 77, 24, 22, 2, yes, no, no) \
- SC( 78, 24, 22, 3, yes, no, no) \
- SC( 79, 24, 22, 4, yes, no, no) \
- \
- SC( 80, 25, 23, 1, yes, no, no) \
- SC( 81, 25, 23, 2, yes, no, no) \
- SC( 82, 25, 23, 3, yes, no, no) \
- SC( 83, 25, 23, 4, yes, no, no) \
- \
- SC( 84, 26, 24, 1, yes, no, no) \
- SC( 85, 26, 24, 2, yes, no, no) \
- SC( 86, 26, 24, 3, yes, no, no) \
- SC( 87, 26, 24, 4, yes, no, no) \
- \
- SC( 88, 27, 25, 1, yes, no, no) \
- SC( 89, 27, 25, 2, yes, no, no) \
- SC( 90, 27, 25, 3, yes, no, no) \
- SC( 91, 27, 25, 4, yes, no, no) \
- \
- SC( 92, 28, 26, 1, yes, no, no) \
- SC( 93, 28, 26, 2, yes, no, no) \
- SC( 94, 28, 26, 3, yes, no, no) \
- SC( 95, 28, 26, 4, yes, no, no) \
- \
- SC( 96, 29, 27, 1, yes, no, no) \
- SC( 97, 29, 27, 2, yes, no, no) \
- SC( 98, 29, 27, 3, yes, no, no) \
- SC( 99, 29, 27, 4, yes, no, no) \
- \
- SC(100, 30, 28, 1, yes, no, no) \
- SC(101, 30, 28, 2, yes, no, no) \
- SC(102, 30, 28, 3, yes, no, no) \
+#define SIZE_CLASSES \
+ /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \
+ SC( 0, 4, 4, 0, no, yes, 1, 4) \
+ SC( 1, 4, 4, 1, no, yes, 1, 4) \
+ SC( 2, 4, 4, 2, no, yes, 3, 4) \
+ SC( 3, 4, 4, 3, no, yes, 1, 4) \
+ \
+ SC( 4, 6, 4, 1, no, yes, 5, 4) \
+ SC( 5, 6, 4, 2, no, yes, 3, 4) \
+ SC( 6, 6, 4, 3, no, yes, 7, 4) \
+ SC( 7, 6, 4, 4, no, yes, 1, 4) \
+ \
+ SC( 8, 7, 5, 1, no, yes, 5, 5) \
+ SC( 9, 7, 5, 2, no, yes, 3, 5) \
+ SC( 10, 7, 5, 3, no, yes, 7, 5) \
+ SC( 11, 7, 5, 4, no, yes, 1, 5) \
+ \
+ SC( 12, 8, 6, 1, no, yes, 5, 6) \
+ SC( 13, 8, 6, 2, no, yes, 3, 6) \
+ SC( 14, 8, 6, 3, no, yes, 7, 6) \
+ SC( 15, 8, 6, 4, no, yes, 1, 6) \
+ \
+ SC( 16, 9, 7, 1, no, yes, 5, 7) \
+ SC( 17, 9, 7, 2, no, yes, 3, 7) \
+ SC( 18, 9, 7, 3, no, yes, 7, 7) \
+ SC( 19, 9, 7, 4, no, yes, 1, 7) \
+ \
+ SC( 20, 10, 8, 1, no, yes, 5, 8) \
+ SC( 21, 10, 8, 2, no, yes, 3, 8) \
+ SC( 22, 10, 8, 3, no, yes, 7, 8) \
+ SC( 23, 10, 8, 4, no, yes, 1, 8) \
+ \
+ SC( 24, 11, 9, 1, no, yes, 5, 9) \
+ SC( 25, 11, 9, 2, no, yes, 3, 9) \
+ SC( 26, 11, 9, 3, no, yes, 7, 9) \
+ SC( 27, 11, 9, 4, no, yes, 1, 9) \
+ \
+ SC( 28, 12, 10, 1, no, yes, 5, no) \
+ SC( 29, 12, 10, 2, no, yes, 3, no) \
+ SC( 30, 12, 10, 3, no, yes, 7, no) \
+ SC( 31, 12, 10, 4, yes, yes, 1, no) \
+ \
+ SC( 32, 13, 11, 1, no, yes, 5, no) \
+ SC( 33, 13, 11, 2, no, yes, 3, no) \
+ SC( 34, 13, 11, 3, no, yes, 7, no) \
+ SC( 35, 13, 11, 4, yes, yes, 2, no) \
+ \
+ SC( 36, 14, 12, 1, no, yes, 5, no) \
+ SC( 37, 14, 12, 2, yes, yes, 3, no) \
+ SC( 38, 14, 12, 3, no, yes, 7, no) \
+ SC( 39, 14, 12, 4, yes, no, 0, no) \
+ \
+ SC( 40, 15, 13, 1, yes, no, 0, no) \
+ SC( 41, 15, 13, 2, yes, no, 0, no) \
+ SC( 42, 15, 13, 3, yes, no, 0, no) \
+ SC( 43, 15, 13, 4, yes, no, 0, no) \
+ \
+ SC( 44, 16, 14, 1, yes, no, 0, no) \
+ SC( 45, 16, 14, 2, yes, no, 0, no) \
+ SC( 46, 16, 14, 3, yes, no, 0, no) \
+ SC( 47, 16, 14, 4, yes, no, 0, no) \
+ \
+ SC( 48, 17, 15, 1, yes, no, 0, no) \
+ SC( 49, 17, 15, 2, yes, no, 0, no) \
+ SC( 50, 17, 15, 3, yes, no, 0, no) \
+ SC( 51, 17, 15, 4, yes, no, 0, no) \
+ \
+ SC( 52, 18, 16, 1, yes, no, 0, no) \
+ SC( 53, 18, 16, 2, yes, no, 0, no) \
+ SC( 54, 18, 16, 3, yes, no, 0, no) \
+ SC( 55, 18, 16, 4, yes, no, 0, no) \
+ \
+ SC( 56, 19, 17, 1, yes, no, 0, no) \
+ SC( 57, 19, 17, 2, yes, no, 0, no) \
+ SC( 58, 19, 17, 3, yes, no, 0, no) \
+ SC( 59, 19, 17, 4, yes, no, 0, no) \
+ \
+ SC( 60, 20, 18, 1, yes, no, 0, no) \
+ SC( 61, 20, 18, 2, yes, no, 0, no) \
+ SC( 62, 20, 18, 3, yes, no, 0, no) \
+ SC( 63, 20, 18, 4, yes, no, 0, no) \
+ \
+ SC( 64, 21, 19, 1, yes, no, 0, no) \
+ SC( 65, 21, 19, 2, yes, no, 0, no) \
+ SC( 66, 21, 19, 3, yes, no, 0, no) \
+ SC( 67, 21, 19, 4, yes, no, 0, no) \
+ \
+ SC( 68, 22, 20, 1, yes, no, 0, no) \
+ SC( 69, 22, 20, 2, yes, no, 0, no) \
+ SC( 70, 22, 20, 3, yes, no, 0, no) \
+ SC( 71, 22, 20, 4, yes, no, 0, no) \
+ \
+ SC( 72, 23, 21, 1, yes, no, 0, no) \
+ SC( 73, 23, 21, 2, yes, no, 0, no) \
+ SC( 74, 23, 21, 3, yes, no, 0, no) \
+ SC( 75, 23, 21, 4, yes, no, 0, no) \
+ \
+ SC( 76, 24, 22, 1, yes, no, 0, no) \
+ SC( 77, 24, 22, 2, yes, no, 0, no) \
+ SC( 78, 24, 22, 3, yes, no, 0, no) \
+ SC( 79, 24, 22, 4, yes, no, 0, no) \
+ \
+ SC( 80, 25, 23, 1, yes, no, 0, no) \
+ SC( 81, 25, 23, 2, yes, no, 0, no) \
+ SC( 82, 25, 23, 3, yes, no, 0, no) \
+ SC( 83, 25, 23, 4, yes, no, 0, no) \
+ \
+ SC( 84, 26, 24, 1, yes, no, 0, no) \
+ SC( 85, 26, 24, 2, yes, no, 0, no) \
+ SC( 86, 26, 24, 3, yes, no, 0, no) \
+ SC( 87, 26, 24, 4, yes, no, 0, no) \
+ \
+ SC( 88, 27, 25, 1, yes, no, 0, no) \
+ SC( 89, 27, 25, 2, yes, no, 0, no) \
+ SC( 90, 27, 25, 3, yes, no, 0, no) \
+ SC( 91, 27, 25, 4, yes, no, 0, no) \
+ \
+ SC( 92, 28, 26, 1, yes, no, 0, no) \
+ SC( 93, 28, 26, 2, yes, no, 0, no) \
+ SC( 94, 28, 26, 3, yes, no, 0, no) \
+ SC( 95, 28, 26, 4, yes, no, 0, no) \
+ \
+ SC( 96, 29, 27, 1, yes, no, 0, no) \
+ SC( 97, 29, 27, 2, yes, no, 0, no) \
+ SC( 98, 29, 27, 3, yes, no, 0, no) \
+ SC( 99, 29, 27, 4, yes, no, 0, no) \
+ \
+ SC(100, 30, 28, 1, yes, no, 0, no) \
+ SC(101, 30, 28, 2, yes, no, 0, no) \
+ SC(102, 30, 28, 3, yes, no, 0, no) \
-#define SIZE_CLASSES_DEFINED
-#define NTBINS 0
-#define NLBINS 28
-#define NBINS 39
-#define NSIZES 103
-#define NPSIZES 67
-#define LG_TINY_MAXCLASS "NA"
-#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
-#define SMALL_MAXCLASS ((((size_t)1) << 14) + (((size_t)3) << 12))
-#define LG_LARGE_MINCLASS 15
-#define HUGE_MAXCLASS ((((size_t)1) << 30) + (((size_t)3) << 28))
+#define SIZE_CLASSES_DEFINED
+#define NTBINS 0
+#define NLBINS 28
+#define NBINS 39
+#define NSIZES 103
+#define LG_CEIL_NSIZES 7
+#define NPSIZES 67
+#define LG_TINY_MAXCLASS "NA"
+#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
+#define SMALL_MAXCLASS ((((size_t)1) << 14) + (((size_t)3) << 12))
+#define LG_LARGE_MINCLASS 15
+#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)
+#define LARGE_MAXCLASS ((((size_t)1) << 30) + (((size_t)3) << 28))
#endif
#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 14)
-#define SIZE_CLASSES \
- /* index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup */ \
- SC( 0, 4, 4, 0, no, yes, 4) \
- SC( 1, 4, 4, 1, no, yes, 4) \
- SC( 2, 4, 4, 2, no, yes, 4) \
- SC( 3, 4, 4, 3, no, yes, 4) \
- \
- SC( 4, 6, 4, 1, no, yes, 4) \
- SC( 5, 6, 4, 2, no, yes, 4) \
- SC( 6, 6, 4, 3, no, yes, 4) \
- SC( 7, 6, 4, 4, no, yes, 4) \
- \
- SC( 8, 7, 5, 1, no, yes, 5) \
- SC( 9, 7, 5, 2, no, yes, 5) \
- SC( 10, 7, 5, 3, no, yes, 5) \
- SC( 11, 7, 5, 4, no, yes, 5) \
- \
- SC( 12, 8, 6, 1, no, yes, 6) \
- SC( 13, 8, 6, 2, no, yes, 6) \
- SC( 14, 8, 6, 3, no, yes, 6) \
- SC( 15, 8, 6, 4, no, yes, 6) \
- \
- SC( 16, 9, 7, 1, no, yes, 7) \
- SC( 17, 9, 7, 2, no, yes, 7) \
- SC( 18, 9, 7, 3, no, yes, 7) \
- SC( 19, 9, 7, 4, no, yes, 7) \
- \
- SC( 20, 10, 8, 1, no, yes, 8) \
- SC( 21, 10, 8, 2, no, yes, 8) \
- SC( 22, 10, 8, 3, no, yes, 8) \
- SC( 23, 10, 8, 4, no, yes, 8) \
- \
- SC( 24, 11, 9, 1, no, yes, 9) \
- SC( 25, 11, 9, 2, no, yes, 9) \
- SC( 26, 11, 9, 3, no, yes, 9) \
- SC( 27, 11, 9, 4, no, yes, 9) \
- \
- SC( 28, 12, 10, 1, no, yes, no) \
- SC( 29, 12, 10, 2, no, yes, no) \
- SC( 30, 12, 10, 3, no, yes, no) \
- SC( 31, 12, 10, 4, no, yes, no) \
- \
- SC( 32, 13, 11, 1, no, yes, no) \
- SC( 33, 13, 11, 2, no, yes, no) \
- SC( 34, 13, 11, 3, no, yes, no) \
- SC( 35, 13, 11, 4, yes, yes, no) \
- \
- SC( 36, 14, 12, 1, no, yes, no) \
- SC( 37, 14, 12, 2, no, yes, no) \
- SC( 38, 14, 12, 3, no, yes, no) \
- SC( 39, 14, 12, 4, yes, yes, no) \
- \
- SC( 40, 15, 13, 1, no, yes, no) \
- SC( 41, 15, 13, 2, yes, yes, no) \
- SC( 42, 15, 13, 3, no, yes, no) \
- SC( 43, 15, 13, 4, yes, no, no) \
- \
- SC( 44, 16, 14, 1, yes, no, no) \
- SC( 45, 16, 14, 2, yes, no, no) \
- SC( 46, 16, 14, 3, yes, no, no) \
- SC( 47, 16, 14, 4, yes, no, no) \
- \
- SC( 48, 17, 15, 1, yes, no, no) \
- SC( 49, 17, 15, 2, yes, no, no) \
- SC( 50, 17, 15, 3, yes, no, no) \
- SC( 51, 17, 15, 4, yes, no, no) \
- \
- SC( 52, 18, 16, 1, yes, no, no) \
- SC( 53, 18, 16, 2, yes, no, no) \
- SC( 54, 18, 16, 3, yes, no, no) \
- SC( 55, 18, 16, 4, yes, no, no) \
- \
- SC( 56, 19, 17, 1, yes, no, no) \
- SC( 57, 19, 17, 2, yes, no, no) \
- SC( 58, 19, 17, 3, yes, no, no) \
- SC( 59, 19, 17, 4, yes, no, no) \
- \
- SC( 60, 20, 18, 1, yes, no, no) \
- SC( 61, 20, 18, 2, yes, no, no) \
- SC( 62, 20, 18, 3, yes, no, no) \
- SC( 63, 20, 18, 4, yes, no, no) \
- \
- SC( 64, 21, 19, 1, yes, no, no) \
- SC( 65, 21, 19, 2, yes, no, no) \
- SC( 66, 21, 19, 3, yes, no, no) \
- SC( 67, 21, 19, 4, yes, no, no) \
- \
- SC( 68, 22, 20, 1, yes, no, no) \
- SC( 69, 22, 20, 2, yes, no, no) \
- SC( 70, 22, 20, 3, yes, no, no) \
- SC( 71, 22, 20, 4, yes, no, no) \
- \
- SC( 72, 23, 21, 1, yes, no, no) \
- SC( 73, 23, 21, 2, yes, no, no) \
- SC( 74, 23, 21, 3, yes, no, no) \
- SC( 75, 23, 21, 4, yes, no, no) \
- \
- SC( 76, 24, 22, 1, yes, no, no) \
- SC( 77, 24, 22, 2, yes, no, no) \
- SC( 78, 24, 22, 3, yes, no, no) \
- SC( 79, 24, 22, 4, yes, no, no) \
- \
- SC( 80, 25, 23, 1, yes, no, no) \
- SC( 81, 25, 23, 2, yes, no, no) \
- SC( 82, 25, 23, 3, yes, no, no) \
- SC( 83, 25, 23, 4, yes, no, no) \
- \
- SC( 84, 26, 24, 1, yes, no, no) \
- SC( 85, 26, 24, 2, yes, no, no) \
- SC( 86, 26, 24, 3, yes, no, no) \
- SC( 87, 26, 24, 4, yes, no, no) \
- \
- SC( 88, 27, 25, 1, yes, no, no) \
- SC( 89, 27, 25, 2, yes, no, no) \
- SC( 90, 27, 25, 3, yes, no, no) \
- SC( 91, 27, 25, 4, yes, no, no) \
- \
- SC( 92, 28, 26, 1, yes, no, no) \
- SC( 93, 28, 26, 2, yes, no, no) \
- SC( 94, 28, 26, 3, yes, no, no) \
- SC( 95, 28, 26, 4, yes, no, no) \
- \
- SC( 96, 29, 27, 1, yes, no, no) \
- SC( 97, 29, 27, 2, yes, no, no) \
- SC( 98, 29, 27, 3, yes, no, no) \
- SC( 99, 29, 27, 4, yes, no, no) \
- \
- SC(100, 30, 28, 1, yes, no, no) \
- SC(101, 30, 28, 2, yes, no, no) \
- SC(102, 30, 28, 3, yes, no, no) \
+#define SIZE_CLASSES \
+ /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \
+ SC( 0, 4, 4, 0, no, yes, 1, 4) \
+ SC( 1, 4, 4, 1, no, yes, 1, 4) \
+ SC( 2, 4, 4, 2, no, yes, 3, 4) \
+ SC( 3, 4, 4, 3, no, yes, 1, 4) \
+ \
+ SC( 4, 6, 4, 1, no, yes, 5, 4) \
+ SC( 5, 6, 4, 2, no, yes, 3, 4) \
+ SC( 6, 6, 4, 3, no, yes, 7, 4) \
+ SC( 7, 6, 4, 4, no, yes, 1, 4) \
+ \
+ SC( 8, 7, 5, 1, no, yes, 5, 5) \
+ SC( 9, 7, 5, 2, no, yes, 3, 5) \
+ SC( 10, 7, 5, 3, no, yes, 7, 5) \
+ SC( 11, 7, 5, 4, no, yes, 1, 5) \
+ \
+ SC( 12, 8, 6, 1, no, yes, 5, 6) \
+ SC( 13, 8, 6, 2, no, yes, 3, 6) \
+ SC( 14, 8, 6, 3, no, yes, 7, 6) \
+ SC( 15, 8, 6, 4, no, yes, 1, 6) \
+ \
+ SC( 16, 9, 7, 1, no, yes, 5, 7) \
+ SC( 17, 9, 7, 2, no, yes, 3, 7) \
+ SC( 18, 9, 7, 3, no, yes, 7, 7) \
+ SC( 19, 9, 7, 4, no, yes, 1, 7) \
+ \
+ SC( 20, 10, 8, 1, no, yes, 5, 8) \
+ SC( 21, 10, 8, 2, no, yes, 3, 8) \
+ SC( 22, 10, 8, 3, no, yes, 7, 8) \
+ SC( 23, 10, 8, 4, no, yes, 1, 8) \
+ \
+ SC( 24, 11, 9, 1, no, yes, 5, 9) \
+ SC( 25, 11, 9, 2, no, yes, 3, 9) \
+ SC( 26, 11, 9, 3, no, yes, 7, 9) \
+ SC( 27, 11, 9, 4, no, yes, 1, 9) \
+ \
+ SC( 28, 12, 10, 1, no, yes, 5, no) \
+ SC( 29, 12, 10, 2, no, yes, 3, no) \
+ SC( 30, 12, 10, 3, no, yes, 7, no) \
+ SC( 31, 12, 10, 4, no, yes, 1, no) \
+ \
+ SC( 32, 13, 11, 1, no, yes, 5, no) \
+ SC( 33, 13, 11, 2, no, yes, 3, no) \
+ SC( 34, 13, 11, 3, no, yes, 7, no) \
+ SC( 35, 13, 11, 4, yes, yes, 1, no) \
+ \
+ SC( 36, 14, 12, 1, no, yes, 5, no) \
+ SC( 37, 14, 12, 2, no, yes, 3, no) \
+ SC( 38, 14, 12, 3, no, yes, 7, no) \
+ SC( 39, 14, 12, 4, yes, yes, 2, no) \
+ \
+ SC( 40, 15, 13, 1, no, yes, 5, no) \
+ SC( 41, 15, 13, 2, yes, yes, 3, no) \
+ SC( 42, 15, 13, 3, no, yes, 7, no) \
+ SC( 43, 15, 13, 4, yes, no, 0, no) \
+ \
+ SC( 44, 16, 14, 1, yes, no, 0, no) \
+ SC( 45, 16, 14, 2, yes, no, 0, no) \
+ SC( 46, 16, 14, 3, yes, no, 0, no) \
+ SC( 47, 16, 14, 4, yes, no, 0, no) \
+ \
+ SC( 48, 17, 15, 1, yes, no, 0, no) \
+ SC( 49, 17, 15, 2, yes, no, 0, no) \
+ SC( 50, 17, 15, 3, yes, no, 0, no) \
+ SC( 51, 17, 15, 4, yes, no, 0, no) \
+ \
+ SC( 52, 18, 16, 1, yes, no, 0, no) \
+ SC( 53, 18, 16, 2, yes, no, 0, no) \
+ SC( 54, 18, 16, 3, yes, no, 0, no) \
+ SC( 55, 18, 16, 4, yes, no, 0, no) \
+ \
+ SC( 56, 19, 17, 1, yes, no, 0, no) \
+ SC( 57, 19, 17, 2, yes, no, 0, no) \
+ SC( 58, 19, 17, 3, yes, no, 0, no) \
+ SC( 59, 19, 17, 4, yes, no, 0, no) \
+ \
+ SC( 60, 20, 18, 1, yes, no, 0, no) \
+ SC( 61, 20, 18, 2, yes, no, 0, no) \
+ SC( 62, 20, 18, 3, yes, no, 0, no) \
+ SC( 63, 20, 18, 4, yes, no, 0, no) \
+ \
+ SC( 64, 21, 19, 1, yes, no, 0, no) \
+ SC( 65, 21, 19, 2, yes, no, 0, no) \
+ SC( 66, 21, 19, 3, yes, no, 0, no) \
+ SC( 67, 21, 19, 4, yes, no, 0, no) \
+ \
+ SC( 68, 22, 20, 1, yes, no, 0, no) \
+ SC( 69, 22, 20, 2, yes, no, 0, no) \
+ SC( 70, 22, 20, 3, yes, no, 0, no) \
+ SC( 71, 22, 20, 4, yes, no, 0, no) \
+ \
+ SC( 72, 23, 21, 1, yes, no, 0, no) \
+ SC( 73, 23, 21, 2, yes, no, 0, no) \
+ SC( 74, 23, 21, 3, yes, no, 0, no) \
+ SC( 75, 23, 21, 4, yes, no, 0, no) \
+ \
+ SC( 76, 24, 22, 1, yes, no, 0, no) \
+ SC( 77, 24, 22, 2, yes, no, 0, no) \
+ SC( 78, 24, 22, 3, yes, no, 0, no) \
+ SC( 79, 24, 22, 4, yes, no, 0, no) \
+ \
+ SC( 80, 25, 23, 1, yes, no, 0, no) \
+ SC( 81, 25, 23, 2, yes, no, 0, no) \
+ SC( 82, 25, 23, 3, yes, no, 0, no) \
+ SC( 83, 25, 23, 4, yes, no, 0, no) \
+ \
+ SC( 84, 26, 24, 1, yes, no, 0, no) \
+ SC( 85, 26, 24, 2, yes, no, 0, no) \
+ SC( 86, 26, 24, 3, yes, no, 0, no) \
+ SC( 87, 26, 24, 4, yes, no, 0, no) \
+ \
+ SC( 88, 27, 25, 1, yes, no, 0, no) \
+ SC( 89, 27, 25, 2, yes, no, 0, no) \
+ SC( 90, 27, 25, 3, yes, no, 0, no) \
+ SC( 91, 27, 25, 4, yes, no, 0, no) \
+ \
+ SC( 92, 28, 26, 1, yes, no, 0, no) \
+ SC( 93, 28, 26, 2, yes, no, 0, no) \
+ SC( 94, 28, 26, 3, yes, no, 0, no) \
+ SC( 95, 28, 26, 4, yes, no, 0, no) \
+ \
+ SC( 96, 29, 27, 1, yes, no, 0, no) \
+ SC( 97, 29, 27, 2, yes, no, 0, no) \
+ SC( 98, 29, 27, 3, yes, no, 0, no) \
+ SC( 99, 29, 27, 4, yes, no, 0, no) \
+ \
+ SC(100, 30, 28, 1, yes, no, 0, no) \
+ SC(101, 30, 28, 2, yes, no, 0, no) \
+ SC(102, 30, 28, 3, yes, no, 0, no) \
-#define SIZE_CLASSES_DEFINED
-#define NTBINS 0
-#define NLBINS 28
-#define NBINS 43
-#define NSIZES 103
-#define NPSIZES 63
-#define LG_TINY_MAXCLASS "NA"
-#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
-#define SMALL_MAXCLASS ((((size_t)1) << 15) + (((size_t)3) << 13))
-#define LG_LARGE_MINCLASS 16
-#define HUGE_MAXCLASS ((((size_t)1) << 30) + (((size_t)3) << 28))
+#define SIZE_CLASSES_DEFINED
+#define NTBINS 0
+#define NLBINS 28
+#define NBINS 43
+#define NSIZES 103
+#define LG_CEIL_NSIZES 7
+#define NPSIZES 63
+#define LG_TINY_MAXCLASS "NA"
+#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
+#define SMALL_MAXCLASS ((((size_t)1) << 15) + (((size_t)3) << 13))
+#define LG_LARGE_MINCLASS 16
+#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)
+#define LARGE_MAXCLASS ((((size_t)1) << 30) + (((size_t)3) << 28))
#endif
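(Aside, illustrative only and not part of the diff: the sketch below shows how an SC(index, lg_grp, lg_delta, ndelta, ...) row in the tables above maps to a byte size, and how the derived constants follow from the table. The helper name sc_size is hypothetical; the formula is the one implied by the SMALL_MAXCLASS/LARGE_MAXCLASS expressions in this generated header, assuming size = (1 << lg_grp) + (ndelta << lg_delta).)

/* Each size class spans (1 << lg_grp) plus ndelta steps of (1 << lg_delta). */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

static size_t
sc_size(unsigned lg_grp, unsigned lg_delta, unsigned ndelta) {
    return ((size_t)1 << lg_grp) + ((size_t)ndelta << lg_delta);
}

int
main(void) {
    /*
     * For the configuration directly above (NBINS == 43), SC(42, 15, 13, 3, ...)
     * is the last bin-backed ("small") class, so its size equals SMALL_MAXCLASS.
     */
    assert(sc_size(15, 13, 3) ==
        ((((size_t)1) << 15) + (((size_t)3) << 13)));
    /* The next class, SC(43, 15, 13, 4, ...), rounds up to 1 << LG_LARGE_MINCLASS (16). */
    assert(sc_size(15, 13, 4) == ((size_t)1 << 16));
    /* LG_CEIL_NSIZES is ceil(lg(NSIZES)): 2^7 == 128 >= 103, while 2^6 == 64 < 103. */
    printf("SMALL_MAXCLASS=%zu LARGE_MINCLASS=%zu\n",
        sc_size(15, 13, 3), sc_size(15, 13, 4));
    return 0;
}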
#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 16)
-#define SIZE_CLASSES \
- /* index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup */ \
- SC( 0, 4, 4, 0, no, yes, 4) \
- SC( 1, 4, 4, 1, no, yes, 4) \
- SC( 2, 4, 4, 2, no, yes, 4) \
- SC( 3, 4, 4, 3, no, yes, 4) \
- \
- SC( 4, 6, 4, 1, no, yes, 4) \
- SC( 5, 6, 4, 2, no, yes, 4) \
- SC( 6, 6, 4, 3, no, yes, 4) \
- SC( 7, 6, 4, 4, no, yes, 4) \
- \
- SC( 8, 7, 5, 1, no, yes, 5) \
- SC( 9, 7, 5, 2, no, yes, 5) \
- SC( 10, 7, 5, 3, no, yes, 5) \
- SC( 11, 7, 5, 4, no, yes, 5) \
- \
- SC( 12, 8, 6, 1, no, yes, 6) \
- SC( 13, 8, 6, 2, no, yes, 6) \
- SC( 14, 8, 6, 3, no, yes, 6) \
- SC( 15, 8, 6, 4, no, yes, 6) \
- \
- SC( 16, 9, 7, 1, no, yes, 7) \
- SC( 17, 9, 7, 2, no, yes, 7) \
- SC( 18, 9, 7, 3, no, yes, 7) \
- SC( 19, 9, 7, 4, no, yes, 7) \
- \
- SC( 20, 10, 8, 1, no, yes, 8) \
- SC( 21, 10, 8, 2, no, yes, 8) \
- SC( 22, 10, 8, 3, no, yes, 8) \
- SC( 23, 10, 8, 4, no, yes, 8) \
- \
- SC( 24, 11, 9, 1, no, yes, 9) \
- SC( 25, 11, 9, 2, no, yes, 9) \
- SC( 26, 11, 9, 3, no, yes, 9) \
- SC( 27, 11, 9, 4, no, yes, 9) \
- \
- SC( 28, 12, 10, 1, no, yes, no) \
- SC( 29, 12, 10, 2, no, yes, no) \
- SC( 30, 12, 10, 3, no, yes, no) \
- SC( 31, 12, 10, 4, no, yes, no) \
- \
- SC( 32, 13, 11, 1, no, yes, no) \
- SC( 33, 13, 11, 2, no, yes, no) \
- SC( 34, 13, 11, 3, no, yes, no) \
- SC( 35, 13, 11, 4, no, yes, no) \
- \
- SC( 36, 14, 12, 1, no, yes, no) \
- SC( 37, 14, 12, 2, no, yes, no) \
- SC( 38, 14, 12, 3, no, yes, no) \
- SC( 39, 14, 12, 4, no, yes, no) \
- \
- SC( 40, 15, 13, 1, no, yes, no) \
- SC( 41, 15, 13, 2, no, yes, no) \
- SC( 42, 15, 13, 3, no, yes, no) \
- SC( 43, 15, 13, 4, yes, yes, no) \
- \
- SC( 44, 16, 14, 1, no, yes, no) \
- SC( 45, 16, 14, 2, no, yes, no) \
- SC( 46, 16, 14, 3, no, yes, no) \
- SC( 47, 16, 14, 4, yes, yes, no) \
- \
- SC( 48, 17, 15, 1, no, yes, no) \
- SC( 49, 17, 15, 2, yes, yes, no) \
- SC( 50, 17, 15, 3, no, yes, no) \
- SC( 51, 17, 15, 4, yes, no, no) \
- \
- SC( 52, 18, 16, 1, yes, no, no) \
- SC( 53, 18, 16, 2, yes, no, no) \
- SC( 54, 18, 16, 3, yes, no, no) \
- SC( 55, 18, 16, 4, yes, no, no) \
- \
- SC( 56, 19, 17, 1, yes, no, no) \
- SC( 57, 19, 17, 2, yes, no, no) \
- SC( 58, 19, 17, 3, yes, no, no) \
- SC( 59, 19, 17, 4, yes, no, no) \
- \
- SC( 60, 20, 18, 1, yes, no, no) \
- SC( 61, 20, 18, 2, yes, no, no) \
- SC( 62, 20, 18, 3, yes, no, no) \
- SC( 63, 20, 18, 4, yes, no, no) \
- \
- SC( 64, 21, 19, 1, yes, no, no) \
- SC( 65, 21, 19, 2, yes, no, no) \
- SC( 66, 21, 19, 3, yes, no, no) \
- SC( 67, 21, 19, 4, yes, no, no) \
- \
- SC( 68, 22, 20, 1, yes, no, no) \
- SC( 69, 22, 20, 2, yes, no, no) \
- SC( 70, 22, 20, 3, yes, no, no) \
- SC( 71, 22, 20, 4, yes, no, no) \
- \
- SC( 72, 23, 21, 1, yes, no, no) \
- SC( 73, 23, 21, 2, yes, no, no) \
- SC( 74, 23, 21, 3, yes, no, no) \
- SC( 75, 23, 21, 4, yes, no, no) \
- \
- SC( 76, 24, 22, 1, yes, no, no) \
- SC( 77, 24, 22, 2, yes, no, no) \
- SC( 78, 24, 22, 3, yes, no, no) \
- SC( 79, 24, 22, 4, yes, no, no) \
- \
- SC( 80, 25, 23, 1, yes, no, no) \
- SC( 81, 25, 23, 2, yes, no, no) \
- SC( 82, 25, 23, 3, yes, no, no) \
- SC( 83, 25, 23, 4, yes, no, no) \
- \
- SC( 84, 26, 24, 1, yes, no, no) \
- SC( 85, 26, 24, 2, yes, no, no) \
- SC( 86, 26, 24, 3, yes, no, no) \
- SC( 87, 26, 24, 4, yes, no, no) \
- \
- SC( 88, 27, 25, 1, yes, no, no) \
- SC( 89, 27, 25, 2, yes, no, no) \
- SC( 90, 27, 25, 3, yes, no, no) \
- SC( 91, 27, 25, 4, yes, no, no) \
- \
- SC( 92, 28, 26, 1, yes, no, no) \
- SC( 93, 28, 26, 2, yes, no, no) \
- SC( 94, 28, 26, 3, yes, no, no) \
- SC( 95, 28, 26, 4, yes, no, no) \
- \
- SC( 96, 29, 27, 1, yes, no, no) \
- SC( 97, 29, 27, 2, yes, no, no) \
- SC( 98, 29, 27, 3, yes, no, no) \
- SC( 99, 29, 27, 4, yes, no, no) \
- \
- SC(100, 30, 28, 1, yes, no, no) \
- SC(101, 30, 28, 2, yes, no, no) \
- SC(102, 30, 28, 3, yes, no, no) \
+#define SIZE_CLASSES \
+ /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \
+ SC( 0, 4, 4, 0, no, yes, 1, 4) \
+ SC( 1, 4, 4, 1, no, yes, 1, 4) \
+ SC( 2, 4, 4, 2, no, yes, 3, 4) \
+ SC( 3, 4, 4, 3, no, yes, 1, 4) \
+ \
+ SC( 4, 6, 4, 1, no, yes, 5, 4) \
+ SC( 5, 6, 4, 2, no, yes, 3, 4) \
+ SC( 6, 6, 4, 3, no, yes, 7, 4) \
+ SC( 7, 6, 4, 4, no, yes, 1, 4) \
+ \
+ SC( 8, 7, 5, 1, no, yes, 5, 5) \
+ SC( 9, 7, 5, 2, no, yes, 3, 5) \
+ SC( 10, 7, 5, 3, no, yes, 7, 5) \
+ SC( 11, 7, 5, 4, no, yes, 1, 5) \
+ \
+ SC( 12, 8, 6, 1, no, yes, 5, 6) \
+ SC( 13, 8, 6, 2, no, yes, 3, 6) \
+ SC( 14, 8, 6, 3, no, yes, 7, 6) \
+ SC( 15, 8, 6, 4, no, yes, 1, 6) \
+ \
+ SC( 16, 9, 7, 1, no, yes, 5, 7) \
+ SC( 17, 9, 7, 2, no, yes, 3, 7) \
+ SC( 18, 9, 7, 3, no, yes, 7, 7) \
+ SC( 19, 9, 7, 4, no, yes, 1, 7) \
+ \
+ SC( 20, 10, 8, 1, no, yes, 5, 8) \
+ SC( 21, 10, 8, 2, no, yes, 3, 8) \
+ SC( 22, 10, 8, 3, no, yes, 7, 8) \
+ SC( 23, 10, 8, 4, no, yes, 1, 8) \
+ \
+ SC( 24, 11, 9, 1, no, yes, 5, 9) \
+ SC( 25, 11, 9, 2, no, yes, 3, 9) \
+ SC( 26, 11, 9, 3, no, yes, 7, 9) \
+ SC( 27, 11, 9, 4, no, yes, 1, 9) \
+ \
+ SC( 28, 12, 10, 1, no, yes, 5, no) \
+ SC( 29, 12, 10, 2, no, yes, 3, no) \
+ SC( 30, 12, 10, 3, no, yes, 7, no) \
+ SC( 31, 12, 10, 4, no, yes, 1, no) \
+ \
+ SC( 32, 13, 11, 1, no, yes, 5, no) \
+ SC( 33, 13, 11, 2, no, yes, 3, no) \
+ SC( 34, 13, 11, 3, no, yes, 7, no) \
+ SC( 35, 13, 11, 4, no, yes, 1, no) \
+ \
+ SC( 36, 14, 12, 1, no, yes, 5, no) \
+ SC( 37, 14, 12, 2, no, yes, 3, no) \
+ SC( 38, 14, 12, 3, no, yes, 7, no) \
+ SC( 39, 14, 12, 4, no, yes, 1, no) \
+ \
+ SC( 40, 15, 13, 1, no, yes, 5, no) \
+ SC( 41, 15, 13, 2, no, yes, 3, no) \
+ SC( 42, 15, 13, 3, no, yes, 7, no) \
+ SC( 43, 15, 13, 4, yes, yes, 1, no) \
+ \
+ SC( 44, 16, 14, 1, no, yes, 5, no) \
+ SC( 45, 16, 14, 2, no, yes, 3, no) \
+ SC( 46, 16, 14, 3, no, yes, 7, no) \
+ SC( 47, 16, 14, 4, yes, yes, 2, no) \
+ \
+ SC( 48, 17, 15, 1, no, yes, 5, no) \
+ SC( 49, 17, 15, 2, yes, yes, 3, no) \
+ SC( 50, 17, 15, 3, no, yes, 7, no) \
+ SC( 51, 17, 15, 4, yes, no, 0, no) \
+ \
+ SC( 52, 18, 16, 1, yes, no, 0, no) \
+ SC( 53, 18, 16, 2, yes, no, 0, no) \
+ SC( 54, 18, 16, 3, yes, no, 0, no) \
+ SC( 55, 18, 16, 4, yes, no, 0, no) \
+ \
+ SC( 56, 19, 17, 1, yes, no, 0, no) \
+ SC( 57, 19, 17, 2, yes, no, 0, no) \
+ SC( 58, 19, 17, 3, yes, no, 0, no) \
+ SC( 59, 19, 17, 4, yes, no, 0, no) \
+ \
+ SC( 60, 20, 18, 1, yes, no, 0, no) \
+ SC( 61, 20, 18, 2, yes, no, 0, no) \
+ SC( 62, 20, 18, 3, yes, no, 0, no) \
+ SC( 63, 20, 18, 4, yes, no, 0, no) \
+ \
+ SC( 64, 21, 19, 1, yes, no, 0, no) \
+ SC( 65, 21, 19, 2, yes, no, 0, no) \
+ SC( 66, 21, 19, 3, yes, no, 0, no) \
+ SC( 67, 21, 19, 4, yes, no, 0, no) \
+ \
+ SC( 68, 22, 20, 1, yes, no, 0, no) \
+ SC( 69, 22, 20, 2, yes, no, 0, no) \
+ SC( 70, 22, 20, 3, yes, no, 0, no) \
+ SC( 71, 22, 20, 4, yes, no, 0, no) \
+ \
+ SC( 72, 23, 21, 1, yes, no, 0, no) \
+ SC( 73, 23, 21, 2, yes, no, 0, no) \
+ SC( 74, 23, 21, 3, yes, no, 0, no) \
+ SC( 75, 23, 21, 4, yes, no, 0, no) \
+ \
+ SC( 76, 24, 22, 1, yes, no, 0, no) \
+ SC( 77, 24, 22, 2, yes, no, 0, no) \
+ SC( 78, 24, 22, 3, yes, no, 0, no) \
+ SC( 79, 24, 22, 4, yes, no, 0, no) \
+ \
+ SC( 80, 25, 23, 1, yes, no, 0, no) \
+ SC( 81, 25, 23, 2, yes, no, 0, no) \
+ SC( 82, 25, 23, 3, yes, no, 0, no) \
+ SC( 83, 25, 23, 4, yes, no, 0, no) \
+ \
+ SC( 84, 26, 24, 1, yes, no, 0, no) \
+ SC( 85, 26, 24, 2, yes, no, 0, no) \
+ SC( 86, 26, 24, 3, yes, no, 0, no) \
+ SC( 87, 26, 24, 4, yes, no, 0, no) \
+ \
+ SC( 88, 27, 25, 1, yes, no, 0, no) \
+ SC( 89, 27, 25, 2, yes, no, 0, no) \
+ SC( 90, 27, 25, 3, yes, no, 0, no) \
+ SC( 91, 27, 25, 4, yes, no, 0, no) \
+ \
+ SC( 92, 28, 26, 1, yes, no, 0, no) \
+ SC( 93, 28, 26, 2, yes, no, 0, no) \
+ SC( 94, 28, 26, 3, yes, no, 0, no) \
+ SC( 95, 28, 26, 4, yes, no, 0, no) \
+ \
+ SC( 96, 29, 27, 1, yes, no, 0, no) \
+ SC( 97, 29, 27, 2, yes, no, 0, no) \
+ SC( 98, 29, 27, 3, yes, no, 0, no) \
+ SC( 99, 29, 27, 4, yes, no, 0, no) \
+ \
+ SC(100, 30, 28, 1, yes, no, 0, no) \
+ SC(101, 30, 28, 2, yes, no, 0, no) \
+ SC(102, 30, 28, 3, yes, no, 0, no) \
-#define SIZE_CLASSES_DEFINED
-#define NTBINS 0
-#define NLBINS 28
-#define NBINS 51
-#define NSIZES 103
-#define NPSIZES 55
-#define LG_TINY_MAXCLASS "NA"
-#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
-#define SMALL_MAXCLASS ((((size_t)1) << 17) + (((size_t)3) << 15))
-#define LG_LARGE_MINCLASS 18
-#define HUGE_MAXCLASS ((((size_t)1) << 30) + (((size_t)3) << 28))
+#define SIZE_CLASSES_DEFINED
+#define NTBINS 0
+#define NLBINS 28
+#define NBINS 51
+#define NSIZES 103
+#define LG_CEIL_NSIZES 7
+#define NPSIZES 55
+#define LG_TINY_MAXCLASS "NA"
+#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
+#define SMALL_MAXCLASS ((((size_t)1) << 17) + (((size_t)3) << 15))
+#define LG_LARGE_MINCLASS 18
+#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)
+#define LARGE_MAXCLASS ((((size_t)1) << 30) + (((size_t)3) << 28))
#endif
#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 12)
-#define SIZE_CLASSES \
- /* index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup */ \
- SC( 0, 3, 3, 0, no, yes, 3) \
- SC( 1, 3, 3, 1, no, yes, 3) \
- SC( 2, 3, 3, 2, no, yes, 3) \
- SC( 3, 3, 3, 3, no, yes, 3) \
- \
- SC( 4, 5, 3, 1, no, yes, 3) \
- SC( 5, 5, 3, 2, no, yes, 3) \
- SC( 6, 5, 3, 3, no, yes, 3) \
- SC( 7, 5, 3, 4, no, yes, 3) \
- \
- SC( 8, 6, 4, 1, no, yes, 4) \
- SC( 9, 6, 4, 2, no, yes, 4) \
- SC( 10, 6, 4, 3, no, yes, 4) \
- SC( 11, 6, 4, 4, no, yes, 4) \
- \
- SC( 12, 7, 5, 1, no, yes, 5) \
- SC( 13, 7, 5, 2, no, yes, 5) \
- SC( 14, 7, 5, 3, no, yes, 5) \
- SC( 15, 7, 5, 4, no, yes, 5) \
- \
- SC( 16, 8, 6, 1, no, yes, 6) \
- SC( 17, 8, 6, 2, no, yes, 6) \
- SC( 18, 8, 6, 3, no, yes, 6) \
- SC( 19, 8, 6, 4, no, yes, 6) \
- \
- SC( 20, 9, 7, 1, no, yes, 7) \
- SC( 21, 9, 7, 2, no, yes, 7) \
- SC( 22, 9, 7, 3, no, yes, 7) \
- SC( 23, 9, 7, 4, no, yes, 7) \
- \
- SC( 24, 10, 8, 1, no, yes, 8) \
- SC( 25, 10, 8, 2, no, yes, 8) \
- SC( 26, 10, 8, 3, no, yes, 8) \
- SC( 27, 10, 8, 4, no, yes, 8) \
- \
- SC( 28, 11, 9, 1, no, yes, 9) \
- SC( 29, 11, 9, 2, no, yes, 9) \
- SC( 30, 11, 9, 3, no, yes, 9) \
- SC( 31, 11, 9, 4, yes, yes, 9) \
- \
- SC( 32, 12, 10, 1, no, yes, no) \
- SC( 33, 12, 10, 2, no, yes, no) \
- SC( 34, 12, 10, 3, no, yes, no) \
- SC( 35, 12, 10, 4, yes, yes, no) \
- \
- SC( 36, 13, 11, 1, no, yes, no) \
- SC( 37, 13, 11, 2, yes, yes, no) \
- SC( 38, 13, 11, 3, no, yes, no) \
- SC( 39, 13, 11, 4, yes, no, no) \
- \
- SC( 40, 14, 12, 1, yes, no, no) \
- SC( 41, 14, 12, 2, yes, no, no) \
- SC( 42, 14, 12, 3, yes, no, no) \
- SC( 43, 14, 12, 4, yes, no, no) \
- \
- SC( 44, 15, 13, 1, yes, no, no) \
- SC( 45, 15, 13, 2, yes, no, no) \
- SC( 46, 15, 13, 3, yes, no, no) \
- SC( 47, 15, 13, 4, yes, no, no) \
- \
- SC( 48, 16, 14, 1, yes, no, no) \
- SC( 49, 16, 14, 2, yes, no, no) \
- SC( 50, 16, 14, 3, yes, no, no) \
- SC( 51, 16, 14, 4, yes, no, no) \
- \
- SC( 52, 17, 15, 1, yes, no, no) \
- SC( 53, 17, 15, 2, yes, no, no) \
- SC( 54, 17, 15, 3, yes, no, no) \
- SC( 55, 17, 15, 4, yes, no, no) \
- \
- SC( 56, 18, 16, 1, yes, no, no) \
- SC( 57, 18, 16, 2, yes, no, no) \
- SC( 58, 18, 16, 3, yes, no, no) \
- SC( 59, 18, 16, 4, yes, no, no) \
- \
- SC( 60, 19, 17, 1, yes, no, no) \
- SC( 61, 19, 17, 2, yes, no, no) \
- SC( 62, 19, 17, 3, yes, no, no) \
- SC( 63, 19, 17, 4, yes, no, no) \
- \
- SC( 64, 20, 18, 1, yes, no, no) \
- SC( 65, 20, 18, 2, yes, no, no) \
- SC( 66, 20, 18, 3, yes, no, no) \
- SC( 67, 20, 18, 4, yes, no, no) \
- \
- SC( 68, 21, 19, 1, yes, no, no) \
- SC( 69, 21, 19, 2, yes, no, no) \
- SC( 70, 21, 19, 3, yes, no, no) \
- SC( 71, 21, 19, 4, yes, no, no) \
- \
- SC( 72, 22, 20, 1, yes, no, no) \
- SC( 73, 22, 20, 2, yes, no, no) \
- SC( 74, 22, 20, 3, yes, no, no) \
- SC( 75, 22, 20, 4, yes, no, no) \
- \
- SC( 76, 23, 21, 1, yes, no, no) \
- SC( 77, 23, 21, 2, yes, no, no) \
- SC( 78, 23, 21, 3, yes, no, no) \
- SC( 79, 23, 21, 4, yes, no, no) \
- \
- SC( 80, 24, 22, 1, yes, no, no) \
- SC( 81, 24, 22, 2, yes, no, no) \
- SC( 82, 24, 22, 3, yes, no, no) \
- SC( 83, 24, 22, 4, yes, no, no) \
- \
- SC( 84, 25, 23, 1, yes, no, no) \
- SC( 85, 25, 23, 2, yes, no, no) \
- SC( 86, 25, 23, 3, yes, no, no) \
- SC( 87, 25, 23, 4, yes, no, no) \
- \
- SC( 88, 26, 24, 1, yes, no, no) \
- SC( 89, 26, 24, 2, yes, no, no) \
- SC( 90, 26, 24, 3, yes, no, no) \
- SC( 91, 26, 24, 4, yes, no, no) \
- \
- SC( 92, 27, 25, 1, yes, no, no) \
- SC( 93, 27, 25, 2, yes, no, no) \
- SC( 94, 27, 25, 3, yes, no, no) \
- SC( 95, 27, 25, 4, yes, no, no) \
- \
- SC( 96, 28, 26, 1, yes, no, no) \
- SC( 97, 28, 26, 2, yes, no, no) \
- SC( 98, 28, 26, 3, yes, no, no) \
- SC( 99, 28, 26, 4, yes, no, no) \
- \
- SC(100, 29, 27, 1, yes, no, no) \
- SC(101, 29, 27, 2, yes, no, no) \
- SC(102, 29, 27, 3, yes, no, no) \
- SC(103, 29, 27, 4, yes, no, no) \
- \
- SC(104, 30, 28, 1, yes, no, no) \
- SC(105, 30, 28, 2, yes, no, no) \
- SC(106, 30, 28, 3, yes, no, no) \
- SC(107, 30, 28, 4, yes, no, no) \
- \
- SC(108, 31, 29, 1, yes, no, no) \
- SC(109, 31, 29, 2, yes, no, no) \
- SC(110, 31, 29, 3, yes, no, no) \
- SC(111, 31, 29, 4, yes, no, no) \
- \
- SC(112, 32, 30, 1, yes, no, no) \
- SC(113, 32, 30, 2, yes, no, no) \
- SC(114, 32, 30, 3, yes, no, no) \
- SC(115, 32, 30, 4, yes, no, no) \
- \
- SC(116, 33, 31, 1, yes, no, no) \
- SC(117, 33, 31, 2, yes, no, no) \
- SC(118, 33, 31, 3, yes, no, no) \
- SC(119, 33, 31, 4, yes, no, no) \
- \
- SC(120, 34, 32, 1, yes, no, no) \
- SC(121, 34, 32, 2, yes, no, no) \
- SC(122, 34, 32, 3, yes, no, no) \
- SC(123, 34, 32, 4, yes, no, no) \
- \
- SC(124, 35, 33, 1, yes, no, no) \
- SC(125, 35, 33, 2, yes, no, no) \
- SC(126, 35, 33, 3, yes, no, no) \
- SC(127, 35, 33, 4, yes, no, no) \
- \
- SC(128, 36, 34, 1, yes, no, no) \
- SC(129, 36, 34, 2, yes, no, no) \
- SC(130, 36, 34, 3, yes, no, no) \
- SC(131, 36, 34, 4, yes, no, no) \
- \
- SC(132, 37, 35, 1, yes, no, no) \
- SC(133, 37, 35, 2, yes, no, no) \
- SC(134, 37, 35, 3, yes, no, no) \
- SC(135, 37, 35, 4, yes, no, no) \
- \
- SC(136, 38, 36, 1, yes, no, no) \
- SC(137, 38, 36, 2, yes, no, no) \
- SC(138, 38, 36, 3, yes, no, no) \
- SC(139, 38, 36, 4, yes, no, no) \
- \
- SC(140, 39, 37, 1, yes, no, no) \
- SC(141, 39, 37, 2, yes, no, no) \
- SC(142, 39, 37, 3, yes, no, no) \
- SC(143, 39, 37, 4, yes, no, no) \
- \
- SC(144, 40, 38, 1, yes, no, no) \
- SC(145, 40, 38, 2, yes, no, no) \
- SC(146, 40, 38, 3, yes, no, no) \
- SC(147, 40, 38, 4, yes, no, no) \
- \
- SC(148, 41, 39, 1, yes, no, no) \
- SC(149, 41, 39, 2, yes, no, no) \
- SC(150, 41, 39, 3, yes, no, no) \
- SC(151, 41, 39, 4, yes, no, no) \
- \
- SC(152, 42, 40, 1, yes, no, no) \
- SC(153, 42, 40, 2, yes, no, no) \
- SC(154, 42, 40, 3, yes, no, no) \
- SC(155, 42, 40, 4, yes, no, no) \
- \
- SC(156, 43, 41, 1, yes, no, no) \
- SC(157, 43, 41, 2, yes, no, no) \
- SC(158, 43, 41, 3, yes, no, no) \
- SC(159, 43, 41, 4, yes, no, no) \
- \
- SC(160, 44, 42, 1, yes, no, no) \
- SC(161, 44, 42, 2, yes, no, no) \
- SC(162, 44, 42, 3, yes, no, no) \
- SC(163, 44, 42, 4, yes, no, no) \
- \
- SC(164, 45, 43, 1, yes, no, no) \
- SC(165, 45, 43, 2, yes, no, no) \
- SC(166, 45, 43, 3, yes, no, no) \
- SC(167, 45, 43, 4, yes, no, no) \
- \
- SC(168, 46, 44, 1, yes, no, no) \
- SC(169, 46, 44, 2, yes, no, no) \
- SC(170, 46, 44, 3, yes, no, no) \
- SC(171, 46, 44, 4, yes, no, no) \
- \
- SC(172, 47, 45, 1, yes, no, no) \
- SC(173, 47, 45, 2, yes, no, no) \
- SC(174, 47, 45, 3, yes, no, no) \
- SC(175, 47, 45, 4, yes, no, no) \
- \
- SC(176, 48, 46, 1, yes, no, no) \
- SC(177, 48, 46, 2, yes, no, no) \
- SC(178, 48, 46, 3, yes, no, no) \
- SC(179, 48, 46, 4, yes, no, no) \
- \
- SC(180, 49, 47, 1, yes, no, no) \
- SC(181, 49, 47, 2, yes, no, no) \
- SC(182, 49, 47, 3, yes, no, no) \
- SC(183, 49, 47, 4, yes, no, no) \
- \
- SC(184, 50, 48, 1, yes, no, no) \
- SC(185, 50, 48, 2, yes, no, no) \
- SC(186, 50, 48, 3, yes, no, no) \
- SC(187, 50, 48, 4, yes, no, no) \
- \
- SC(188, 51, 49, 1, yes, no, no) \
- SC(189, 51, 49, 2, yes, no, no) \
- SC(190, 51, 49, 3, yes, no, no) \
- SC(191, 51, 49, 4, yes, no, no) \
- \
- SC(192, 52, 50, 1, yes, no, no) \
- SC(193, 52, 50, 2, yes, no, no) \
- SC(194, 52, 50, 3, yes, no, no) \
- SC(195, 52, 50, 4, yes, no, no) \
- \
- SC(196, 53, 51, 1, yes, no, no) \
- SC(197, 53, 51, 2, yes, no, no) \
- SC(198, 53, 51, 3, yes, no, no) \
- SC(199, 53, 51, 4, yes, no, no) \
- \
- SC(200, 54, 52, 1, yes, no, no) \
- SC(201, 54, 52, 2, yes, no, no) \
- SC(202, 54, 52, 3, yes, no, no) \
- SC(203, 54, 52, 4, yes, no, no) \
- \
- SC(204, 55, 53, 1, yes, no, no) \
- SC(205, 55, 53, 2, yes, no, no) \
- SC(206, 55, 53, 3, yes, no, no) \
- SC(207, 55, 53, 4, yes, no, no) \
- \
- SC(208, 56, 54, 1, yes, no, no) \
- SC(209, 56, 54, 2, yes, no, no) \
- SC(210, 56, 54, 3, yes, no, no) \
- SC(211, 56, 54, 4, yes, no, no) \
- \
- SC(212, 57, 55, 1, yes, no, no) \
- SC(213, 57, 55, 2, yes, no, no) \
- SC(214, 57, 55, 3, yes, no, no) \
- SC(215, 57, 55, 4, yes, no, no) \
- \
- SC(216, 58, 56, 1, yes, no, no) \
- SC(217, 58, 56, 2, yes, no, no) \
- SC(218, 58, 56, 3, yes, no, no) \
- SC(219, 58, 56, 4, yes, no, no) \
- \
- SC(220, 59, 57, 1, yes, no, no) \
- SC(221, 59, 57, 2, yes, no, no) \
- SC(222, 59, 57, 3, yes, no, no) \
- SC(223, 59, 57, 4, yes, no, no) \
- \
- SC(224, 60, 58, 1, yes, no, no) \
- SC(225, 60, 58, 2, yes, no, no) \
- SC(226, 60, 58, 3, yes, no, no) \
- SC(227, 60, 58, 4, yes, no, no) \
- \
- SC(228, 61, 59, 1, yes, no, no) \
- SC(229, 61, 59, 2, yes, no, no) \
- SC(230, 61, 59, 3, yes, no, no) \
- SC(231, 61, 59, 4, yes, no, no) \
- \
- SC(232, 62, 60, 1, yes, no, no) \
- SC(233, 62, 60, 2, yes, no, no) \
- SC(234, 62, 60, 3, yes, no, no) \
+#define SIZE_CLASSES \
+ /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \
+ SC( 0, 3, 3, 0, no, yes, 1, 3) \
+ SC( 1, 3, 3, 1, no, yes, 1, 3) \
+ SC( 2, 3, 3, 2, no, yes, 3, 3) \
+ SC( 3, 3, 3, 3, no, yes, 1, 3) \
+ \
+ SC( 4, 5, 3, 1, no, yes, 5, 3) \
+ SC( 5, 5, 3, 2, no, yes, 3, 3) \
+ SC( 6, 5, 3, 3, no, yes, 7, 3) \
+ SC( 7, 5, 3, 4, no, yes, 1, 3) \
+ \
+ SC( 8, 6, 4, 1, no, yes, 5, 4) \
+ SC( 9, 6, 4, 2, no, yes, 3, 4) \
+ SC( 10, 6, 4, 3, no, yes, 7, 4) \
+ SC( 11, 6, 4, 4, no, yes, 1, 4) \
+ \
+ SC( 12, 7, 5, 1, no, yes, 5, 5) \
+ SC( 13, 7, 5, 2, no, yes, 3, 5) \
+ SC( 14, 7, 5, 3, no, yes, 7, 5) \
+ SC( 15, 7, 5, 4, no, yes, 1, 5) \
+ \
+ SC( 16, 8, 6, 1, no, yes, 5, 6) \
+ SC( 17, 8, 6, 2, no, yes, 3, 6) \
+ SC( 18, 8, 6, 3, no, yes, 7, 6) \
+ SC( 19, 8, 6, 4, no, yes, 1, 6) \
+ \
+ SC( 20, 9, 7, 1, no, yes, 5, 7) \
+ SC( 21, 9, 7, 2, no, yes, 3, 7) \
+ SC( 22, 9, 7, 3, no, yes, 7, 7) \
+ SC( 23, 9, 7, 4, no, yes, 1, 7) \
+ \
+ SC( 24, 10, 8, 1, no, yes, 5, 8) \
+ SC( 25, 10, 8, 2, no, yes, 3, 8) \
+ SC( 26, 10, 8, 3, no, yes, 7, 8) \
+ SC( 27, 10, 8, 4, no, yes, 1, 8) \
+ \
+ SC( 28, 11, 9, 1, no, yes, 5, 9) \
+ SC( 29, 11, 9, 2, no, yes, 3, 9) \
+ SC( 30, 11, 9, 3, no, yes, 7, 9) \
+ SC( 31, 11, 9, 4, yes, yes, 1, 9) \
+ \
+ SC( 32, 12, 10, 1, no, yes, 5, no) \
+ SC( 33, 12, 10, 2, no, yes, 3, no) \
+ SC( 34, 12, 10, 3, no, yes, 7, no) \
+ SC( 35, 12, 10, 4, yes, yes, 2, no) \
+ \
+ SC( 36, 13, 11, 1, no, yes, 5, no) \
+ SC( 37, 13, 11, 2, yes, yes, 3, no) \
+ SC( 38, 13, 11, 3, no, yes, 7, no) \
+ SC( 39, 13, 11, 4, yes, no, 0, no) \
+ \
+ SC( 40, 14, 12, 1, yes, no, 0, no) \
+ SC( 41, 14, 12, 2, yes, no, 0, no) \
+ SC( 42, 14, 12, 3, yes, no, 0, no) \
+ SC( 43, 14, 12, 4, yes, no, 0, no) \
+ \
+ SC( 44, 15, 13, 1, yes, no, 0, no) \
+ SC( 45, 15, 13, 2, yes, no, 0, no) \
+ SC( 46, 15, 13, 3, yes, no, 0, no) \
+ SC( 47, 15, 13, 4, yes, no, 0, no) \
+ \
+ SC( 48, 16, 14, 1, yes, no, 0, no) \
+ SC( 49, 16, 14, 2, yes, no, 0, no) \
+ SC( 50, 16, 14, 3, yes, no, 0, no) \
+ SC( 51, 16, 14, 4, yes, no, 0, no) \
+ \
+ SC( 52, 17, 15, 1, yes, no, 0, no) \
+ SC( 53, 17, 15, 2, yes, no, 0, no) \
+ SC( 54, 17, 15, 3, yes, no, 0, no) \
+ SC( 55, 17, 15, 4, yes, no, 0, no) \
+ \
+ SC( 56, 18, 16, 1, yes, no, 0, no) \
+ SC( 57, 18, 16, 2, yes, no, 0, no) \
+ SC( 58, 18, 16, 3, yes, no, 0, no) \
+ SC( 59, 18, 16, 4, yes, no, 0, no) \
+ \
+ SC( 60, 19, 17, 1, yes, no, 0, no) \
+ SC( 61, 19, 17, 2, yes, no, 0, no) \
+ SC( 62, 19, 17, 3, yes, no, 0, no) \
+ SC( 63, 19, 17, 4, yes, no, 0, no) \
+ \
+ SC( 64, 20, 18, 1, yes, no, 0, no) \
+ SC( 65, 20, 18, 2, yes, no, 0, no) \
+ SC( 66, 20, 18, 3, yes, no, 0, no) \
+ SC( 67, 20, 18, 4, yes, no, 0, no) \
+ \
+ SC( 68, 21, 19, 1, yes, no, 0, no) \
+ SC( 69, 21, 19, 2, yes, no, 0, no) \
+ SC( 70, 21, 19, 3, yes, no, 0, no) \
+ SC( 71, 21, 19, 4, yes, no, 0, no) \
+ \
+ SC( 72, 22, 20, 1, yes, no, 0, no) \
+ SC( 73, 22, 20, 2, yes, no, 0, no) \
+ SC( 74, 22, 20, 3, yes, no, 0, no) \
+ SC( 75, 22, 20, 4, yes, no, 0, no) \
+ \
+ SC( 76, 23, 21, 1, yes, no, 0, no) \
+ SC( 77, 23, 21, 2, yes, no, 0, no) \
+ SC( 78, 23, 21, 3, yes, no, 0, no) \
+ SC( 79, 23, 21, 4, yes, no, 0, no) \
+ \
+ SC( 80, 24, 22, 1, yes, no, 0, no) \
+ SC( 81, 24, 22, 2, yes, no, 0, no) \
+ SC( 82, 24, 22, 3, yes, no, 0, no) \
+ SC( 83, 24, 22, 4, yes, no, 0, no) \
+ \
+ SC( 84, 25, 23, 1, yes, no, 0, no) \
+ SC( 85, 25, 23, 2, yes, no, 0, no) \
+ SC( 86, 25, 23, 3, yes, no, 0, no) \
+ SC( 87, 25, 23, 4, yes, no, 0, no) \
+ \
+ SC( 88, 26, 24, 1, yes, no, 0, no) \
+ SC( 89, 26, 24, 2, yes, no, 0, no) \
+ SC( 90, 26, 24, 3, yes, no, 0, no) \
+ SC( 91, 26, 24, 4, yes, no, 0, no) \
+ \
+ SC( 92, 27, 25, 1, yes, no, 0, no) \
+ SC( 93, 27, 25, 2, yes, no, 0, no) \
+ SC( 94, 27, 25, 3, yes, no, 0, no) \
+ SC( 95, 27, 25, 4, yes, no, 0, no) \
+ \
+ SC( 96, 28, 26, 1, yes, no, 0, no) \
+ SC( 97, 28, 26, 2, yes, no, 0, no) \
+ SC( 98, 28, 26, 3, yes, no, 0, no) \
+ SC( 99, 28, 26, 4, yes, no, 0, no) \
+ \
+ SC(100, 29, 27, 1, yes, no, 0, no) \
+ SC(101, 29, 27, 2, yes, no, 0, no) \
+ SC(102, 29, 27, 3, yes, no, 0, no) \
+ SC(103, 29, 27, 4, yes, no, 0, no) \
+ \
+ SC(104, 30, 28, 1, yes, no, 0, no) \
+ SC(105, 30, 28, 2, yes, no, 0, no) \
+ SC(106, 30, 28, 3, yes, no, 0, no) \
+ SC(107, 30, 28, 4, yes, no, 0, no) \
+ \
+ SC(108, 31, 29, 1, yes, no, 0, no) \
+ SC(109, 31, 29, 2, yes, no, 0, no) \
+ SC(110, 31, 29, 3, yes, no, 0, no) \
+ SC(111, 31, 29, 4, yes, no, 0, no) \
+ \
+ SC(112, 32, 30, 1, yes, no, 0, no) \
+ SC(113, 32, 30, 2, yes, no, 0, no) \
+ SC(114, 32, 30, 3, yes, no, 0, no) \
+ SC(115, 32, 30, 4, yes, no, 0, no) \
+ \
+ SC(116, 33, 31, 1, yes, no, 0, no) \
+ SC(117, 33, 31, 2, yes, no, 0, no) \
+ SC(118, 33, 31, 3, yes, no, 0, no) \
+ SC(119, 33, 31, 4, yes, no, 0, no) \
+ \
+ SC(120, 34, 32, 1, yes, no, 0, no) \
+ SC(121, 34, 32, 2, yes, no, 0, no) \
+ SC(122, 34, 32, 3, yes, no, 0, no) \
+ SC(123, 34, 32, 4, yes, no, 0, no) \
+ \
+ SC(124, 35, 33, 1, yes, no, 0, no) \
+ SC(125, 35, 33, 2, yes, no, 0, no) \
+ SC(126, 35, 33, 3, yes, no, 0, no) \
+ SC(127, 35, 33, 4, yes, no, 0, no) \
+ \
+ SC(128, 36, 34, 1, yes, no, 0, no) \
+ SC(129, 36, 34, 2, yes, no, 0, no) \
+ SC(130, 36, 34, 3, yes, no, 0, no) \
+ SC(131, 36, 34, 4, yes, no, 0, no) \
+ \
+ SC(132, 37, 35, 1, yes, no, 0, no) \
+ SC(133, 37, 35, 2, yes, no, 0, no) \
+ SC(134, 37, 35, 3, yes, no, 0, no) \
+ SC(135, 37, 35, 4, yes, no, 0, no) \
+ \
+ SC(136, 38, 36, 1, yes, no, 0, no) \
+ SC(137, 38, 36, 2, yes, no, 0, no) \
+ SC(138, 38, 36, 3, yes, no, 0, no) \
+ SC(139, 38, 36, 4, yes, no, 0, no) \
+ \
+ SC(140, 39, 37, 1, yes, no, 0, no) \
+ SC(141, 39, 37, 2, yes, no, 0, no) \
+ SC(142, 39, 37, 3, yes, no, 0, no) \
+ SC(143, 39, 37, 4, yes, no, 0, no) \
+ \
+ SC(144, 40, 38, 1, yes, no, 0, no) \
+ SC(145, 40, 38, 2, yes, no, 0, no) \
+ SC(146, 40, 38, 3, yes, no, 0, no) \
+ SC(147, 40, 38, 4, yes, no, 0, no) \
+ \
+ SC(148, 41, 39, 1, yes, no, 0, no) \
+ SC(149, 41, 39, 2, yes, no, 0, no) \
+ SC(150, 41, 39, 3, yes, no, 0, no) \
+ SC(151, 41, 39, 4, yes, no, 0, no) \
+ \
+ SC(152, 42, 40, 1, yes, no, 0, no) \
+ SC(153, 42, 40, 2, yes, no, 0, no) \
+ SC(154, 42, 40, 3, yes, no, 0, no) \
+ SC(155, 42, 40, 4, yes, no, 0, no) \
+ \
+ SC(156, 43, 41, 1, yes, no, 0, no) \
+ SC(157, 43, 41, 2, yes, no, 0, no) \
+ SC(158, 43, 41, 3, yes, no, 0, no) \
+ SC(159, 43, 41, 4, yes, no, 0, no) \
+ \
+ SC(160, 44, 42, 1, yes, no, 0, no) \
+ SC(161, 44, 42, 2, yes, no, 0, no) \
+ SC(162, 44, 42, 3, yes, no, 0, no) \
+ SC(163, 44, 42, 4, yes, no, 0, no) \
+ \
+ SC(164, 45, 43, 1, yes, no, 0, no) \
+ SC(165, 45, 43, 2, yes, no, 0, no) \
+ SC(166, 45, 43, 3, yes, no, 0, no) \
+ SC(167, 45, 43, 4, yes, no, 0, no) \
+ \
+ SC(168, 46, 44, 1, yes, no, 0, no) \
+ SC(169, 46, 44, 2, yes, no, 0, no) \
+ SC(170, 46, 44, 3, yes, no, 0, no) \
+ SC(171, 46, 44, 4, yes, no, 0, no) \
+ \
+ SC(172, 47, 45, 1, yes, no, 0, no) \
+ SC(173, 47, 45, 2, yes, no, 0, no) \
+ SC(174, 47, 45, 3, yes, no, 0, no) \
+ SC(175, 47, 45, 4, yes, no, 0, no) \
+ \
+ SC(176, 48, 46, 1, yes, no, 0, no) \
+ SC(177, 48, 46, 2, yes, no, 0, no) \
+ SC(178, 48, 46, 3, yes, no, 0, no) \
+ SC(179, 48, 46, 4, yes, no, 0, no) \
+ \
+ SC(180, 49, 47, 1, yes, no, 0, no) \
+ SC(181, 49, 47, 2, yes, no, 0, no) \
+ SC(182, 49, 47, 3, yes, no, 0, no) \
+ SC(183, 49, 47, 4, yes, no, 0, no) \
+ \
+ SC(184, 50, 48, 1, yes, no, 0, no) \
+ SC(185, 50, 48, 2, yes, no, 0, no) \
+ SC(186, 50, 48, 3, yes, no, 0, no) \
+ SC(187, 50, 48, 4, yes, no, 0, no) \
+ \
+ SC(188, 51, 49, 1, yes, no, 0, no) \
+ SC(189, 51, 49, 2, yes, no, 0, no) \
+ SC(190, 51, 49, 3, yes, no, 0, no) \
+ SC(191, 51, 49, 4, yes, no, 0, no) \
+ \
+ SC(192, 52, 50, 1, yes, no, 0, no) \
+ SC(193, 52, 50, 2, yes, no, 0, no) \
+ SC(194, 52, 50, 3, yes, no, 0, no) \
+ SC(195, 52, 50, 4, yes, no, 0, no) \
+ \
+ SC(196, 53, 51, 1, yes, no, 0, no) \
+ SC(197, 53, 51, 2, yes, no, 0, no) \
+ SC(198, 53, 51, 3, yes, no, 0, no) \
+ SC(199, 53, 51, 4, yes, no, 0, no) \
+ \
+ SC(200, 54, 52, 1, yes, no, 0, no) \
+ SC(201, 54, 52, 2, yes, no, 0, no) \
+ SC(202, 54, 52, 3, yes, no, 0, no) \
+ SC(203, 54, 52, 4, yes, no, 0, no) \
+ \
+ SC(204, 55, 53, 1, yes, no, 0, no) \
+ SC(205, 55, 53, 2, yes, no, 0, no) \
+ SC(206, 55, 53, 3, yes, no, 0, no) \
+ SC(207, 55, 53, 4, yes, no, 0, no) \
+ \
+ SC(208, 56, 54, 1, yes, no, 0, no) \
+ SC(209, 56, 54, 2, yes, no, 0, no) \
+ SC(210, 56, 54, 3, yes, no, 0, no) \
+ SC(211, 56, 54, 4, yes, no, 0, no) \
+ \
+ SC(212, 57, 55, 1, yes, no, 0, no) \
+ SC(213, 57, 55, 2, yes, no, 0, no) \
+ SC(214, 57, 55, 3, yes, no, 0, no) \
+ SC(215, 57, 55, 4, yes, no, 0, no) \
+ \
+ SC(216, 58, 56, 1, yes, no, 0, no) \
+ SC(217, 58, 56, 2, yes, no, 0, no) \
+ SC(218, 58, 56, 3, yes, no, 0, no) \
+ SC(219, 58, 56, 4, yes, no, 0, no) \
+ \
+ SC(220, 59, 57, 1, yes, no, 0, no) \
+ SC(221, 59, 57, 2, yes, no, 0, no) \
+ SC(222, 59, 57, 3, yes, no, 0, no) \
+ SC(223, 59, 57, 4, yes, no, 0, no) \
+ \
+ SC(224, 60, 58, 1, yes, no, 0, no) \
+ SC(225, 60, 58, 2, yes, no, 0, no) \
+ SC(226, 60, 58, 3, yes, no, 0, no) \
+ SC(227, 60, 58, 4, yes, no, 0, no) \
+ \
+ SC(228, 61, 59, 1, yes, no, 0, no) \
+ SC(229, 61, 59, 2, yes, no, 0, no) \
+ SC(230, 61, 59, 3, yes, no, 0, no) \
+ SC(231, 61, 59, 4, yes, no, 0, no) \
+ \
+ SC(232, 62, 60, 1, yes, no, 0, no) \
+ SC(233, 62, 60, 2, yes, no, 0, no) \
+ SC(234, 62, 60, 3, yes, no, 0, no) \
-#define SIZE_CLASSES_DEFINED
-#define NTBINS 0
-#define NLBINS 32
-#define NBINS 39
-#define NSIZES 235
-#define NPSIZES 199
-#define LG_TINY_MAXCLASS "NA"
-#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
-#define SMALL_MAXCLASS ((((size_t)1) << 13) + (((size_t)3) << 11))
-#define LG_LARGE_MINCLASS 14
-#define HUGE_MAXCLASS ((((size_t)1) << 62) + (((size_t)3) << 60))
+#define SIZE_CLASSES_DEFINED
+#define NTBINS 0
+#define NLBINS 32
+#define NBINS 39
+#define NSIZES 235
+#define LG_CEIL_NSIZES 8
+#define NPSIZES 199
+#define LG_TINY_MAXCLASS "NA"
+#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
+#define SMALL_MAXCLASS ((((size_t)1) << 13) + (((size_t)3) << 11))
+#define LG_LARGE_MINCLASS 14
+#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)
+#define LARGE_MAXCLASS ((((size_t)1) << 62) + (((size_t)3) << 60))
#endif
#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 13)
-#define SIZE_CLASSES \
- /* index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup */ \
- SC( 0, 3, 3, 0, no, yes, 3) \
- SC( 1, 3, 3, 1, no, yes, 3) \
- SC( 2, 3, 3, 2, no, yes, 3) \
- SC( 3, 3, 3, 3, no, yes, 3) \
- \
- SC( 4, 5, 3, 1, no, yes, 3) \
- SC( 5, 5, 3, 2, no, yes, 3) \
- SC( 6, 5, 3, 3, no, yes, 3) \
- SC( 7, 5, 3, 4, no, yes, 3) \
- \
- SC( 8, 6, 4, 1, no, yes, 4) \
- SC( 9, 6, 4, 2, no, yes, 4) \
- SC( 10, 6, 4, 3, no, yes, 4) \
- SC( 11, 6, 4, 4, no, yes, 4) \
- \
- SC( 12, 7, 5, 1, no, yes, 5) \
- SC( 13, 7, 5, 2, no, yes, 5) \
- SC( 14, 7, 5, 3, no, yes, 5) \
- SC( 15, 7, 5, 4, no, yes, 5) \
- \
- SC( 16, 8, 6, 1, no, yes, 6) \
- SC( 17, 8, 6, 2, no, yes, 6) \
- SC( 18, 8, 6, 3, no, yes, 6) \
- SC( 19, 8, 6, 4, no, yes, 6) \
- \
- SC( 20, 9, 7, 1, no, yes, 7) \
- SC( 21, 9, 7, 2, no, yes, 7) \
- SC( 22, 9, 7, 3, no, yes, 7) \
- SC( 23, 9, 7, 4, no, yes, 7) \
- \
- SC( 24, 10, 8, 1, no, yes, 8) \
- SC( 25, 10, 8, 2, no, yes, 8) \
- SC( 26, 10, 8, 3, no, yes, 8) \
- SC( 27, 10, 8, 4, no, yes, 8) \
- \
- SC( 28, 11, 9, 1, no, yes, 9) \
- SC( 29, 11, 9, 2, no, yes, 9) \
- SC( 30, 11, 9, 3, no, yes, 9) \
- SC( 31, 11, 9, 4, no, yes, 9) \
- \
- SC( 32, 12, 10, 1, no, yes, no) \
- SC( 33, 12, 10, 2, no, yes, no) \
- SC( 34, 12, 10, 3, no, yes, no) \
- SC( 35, 12, 10, 4, yes, yes, no) \
- \
- SC( 36, 13, 11, 1, no, yes, no) \
- SC( 37, 13, 11, 2, no, yes, no) \
- SC( 38, 13, 11, 3, no, yes, no) \
- SC( 39, 13, 11, 4, yes, yes, no) \
- \
- SC( 40, 14, 12, 1, no, yes, no) \
- SC( 41, 14, 12, 2, yes, yes, no) \
- SC( 42, 14, 12, 3, no, yes, no) \
- SC( 43, 14, 12, 4, yes, no, no) \
- \
- SC( 44, 15, 13, 1, yes, no, no) \
- SC( 45, 15, 13, 2, yes, no, no) \
- SC( 46, 15, 13, 3, yes, no, no) \
- SC( 47, 15, 13, 4, yes, no, no) \
- \
- SC( 48, 16, 14, 1, yes, no, no) \
- SC( 49, 16, 14, 2, yes, no, no) \
- SC( 50, 16, 14, 3, yes, no, no) \
- SC( 51, 16, 14, 4, yes, no, no) \
- \
- SC( 52, 17, 15, 1, yes, no, no) \
- SC( 53, 17, 15, 2, yes, no, no) \
- SC( 54, 17, 15, 3, yes, no, no) \
- SC( 55, 17, 15, 4, yes, no, no) \
- \
- SC( 56, 18, 16, 1, yes, no, no) \
- SC( 57, 18, 16, 2, yes, no, no) \
- SC( 58, 18, 16, 3, yes, no, no) \
- SC( 59, 18, 16, 4, yes, no, no) \
- \
- SC( 60, 19, 17, 1, yes, no, no) \
- SC( 61, 19, 17, 2, yes, no, no) \
- SC( 62, 19, 17, 3, yes, no, no) \
- SC( 63, 19, 17, 4, yes, no, no) \
- \
- SC( 64, 20, 18, 1, yes, no, no) \
- SC( 65, 20, 18, 2, yes, no, no) \
- SC( 66, 20, 18, 3, yes, no, no) \
- SC( 67, 20, 18, 4, yes, no, no) \
- \
- SC( 68, 21, 19, 1, yes, no, no) \
- SC( 69, 21, 19, 2, yes, no, no) \
- SC( 70, 21, 19, 3, yes, no, no) \
- SC( 71, 21, 19, 4, yes, no, no) \
- \
- SC( 72, 22, 20, 1, yes, no, no) \
- SC( 73, 22, 20, 2, yes, no, no) \
- SC( 74, 22, 20, 3, yes, no, no) \
- SC( 75, 22, 20, 4, yes, no, no) \
- \
- SC( 76, 23, 21, 1, yes, no, no) \
- SC( 77, 23, 21, 2, yes, no, no) \
- SC( 78, 23, 21, 3, yes, no, no) \
- SC( 79, 23, 21, 4, yes, no, no) \
- \
- SC( 80, 24, 22, 1, yes, no, no) \
- SC( 81, 24, 22, 2, yes, no, no) \
- SC( 82, 24, 22, 3, yes, no, no) \
- SC( 83, 24, 22, 4, yes, no, no) \
- \
- SC( 84, 25, 23, 1, yes, no, no) \
- SC( 85, 25, 23, 2, yes, no, no) \
- SC( 86, 25, 23, 3, yes, no, no) \
- SC( 87, 25, 23, 4, yes, no, no) \
- \
- SC( 88, 26, 24, 1, yes, no, no) \
- SC( 89, 26, 24, 2, yes, no, no) \
- SC( 90, 26, 24, 3, yes, no, no) \
- SC( 91, 26, 24, 4, yes, no, no) \
- \
- SC( 92, 27, 25, 1, yes, no, no) \
- SC( 93, 27, 25, 2, yes, no, no) \
- SC( 94, 27, 25, 3, yes, no, no) \
- SC( 95, 27, 25, 4, yes, no, no) \
- \
- SC( 96, 28, 26, 1, yes, no, no) \
- SC( 97, 28, 26, 2, yes, no, no) \
- SC( 98, 28, 26, 3, yes, no, no) \
- SC( 99, 28, 26, 4, yes, no, no) \
- \
- SC(100, 29, 27, 1, yes, no, no) \
- SC(101, 29, 27, 2, yes, no, no) \
- SC(102, 29, 27, 3, yes, no, no) \
- SC(103, 29, 27, 4, yes, no, no) \
- \
- SC(104, 30, 28, 1, yes, no, no) \
- SC(105, 30, 28, 2, yes, no, no) \
- SC(106, 30, 28, 3, yes, no, no) \
- SC(107, 30, 28, 4, yes, no, no) \
- \
- SC(108, 31, 29, 1, yes, no, no) \
- SC(109, 31, 29, 2, yes, no, no) \
- SC(110, 31, 29, 3, yes, no, no) \
- SC(111, 31, 29, 4, yes, no, no) \
- \
- SC(112, 32, 30, 1, yes, no, no) \
- SC(113, 32, 30, 2, yes, no, no) \
- SC(114, 32, 30, 3, yes, no, no) \
- SC(115, 32, 30, 4, yes, no, no) \
- \
- SC(116, 33, 31, 1, yes, no, no) \
- SC(117, 33, 31, 2, yes, no, no) \
- SC(118, 33, 31, 3, yes, no, no) \
- SC(119, 33, 31, 4, yes, no, no) \
- \
- SC(120, 34, 32, 1, yes, no, no) \
- SC(121, 34, 32, 2, yes, no, no) \
- SC(122, 34, 32, 3, yes, no, no) \
- SC(123, 34, 32, 4, yes, no, no) \
- \
- SC(124, 35, 33, 1, yes, no, no) \
- SC(125, 35, 33, 2, yes, no, no) \
- SC(126, 35, 33, 3, yes, no, no) \
- SC(127, 35, 33, 4, yes, no, no) \
- \
- SC(128, 36, 34, 1, yes, no, no) \
- SC(129, 36, 34, 2, yes, no, no) \
- SC(130, 36, 34, 3, yes, no, no) \
- SC(131, 36, 34, 4, yes, no, no) \
- \
- SC(132, 37, 35, 1, yes, no, no) \
- SC(133, 37, 35, 2, yes, no, no) \
- SC(134, 37, 35, 3, yes, no, no) \
- SC(135, 37, 35, 4, yes, no, no) \
- \
- SC(136, 38, 36, 1, yes, no, no) \
- SC(137, 38, 36, 2, yes, no, no) \
- SC(138, 38, 36, 3, yes, no, no) \
- SC(139, 38, 36, 4, yes, no, no) \
- \
- SC(140, 39, 37, 1, yes, no, no) \
- SC(141, 39, 37, 2, yes, no, no) \
- SC(142, 39, 37, 3, yes, no, no) \
- SC(143, 39, 37, 4, yes, no, no) \
- \
- SC(144, 40, 38, 1, yes, no, no) \
- SC(145, 40, 38, 2, yes, no, no) \
- SC(146, 40, 38, 3, yes, no, no) \
- SC(147, 40, 38, 4, yes, no, no) \
- \
- SC(148, 41, 39, 1, yes, no, no) \
- SC(149, 41, 39, 2, yes, no, no) \
- SC(150, 41, 39, 3, yes, no, no) \
- SC(151, 41, 39, 4, yes, no, no) \
- \
- SC(152, 42, 40, 1, yes, no, no) \
- SC(153, 42, 40, 2, yes, no, no) \
- SC(154, 42, 40, 3, yes, no, no) \
- SC(155, 42, 40, 4, yes, no, no) \
- \
- SC(156, 43, 41, 1, yes, no, no) \
- SC(157, 43, 41, 2, yes, no, no) \
- SC(158, 43, 41, 3, yes, no, no) \
- SC(159, 43, 41, 4, yes, no, no) \
- \
- SC(160, 44, 42, 1, yes, no, no) \
- SC(161, 44, 42, 2, yes, no, no) \
- SC(162, 44, 42, 3, yes, no, no) \
- SC(163, 44, 42, 4, yes, no, no) \
- \
- SC(164, 45, 43, 1, yes, no, no) \
- SC(165, 45, 43, 2, yes, no, no) \
- SC(166, 45, 43, 3, yes, no, no) \
- SC(167, 45, 43, 4, yes, no, no) \
- \
- SC(168, 46, 44, 1, yes, no, no) \
- SC(169, 46, 44, 2, yes, no, no) \
- SC(170, 46, 44, 3, yes, no, no) \
- SC(171, 46, 44, 4, yes, no, no) \
- \
- SC(172, 47, 45, 1, yes, no, no) \
- SC(173, 47, 45, 2, yes, no, no) \
- SC(174, 47, 45, 3, yes, no, no) \
- SC(175, 47, 45, 4, yes, no, no) \
- \
- SC(176, 48, 46, 1, yes, no, no) \
- SC(177, 48, 46, 2, yes, no, no) \
- SC(178, 48, 46, 3, yes, no, no) \
- SC(179, 48, 46, 4, yes, no, no) \
- \
- SC(180, 49, 47, 1, yes, no, no) \
- SC(181, 49, 47, 2, yes, no, no) \
- SC(182, 49, 47, 3, yes, no, no) \
- SC(183, 49, 47, 4, yes, no, no) \
- \
- SC(184, 50, 48, 1, yes, no, no) \
- SC(185, 50, 48, 2, yes, no, no) \
- SC(186, 50, 48, 3, yes, no, no) \
- SC(187, 50, 48, 4, yes, no, no) \
- \
- SC(188, 51, 49, 1, yes, no, no) \
- SC(189, 51, 49, 2, yes, no, no) \
- SC(190, 51, 49, 3, yes, no, no) \
- SC(191, 51, 49, 4, yes, no, no) \
- \
- SC(192, 52, 50, 1, yes, no, no) \
- SC(193, 52, 50, 2, yes, no, no) \
- SC(194, 52, 50, 3, yes, no, no) \
- SC(195, 52, 50, 4, yes, no, no) \
- \
- SC(196, 53, 51, 1, yes, no, no) \
- SC(197, 53, 51, 2, yes, no, no) \
- SC(198, 53, 51, 3, yes, no, no) \
- SC(199, 53, 51, 4, yes, no, no) \
- \
- SC(200, 54, 52, 1, yes, no, no) \
- SC(201, 54, 52, 2, yes, no, no) \
- SC(202, 54, 52, 3, yes, no, no) \
- SC(203, 54, 52, 4, yes, no, no) \
- \
- SC(204, 55, 53, 1, yes, no, no) \
- SC(205, 55, 53, 2, yes, no, no) \
- SC(206, 55, 53, 3, yes, no, no) \
- SC(207, 55, 53, 4, yes, no, no) \
- \
- SC(208, 56, 54, 1, yes, no, no) \
- SC(209, 56, 54, 2, yes, no, no) \
- SC(210, 56, 54, 3, yes, no, no) \
- SC(211, 56, 54, 4, yes, no, no) \
- \
- SC(212, 57, 55, 1, yes, no, no) \
- SC(213, 57, 55, 2, yes, no, no) \
- SC(214, 57, 55, 3, yes, no, no) \
- SC(215, 57, 55, 4, yes, no, no) \
- \
- SC(216, 58, 56, 1, yes, no, no) \
- SC(217, 58, 56, 2, yes, no, no) \
- SC(218, 58, 56, 3, yes, no, no) \
- SC(219, 58, 56, 4, yes, no, no) \
- \
- SC(220, 59, 57, 1, yes, no, no) \
- SC(221, 59, 57, 2, yes, no, no) \
- SC(222, 59, 57, 3, yes, no, no) \
- SC(223, 59, 57, 4, yes, no, no) \
- \
- SC(224, 60, 58, 1, yes, no, no) \
- SC(225, 60, 58, 2, yes, no, no) \
- SC(226, 60, 58, 3, yes, no, no) \
- SC(227, 60, 58, 4, yes, no, no) \
- \
- SC(228, 61, 59, 1, yes, no, no) \
- SC(229, 61, 59, 2, yes, no, no) \
- SC(230, 61, 59, 3, yes, no, no) \
- SC(231, 61, 59, 4, yes, no, no) \
- \
- SC(232, 62, 60, 1, yes, no, no) \
- SC(233, 62, 60, 2, yes, no, no) \
- SC(234, 62, 60, 3, yes, no, no) \
+#define SIZE_CLASSES \
+ /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \
+ SC( 0, 3, 3, 0, no, yes, 1, 3) \
+ SC( 1, 3, 3, 1, no, yes, 1, 3) \
+ SC( 2, 3, 3, 2, no, yes, 3, 3) \
+ SC( 3, 3, 3, 3, no, yes, 1, 3) \
+ \
+ SC( 4, 5, 3, 1, no, yes, 5, 3) \
+ SC( 5, 5, 3, 2, no, yes, 3, 3) \
+ SC( 6, 5, 3, 3, no, yes, 7, 3) \
+ SC( 7, 5, 3, 4, no, yes, 1, 3) \
+ \
+ SC( 8, 6, 4, 1, no, yes, 5, 4) \
+ SC( 9, 6, 4, 2, no, yes, 3, 4) \
+ SC( 10, 6, 4, 3, no, yes, 7, 4) \
+ SC( 11, 6, 4, 4, no, yes, 1, 4) \
+ \
+ SC( 12, 7, 5, 1, no, yes, 5, 5) \
+ SC( 13, 7, 5, 2, no, yes, 3, 5) \
+ SC( 14, 7, 5, 3, no, yes, 7, 5) \
+ SC( 15, 7, 5, 4, no, yes, 1, 5) \
+ \
+ SC( 16, 8, 6, 1, no, yes, 5, 6) \
+ SC( 17, 8, 6, 2, no, yes, 3, 6) \
+ SC( 18, 8, 6, 3, no, yes, 7, 6) \
+ SC( 19, 8, 6, 4, no, yes, 1, 6) \
+ \
+ SC( 20, 9, 7, 1, no, yes, 5, 7) \
+ SC( 21, 9, 7, 2, no, yes, 3, 7) \
+ SC( 22, 9, 7, 3, no, yes, 7, 7) \
+ SC( 23, 9, 7, 4, no, yes, 1, 7) \
+ \
+ SC( 24, 10, 8, 1, no, yes, 5, 8) \
+ SC( 25, 10, 8, 2, no, yes, 3, 8) \
+ SC( 26, 10, 8, 3, no, yes, 7, 8) \
+ SC( 27, 10, 8, 4, no, yes, 1, 8) \
+ \
+ SC( 28, 11, 9, 1, no, yes, 5, 9) \
+ SC( 29, 11, 9, 2, no, yes, 3, 9) \
+ SC( 30, 11, 9, 3, no, yes, 7, 9) \
+ SC( 31, 11, 9, 4, no, yes, 1, 9) \
+ \
+ SC( 32, 12, 10, 1, no, yes, 5, no) \
+ SC( 33, 12, 10, 2, no, yes, 3, no) \
+ SC( 34, 12, 10, 3, no, yes, 7, no) \
+ SC( 35, 12, 10, 4, yes, yes, 1, no) \
+ \
+ SC( 36, 13, 11, 1, no, yes, 5, no) \
+ SC( 37, 13, 11, 2, no, yes, 3, no) \
+ SC( 38, 13, 11, 3, no, yes, 7, no) \
+ SC( 39, 13, 11, 4, yes, yes, 2, no) \
+ \
+ SC( 40, 14, 12, 1, no, yes, 5, no) \
+ SC( 41, 14, 12, 2, yes, yes, 3, no) \
+ SC( 42, 14, 12, 3, no, yes, 7, no) \
+ SC( 43, 14, 12, 4, yes, no, 0, no) \
+ \
+ SC( 44, 15, 13, 1, yes, no, 0, no) \
+ SC( 45, 15, 13, 2, yes, no, 0, no) \
+ SC( 46, 15, 13, 3, yes, no, 0, no) \
+ SC( 47, 15, 13, 4, yes, no, 0, no) \
+ \
+ SC( 48, 16, 14, 1, yes, no, 0, no) \
+ SC( 49, 16, 14, 2, yes, no, 0, no) \
+ SC( 50, 16, 14, 3, yes, no, 0, no) \
+ SC( 51, 16, 14, 4, yes, no, 0, no) \
+ \
+ SC( 52, 17, 15, 1, yes, no, 0, no) \
+ SC( 53, 17, 15, 2, yes, no, 0, no) \
+ SC( 54, 17, 15, 3, yes, no, 0, no) \
+ SC( 55, 17, 15, 4, yes, no, 0, no) \
+ \
+ SC( 56, 18, 16, 1, yes, no, 0, no) \
+ SC( 57, 18, 16, 2, yes, no, 0, no) \
+ SC( 58, 18, 16, 3, yes, no, 0, no) \
+ SC( 59, 18, 16, 4, yes, no, 0, no) \
+ \
+ SC( 60, 19, 17, 1, yes, no, 0, no) \
+ SC( 61, 19, 17, 2, yes, no, 0, no) \
+ SC( 62, 19, 17, 3, yes, no, 0, no) \
+ SC( 63, 19, 17, 4, yes, no, 0, no) \
+ \
+ SC( 64, 20, 18, 1, yes, no, 0, no) \
+ SC( 65, 20, 18, 2, yes, no, 0, no) \
+ SC( 66, 20, 18, 3, yes, no, 0, no) \
+ SC( 67, 20, 18, 4, yes, no, 0, no) \
+ \
+ SC( 68, 21, 19, 1, yes, no, 0, no) \
+ SC( 69, 21, 19, 2, yes, no, 0, no) \
+ SC( 70, 21, 19, 3, yes, no, 0, no) \
+ SC( 71, 21, 19, 4, yes, no, 0, no) \
+ \
+ SC( 72, 22, 20, 1, yes, no, 0, no) \
+ SC( 73, 22, 20, 2, yes, no, 0, no) \
+ SC( 74, 22, 20, 3, yes, no, 0, no) \
+ SC( 75, 22, 20, 4, yes, no, 0, no) \
+ \
+ SC( 76, 23, 21, 1, yes, no, 0, no) \
+ SC( 77, 23, 21, 2, yes, no, 0, no) \
+ SC( 78, 23, 21, 3, yes, no, 0, no) \
+ SC( 79, 23, 21, 4, yes, no, 0, no) \
+ \
+ SC( 80, 24, 22, 1, yes, no, 0, no) \
+ SC( 81, 24, 22, 2, yes, no, 0, no) \
+ SC( 82, 24, 22, 3, yes, no, 0, no) \
+ SC( 83, 24, 22, 4, yes, no, 0, no) \
+ \
+ SC( 84, 25, 23, 1, yes, no, 0, no) \
+ SC( 85, 25, 23, 2, yes, no, 0, no) \
+ SC( 86, 25, 23, 3, yes, no, 0, no) \
+ SC( 87, 25, 23, 4, yes, no, 0, no) \
+ \
+ SC( 88, 26, 24, 1, yes, no, 0, no) \
+ SC( 89, 26, 24, 2, yes, no, 0, no) \
+ SC( 90, 26, 24, 3, yes, no, 0, no) \
+ SC( 91, 26, 24, 4, yes, no, 0, no) \
+ \
+ SC( 92, 27, 25, 1, yes, no, 0, no) \
+ SC( 93, 27, 25, 2, yes, no, 0, no) \
+ SC( 94, 27, 25, 3, yes, no, 0, no) \
+ SC( 95, 27, 25, 4, yes, no, 0, no) \
+ \
+ SC( 96, 28, 26, 1, yes, no, 0, no) \
+ SC( 97, 28, 26, 2, yes, no, 0, no) \
+ SC( 98, 28, 26, 3, yes, no, 0, no) \
+ SC( 99, 28, 26, 4, yes, no, 0, no) \
+ \
+ SC(100, 29, 27, 1, yes, no, 0, no) \
+ SC(101, 29, 27, 2, yes, no, 0, no) \
+ SC(102, 29, 27, 3, yes, no, 0, no) \
+ SC(103, 29, 27, 4, yes, no, 0, no) \
+ \
+ SC(104, 30, 28, 1, yes, no, 0, no) \
+ SC(105, 30, 28, 2, yes, no, 0, no) \
+ SC(106, 30, 28, 3, yes, no, 0, no) \
+ SC(107, 30, 28, 4, yes, no, 0, no) \
+ \
+ SC(108, 31, 29, 1, yes, no, 0, no) \
+ SC(109, 31, 29, 2, yes, no, 0, no) \
+ SC(110, 31, 29, 3, yes, no, 0, no) \
+ SC(111, 31, 29, 4, yes, no, 0, no) \
+ \
+ SC(112, 32, 30, 1, yes, no, 0, no) \
+ SC(113, 32, 30, 2, yes, no, 0, no) \
+ SC(114, 32, 30, 3, yes, no, 0, no) \
+ SC(115, 32, 30, 4, yes, no, 0, no) \
+ \
+ SC(116, 33, 31, 1, yes, no, 0, no) \
+ SC(117, 33, 31, 2, yes, no, 0, no) \
+ SC(118, 33, 31, 3, yes, no, 0, no) \
+ SC(119, 33, 31, 4, yes, no, 0, no) \
+ \
+ SC(120, 34, 32, 1, yes, no, 0, no) \
+ SC(121, 34, 32, 2, yes, no, 0, no) \
+ SC(122, 34, 32, 3, yes, no, 0, no) \
+ SC(123, 34, 32, 4, yes, no, 0, no) \
+ \
+ SC(124, 35, 33, 1, yes, no, 0, no) \
+ SC(125, 35, 33, 2, yes, no, 0, no) \
+ SC(126, 35, 33, 3, yes, no, 0, no) \
+ SC(127, 35, 33, 4, yes, no, 0, no) \
+ \
+ SC(128, 36, 34, 1, yes, no, 0, no) \
+ SC(129, 36, 34, 2, yes, no, 0, no) \
+ SC(130, 36, 34, 3, yes, no, 0, no) \
+ SC(131, 36, 34, 4, yes, no, 0, no) \
+ \
+ SC(132, 37, 35, 1, yes, no, 0, no) \
+ SC(133, 37, 35, 2, yes, no, 0, no) \
+ SC(134, 37, 35, 3, yes, no, 0, no) \
+ SC(135, 37, 35, 4, yes, no, 0, no) \
+ \
+ SC(136, 38, 36, 1, yes, no, 0, no) \
+ SC(137, 38, 36, 2, yes, no, 0, no) \
+ SC(138, 38, 36, 3, yes, no, 0, no) \
+ SC(139, 38, 36, 4, yes, no, 0, no) \
+ \
+ SC(140, 39, 37, 1, yes, no, 0, no) \
+ SC(141, 39, 37, 2, yes, no, 0, no) \
+ SC(142, 39, 37, 3, yes, no, 0, no) \
+ SC(143, 39, 37, 4, yes, no, 0, no) \
+ \
+ SC(144, 40, 38, 1, yes, no, 0, no) \
+ SC(145, 40, 38, 2, yes, no, 0, no) \
+ SC(146, 40, 38, 3, yes, no, 0, no) \
+ SC(147, 40, 38, 4, yes, no, 0, no) \
+ \
+ SC(148, 41, 39, 1, yes, no, 0, no) \
+ SC(149, 41, 39, 2, yes, no, 0, no) \
+ SC(150, 41, 39, 3, yes, no, 0, no) \
+ SC(151, 41, 39, 4, yes, no, 0, no) \
+ \
+ SC(152, 42, 40, 1, yes, no, 0, no) \
+ SC(153, 42, 40, 2, yes, no, 0, no) \
+ SC(154, 42, 40, 3, yes, no, 0, no) \
+ SC(155, 42, 40, 4, yes, no, 0, no) \
+ \
+ SC(156, 43, 41, 1, yes, no, 0, no) \
+ SC(157, 43, 41, 2, yes, no, 0, no) \
+ SC(158, 43, 41, 3, yes, no, 0, no) \
+ SC(159, 43, 41, 4, yes, no, 0, no) \
+ \
+ SC(160, 44, 42, 1, yes, no, 0, no) \
+ SC(161, 44, 42, 2, yes, no, 0, no) \
+ SC(162, 44, 42, 3, yes, no, 0, no) \
+ SC(163, 44, 42, 4, yes, no, 0, no) \
+ \
+ SC(164, 45, 43, 1, yes, no, 0, no) \
+ SC(165, 45, 43, 2, yes, no, 0, no) \
+ SC(166, 45, 43, 3, yes, no, 0, no) \
+ SC(167, 45, 43, 4, yes, no, 0, no) \
+ \
+ SC(168, 46, 44, 1, yes, no, 0, no) \
+ SC(169, 46, 44, 2, yes, no, 0, no) \
+ SC(170, 46, 44, 3, yes, no, 0, no) \
+ SC(171, 46, 44, 4, yes, no, 0, no) \
+ \
+ SC(172, 47, 45, 1, yes, no, 0, no) \
+ SC(173, 47, 45, 2, yes, no, 0, no) \
+ SC(174, 47, 45, 3, yes, no, 0, no) \
+ SC(175, 47, 45, 4, yes, no, 0, no) \
+ \
+ SC(176, 48, 46, 1, yes, no, 0, no) \
+ SC(177, 48, 46, 2, yes, no, 0, no) \
+ SC(178, 48, 46, 3, yes, no, 0, no) \
+ SC(179, 48, 46, 4, yes, no, 0, no) \
+ \
+ SC(180, 49, 47, 1, yes, no, 0, no) \
+ SC(181, 49, 47, 2, yes, no, 0, no) \
+ SC(182, 49, 47, 3, yes, no, 0, no) \
+ SC(183, 49, 47, 4, yes, no, 0, no) \
+ \
+ SC(184, 50, 48, 1, yes, no, 0, no) \
+ SC(185, 50, 48, 2, yes, no, 0, no) \
+ SC(186, 50, 48, 3, yes, no, 0, no) \
+ SC(187, 50, 48, 4, yes, no, 0, no) \
+ \
+ SC(188, 51, 49, 1, yes, no, 0, no) \
+ SC(189, 51, 49, 2, yes, no, 0, no) \
+ SC(190, 51, 49, 3, yes, no, 0, no) \
+ SC(191, 51, 49, 4, yes, no, 0, no) \
+ \
+ SC(192, 52, 50, 1, yes, no, 0, no) \
+ SC(193, 52, 50, 2, yes, no, 0, no) \
+ SC(194, 52, 50, 3, yes, no, 0, no) \
+ SC(195, 52, 50, 4, yes, no, 0, no) \
+ \
+ SC(196, 53, 51, 1, yes, no, 0, no) \
+ SC(197, 53, 51, 2, yes, no, 0, no) \
+ SC(198, 53, 51, 3, yes, no, 0, no) \
+ SC(199, 53, 51, 4, yes, no, 0, no) \
+ \
+ SC(200, 54, 52, 1, yes, no, 0, no) \
+ SC(201, 54, 52, 2, yes, no, 0, no) \
+ SC(202, 54, 52, 3, yes, no, 0, no) \
+ SC(203, 54, 52, 4, yes, no, 0, no) \
+ \
+ SC(204, 55, 53, 1, yes, no, 0, no) \
+ SC(205, 55, 53, 2, yes, no, 0, no) \
+ SC(206, 55, 53, 3, yes, no, 0, no) \
+ SC(207, 55, 53, 4, yes, no, 0, no) \
+ \
+ SC(208, 56, 54, 1, yes, no, 0, no) \
+ SC(209, 56, 54, 2, yes, no, 0, no) \
+ SC(210, 56, 54, 3, yes, no, 0, no) \
+ SC(211, 56, 54, 4, yes, no, 0, no) \
+ \
+ SC(212, 57, 55, 1, yes, no, 0, no) \
+ SC(213, 57, 55, 2, yes, no, 0, no) \
+ SC(214, 57, 55, 3, yes, no, 0, no) \
+ SC(215, 57, 55, 4, yes, no, 0, no) \
+ \
+ SC(216, 58, 56, 1, yes, no, 0, no) \
+ SC(217, 58, 56, 2, yes, no, 0, no) \
+ SC(218, 58, 56, 3, yes, no, 0, no) \
+ SC(219, 58, 56, 4, yes, no, 0, no) \
+ \
+ SC(220, 59, 57, 1, yes, no, 0, no) \
+ SC(221, 59, 57, 2, yes, no, 0, no) \
+ SC(222, 59, 57, 3, yes, no, 0, no) \
+ SC(223, 59, 57, 4, yes, no, 0, no) \
+ \
+ SC(224, 60, 58, 1, yes, no, 0, no) \
+ SC(225, 60, 58, 2, yes, no, 0, no) \
+ SC(226, 60, 58, 3, yes, no, 0, no) \
+ SC(227, 60, 58, 4, yes, no, 0, no) \
+ \
+ SC(228, 61, 59, 1, yes, no, 0, no) \
+ SC(229, 61, 59, 2, yes, no, 0, no) \
+ SC(230, 61, 59, 3, yes, no, 0, no) \
+ SC(231, 61, 59, 4, yes, no, 0, no) \
+ \
+ SC(232, 62, 60, 1, yes, no, 0, no) \
+ SC(233, 62, 60, 2, yes, no, 0, no) \
+ SC(234, 62, 60, 3, yes, no, 0, no) \
-#define SIZE_CLASSES_DEFINED
-#define NTBINS 0
-#define NLBINS 32
-#define NBINS 43
-#define NSIZES 235
-#define NPSIZES 195
-#define LG_TINY_MAXCLASS "NA"
-#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
-#define SMALL_MAXCLASS ((((size_t)1) << 14) + (((size_t)3) << 12))
-#define LG_LARGE_MINCLASS 15
-#define HUGE_MAXCLASS ((((size_t)1) << 62) + (((size_t)3) << 60))
+#define SIZE_CLASSES_DEFINED
+#define NTBINS 0
+#define NLBINS 32
+#define NBINS 43
+#define NSIZES 235
+#define LG_CEIL_NSIZES 8
+#define NPSIZES 195
+#define LG_TINY_MAXCLASS "NA"
+#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
+#define SMALL_MAXCLASS ((((size_t)1) << 14) + (((size_t)3) << 12))
+#define LG_LARGE_MINCLASS 15
+#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)
+#define LARGE_MAXCLASS ((((size_t)1) << 62) + (((size_t)3) << 60))
#endif
#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 14)
-#define SIZE_CLASSES \
- /* index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup */ \
- SC( 0, 3, 3, 0, no, yes, 3) \
- SC( 1, 3, 3, 1, no, yes, 3) \
- SC( 2, 3, 3, 2, no, yes, 3) \
- SC( 3, 3, 3, 3, no, yes, 3) \
- \
- SC( 4, 5, 3, 1, no, yes, 3) \
- SC( 5, 5, 3, 2, no, yes, 3) \
- SC( 6, 5, 3, 3, no, yes, 3) \
- SC( 7, 5, 3, 4, no, yes, 3) \
- \
- SC( 8, 6, 4, 1, no, yes, 4) \
- SC( 9, 6, 4, 2, no, yes, 4) \
- SC( 10, 6, 4, 3, no, yes, 4) \
- SC( 11, 6, 4, 4, no, yes, 4) \
- \
- SC( 12, 7, 5, 1, no, yes, 5) \
- SC( 13, 7, 5, 2, no, yes, 5) \
- SC( 14, 7, 5, 3, no, yes, 5) \
- SC( 15, 7, 5, 4, no, yes, 5) \
- \
- SC( 16, 8, 6, 1, no, yes, 6) \
- SC( 17, 8, 6, 2, no, yes, 6) \
- SC( 18, 8, 6, 3, no, yes, 6) \
- SC( 19, 8, 6, 4, no, yes, 6) \
- \
- SC( 20, 9, 7, 1, no, yes, 7) \
- SC( 21, 9, 7, 2, no, yes, 7) \
- SC( 22, 9, 7, 3, no, yes, 7) \
- SC( 23, 9, 7, 4, no, yes, 7) \
- \
- SC( 24, 10, 8, 1, no, yes, 8) \
- SC( 25, 10, 8, 2, no, yes, 8) \
- SC( 26, 10, 8, 3, no, yes, 8) \
- SC( 27, 10, 8, 4, no, yes, 8) \
- \
- SC( 28, 11, 9, 1, no, yes, 9) \
- SC( 29, 11, 9, 2, no, yes, 9) \
- SC( 30, 11, 9, 3, no, yes, 9) \
- SC( 31, 11, 9, 4, no, yes, 9) \
- \
- SC( 32, 12, 10, 1, no, yes, no) \
- SC( 33, 12, 10, 2, no, yes, no) \
- SC( 34, 12, 10, 3, no, yes, no) \
- SC( 35, 12, 10, 4, no, yes, no) \
- \
- SC( 36, 13, 11, 1, no, yes, no) \
- SC( 37, 13, 11, 2, no, yes, no) \
- SC( 38, 13, 11, 3, no, yes, no) \
- SC( 39, 13, 11, 4, yes, yes, no) \
- \
- SC( 40, 14, 12, 1, no, yes, no) \
- SC( 41, 14, 12, 2, no, yes, no) \
- SC( 42, 14, 12, 3, no, yes, no) \
- SC( 43, 14, 12, 4, yes, yes, no) \
- \
- SC( 44, 15, 13, 1, no, yes, no) \
- SC( 45, 15, 13, 2, yes, yes, no) \
- SC( 46, 15, 13, 3, no, yes, no) \
- SC( 47, 15, 13, 4, yes, no, no) \
- \
- SC( 48, 16, 14, 1, yes, no, no) \
- SC( 49, 16, 14, 2, yes, no, no) \
- SC( 50, 16, 14, 3, yes, no, no) \
- SC( 51, 16, 14, 4, yes, no, no) \
- \
- SC( 52, 17, 15, 1, yes, no, no) \
- SC( 53, 17, 15, 2, yes, no, no) \
- SC( 54, 17, 15, 3, yes, no, no) \
- SC( 55, 17, 15, 4, yes, no, no) \
- \
- SC( 56, 18, 16, 1, yes, no, no) \
- SC( 57, 18, 16, 2, yes, no, no) \
- SC( 58, 18, 16, 3, yes, no, no) \
- SC( 59, 18, 16, 4, yes, no, no) \
- \
- SC( 60, 19, 17, 1, yes, no, no) \
- SC( 61, 19, 17, 2, yes, no, no) \
- SC( 62, 19, 17, 3, yes, no, no) \
- SC( 63, 19, 17, 4, yes, no, no) \
- \
- SC( 64, 20, 18, 1, yes, no, no) \
- SC( 65, 20, 18, 2, yes, no, no) \
- SC( 66, 20, 18, 3, yes, no, no) \
- SC( 67, 20, 18, 4, yes, no, no) \
- \
- SC( 68, 21, 19, 1, yes, no, no) \
- SC( 69, 21, 19, 2, yes, no, no) \
- SC( 70, 21, 19, 3, yes, no, no) \
- SC( 71, 21, 19, 4, yes, no, no) \
- \
- SC( 72, 22, 20, 1, yes, no, no) \
- SC( 73, 22, 20, 2, yes, no, no) \
- SC( 74, 22, 20, 3, yes, no, no) \
- SC( 75, 22, 20, 4, yes, no, no) \
- \
- SC( 76, 23, 21, 1, yes, no, no) \
- SC( 77, 23, 21, 2, yes, no, no) \
- SC( 78, 23, 21, 3, yes, no, no) \
- SC( 79, 23, 21, 4, yes, no, no) \
- \
- SC( 80, 24, 22, 1, yes, no, no) \
- SC( 81, 24, 22, 2, yes, no, no) \
- SC( 82, 24, 22, 3, yes, no, no) \
- SC( 83, 24, 22, 4, yes, no, no) \
- \
- SC( 84, 25, 23, 1, yes, no, no) \
- SC( 85, 25, 23, 2, yes, no, no) \
- SC( 86, 25, 23, 3, yes, no, no) \
- SC( 87, 25, 23, 4, yes, no, no) \
- \
- SC( 88, 26, 24, 1, yes, no, no) \
- SC( 89, 26, 24, 2, yes, no, no) \
- SC( 90, 26, 24, 3, yes, no, no) \
- SC( 91, 26, 24, 4, yes, no, no) \
- \
- SC( 92, 27, 25, 1, yes, no, no) \
- SC( 93, 27, 25, 2, yes, no, no) \
- SC( 94, 27, 25, 3, yes, no, no) \
- SC( 95, 27, 25, 4, yes, no, no) \
- \
- SC( 96, 28, 26, 1, yes, no, no) \
- SC( 97, 28, 26, 2, yes, no, no) \
- SC( 98, 28, 26, 3, yes, no, no) \
- SC( 99, 28, 26, 4, yes, no, no) \
- \
- SC(100, 29, 27, 1, yes, no, no) \
- SC(101, 29, 27, 2, yes, no, no) \
- SC(102, 29, 27, 3, yes, no, no) \
- SC(103, 29, 27, 4, yes, no, no) \
- \
- SC(104, 30, 28, 1, yes, no, no) \
- SC(105, 30, 28, 2, yes, no, no) \
- SC(106, 30, 28, 3, yes, no, no) \
- SC(107, 30, 28, 4, yes, no, no) \
- \
- SC(108, 31, 29, 1, yes, no, no) \
- SC(109, 31, 29, 2, yes, no, no) \
- SC(110, 31, 29, 3, yes, no, no) \
- SC(111, 31, 29, 4, yes, no, no) \
- \
- SC(112, 32, 30, 1, yes, no, no) \
- SC(113, 32, 30, 2, yes, no, no) \
- SC(114, 32, 30, 3, yes, no, no) \
- SC(115, 32, 30, 4, yes, no, no) \
- \
- SC(116, 33, 31, 1, yes, no, no) \
- SC(117, 33, 31, 2, yes, no, no) \
- SC(118, 33, 31, 3, yes, no, no) \
- SC(119, 33, 31, 4, yes, no, no) \
- \
- SC(120, 34, 32, 1, yes, no, no) \
- SC(121, 34, 32, 2, yes, no, no) \
- SC(122, 34, 32, 3, yes, no, no) \
- SC(123, 34, 32, 4, yes, no, no) \
- \
- SC(124, 35, 33, 1, yes, no, no) \
- SC(125, 35, 33, 2, yes, no, no) \
- SC(126, 35, 33, 3, yes, no, no) \
- SC(127, 35, 33, 4, yes, no, no) \
- \
- SC(128, 36, 34, 1, yes, no, no) \
- SC(129, 36, 34, 2, yes, no, no) \
- SC(130, 36, 34, 3, yes, no, no) \
- SC(131, 36, 34, 4, yes, no, no) \
- \
- SC(132, 37, 35, 1, yes, no, no) \
- SC(133, 37, 35, 2, yes, no, no) \
- SC(134, 37, 35, 3, yes, no, no) \
- SC(135, 37, 35, 4, yes, no, no) \
- \
- SC(136, 38, 36, 1, yes, no, no) \
- SC(137, 38, 36, 2, yes, no, no) \
- SC(138, 38, 36, 3, yes, no, no) \
- SC(139, 38, 36, 4, yes, no, no) \
- \
- SC(140, 39, 37, 1, yes, no, no) \
- SC(141, 39, 37, 2, yes, no, no) \
- SC(142, 39, 37, 3, yes, no, no) \
- SC(143, 39, 37, 4, yes, no, no) \
- \
- SC(144, 40, 38, 1, yes, no, no) \
- SC(145, 40, 38, 2, yes, no, no) \
- SC(146, 40, 38, 3, yes, no, no) \
- SC(147, 40, 38, 4, yes, no, no) \
- \
- SC(148, 41, 39, 1, yes, no, no) \
- SC(149, 41, 39, 2, yes, no, no) \
- SC(150, 41, 39, 3, yes, no, no) \
- SC(151, 41, 39, 4, yes, no, no) \
- \
- SC(152, 42, 40, 1, yes, no, no) \
- SC(153, 42, 40, 2, yes, no, no) \
- SC(154, 42, 40, 3, yes, no, no) \
- SC(155, 42, 40, 4, yes, no, no) \
- \
- SC(156, 43, 41, 1, yes, no, no) \
- SC(157, 43, 41, 2, yes, no, no) \
- SC(158, 43, 41, 3, yes, no, no) \
- SC(159, 43, 41, 4, yes, no, no) \
- \
- SC(160, 44, 42, 1, yes, no, no) \
- SC(161, 44, 42, 2, yes, no, no) \
- SC(162, 44, 42, 3, yes, no, no) \
- SC(163, 44, 42, 4, yes, no, no) \
- \
- SC(164, 45, 43, 1, yes, no, no) \
- SC(165, 45, 43, 2, yes, no, no) \
- SC(166, 45, 43, 3, yes, no, no) \
- SC(167, 45, 43, 4, yes, no, no) \
- \
- SC(168, 46, 44, 1, yes, no, no) \
- SC(169, 46, 44, 2, yes, no, no) \
- SC(170, 46, 44, 3, yes, no, no) \
- SC(171, 46, 44, 4, yes, no, no) \
- \
- SC(172, 47, 45, 1, yes, no, no) \
- SC(173, 47, 45, 2, yes, no, no) \
- SC(174, 47, 45, 3, yes, no, no) \
- SC(175, 47, 45, 4, yes, no, no) \
- \
- SC(176, 48, 46, 1, yes, no, no) \
- SC(177, 48, 46, 2, yes, no, no) \
- SC(178, 48, 46, 3, yes, no, no) \
- SC(179, 48, 46, 4, yes, no, no) \
- \
- SC(180, 49, 47, 1, yes, no, no) \
- SC(181, 49, 47, 2, yes, no, no) \
- SC(182, 49, 47, 3, yes, no, no) \
- SC(183, 49, 47, 4, yes, no, no) \
- \
- SC(184, 50, 48, 1, yes, no, no) \
- SC(185, 50, 48, 2, yes, no, no) \
- SC(186, 50, 48, 3, yes, no, no) \
- SC(187, 50, 48, 4, yes, no, no) \
- \
- SC(188, 51, 49, 1, yes, no, no) \
- SC(189, 51, 49, 2, yes, no, no) \
- SC(190, 51, 49, 3, yes, no, no) \
- SC(191, 51, 49, 4, yes, no, no) \
- \
- SC(192, 52, 50, 1, yes, no, no) \
- SC(193, 52, 50, 2, yes, no, no) \
- SC(194, 52, 50, 3, yes, no, no) \
- SC(195, 52, 50, 4, yes, no, no) \
- \
- SC(196, 53, 51, 1, yes, no, no) \
- SC(197, 53, 51, 2, yes, no, no) \
- SC(198, 53, 51, 3, yes, no, no) \
- SC(199, 53, 51, 4, yes, no, no) \
- \
- SC(200, 54, 52, 1, yes, no, no) \
- SC(201, 54, 52, 2, yes, no, no) \
- SC(202, 54, 52, 3, yes, no, no) \
- SC(203, 54, 52, 4, yes, no, no) \
- \
- SC(204, 55, 53, 1, yes, no, no) \
- SC(205, 55, 53, 2, yes, no, no) \
- SC(206, 55, 53, 3, yes, no, no) \
- SC(207, 55, 53, 4, yes, no, no) \
- \
- SC(208, 56, 54, 1, yes, no, no) \
- SC(209, 56, 54, 2, yes, no, no) \
- SC(210, 56, 54, 3, yes, no, no) \
- SC(211, 56, 54, 4, yes, no, no) \
- \
- SC(212, 57, 55, 1, yes, no, no) \
- SC(213, 57, 55, 2, yes, no, no) \
- SC(214, 57, 55, 3, yes, no, no) \
- SC(215, 57, 55, 4, yes, no, no) \
- \
- SC(216, 58, 56, 1, yes, no, no) \
- SC(217, 58, 56, 2, yes, no, no) \
- SC(218, 58, 56, 3, yes, no, no) \
- SC(219, 58, 56, 4, yes, no, no) \
- \
- SC(220, 59, 57, 1, yes, no, no) \
- SC(221, 59, 57, 2, yes, no, no) \
- SC(222, 59, 57, 3, yes, no, no) \
- SC(223, 59, 57, 4, yes, no, no) \
- \
- SC(224, 60, 58, 1, yes, no, no) \
- SC(225, 60, 58, 2, yes, no, no) \
- SC(226, 60, 58, 3, yes, no, no) \
- SC(227, 60, 58, 4, yes, no, no) \
- \
- SC(228, 61, 59, 1, yes, no, no) \
- SC(229, 61, 59, 2, yes, no, no) \
- SC(230, 61, 59, 3, yes, no, no) \
- SC(231, 61, 59, 4, yes, no, no) \
- \
- SC(232, 62, 60, 1, yes, no, no) \
- SC(233, 62, 60, 2, yes, no, no) \
- SC(234, 62, 60, 3, yes, no, no) \
+#define SIZE_CLASSES \
+ /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \
+ SC( 0, 3, 3, 0, no, yes, 1, 3) \
+ SC( 1, 3, 3, 1, no, yes, 1, 3) \
+ SC( 2, 3, 3, 2, no, yes, 3, 3) \
+ SC( 3, 3, 3, 3, no, yes, 1, 3) \
+ \
+ SC( 4, 5, 3, 1, no, yes, 5, 3) \
+ SC( 5, 5, 3, 2, no, yes, 3, 3) \
+ SC( 6, 5, 3, 3, no, yes, 7, 3) \
+ SC( 7, 5, 3, 4, no, yes, 1, 3) \
+ \
+ SC( 8, 6, 4, 1, no, yes, 5, 4) \
+ SC( 9, 6, 4, 2, no, yes, 3, 4) \
+ SC( 10, 6, 4, 3, no, yes, 7, 4) \
+ SC( 11, 6, 4, 4, no, yes, 1, 4) \
+ \
+ SC( 12, 7, 5, 1, no, yes, 5, 5) \
+ SC( 13, 7, 5, 2, no, yes, 3, 5) \
+ SC( 14, 7, 5, 3, no, yes, 7, 5) \
+ SC( 15, 7, 5, 4, no, yes, 1, 5) \
+ \
+ SC( 16, 8, 6, 1, no, yes, 5, 6) \
+ SC( 17, 8, 6, 2, no, yes, 3, 6) \
+ SC( 18, 8, 6, 3, no, yes, 7, 6) \
+ SC( 19, 8, 6, 4, no, yes, 1, 6) \
+ \
+ SC( 20, 9, 7, 1, no, yes, 5, 7) \
+ SC( 21, 9, 7, 2, no, yes, 3, 7) \
+ SC( 22, 9, 7, 3, no, yes, 7, 7) \
+ SC( 23, 9, 7, 4, no, yes, 1, 7) \
+ \
+ SC( 24, 10, 8, 1, no, yes, 5, 8) \
+ SC( 25, 10, 8, 2, no, yes, 3, 8) \
+ SC( 26, 10, 8, 3, no, yes, 7, 8) \
+ SC( 27, 10, 8, 4, no, yes, 1, 8) \
+ \
+ SC( 28, 11, 9, 1, no, yes, 5, 9) \
+ SC( 29, 11, 9, 2, no, yes, 3, 9) \
+ SC( 30, 11, 9, 3, no, yes, 7, 9) \
+ SC( 31, 11, 9, 4, no, yes, 1, 9) \
+ \
+ SC( 32, 12, 10, 1, no, yes, 5, no) \
+ SC( 33, 12, 10, 2, no, yes, 3, no) \
+ SC( 34, 12, 10, 3, no, yes, 7, no) \
+ SC( 35, 12, 10, 4, no, yes, 1, no) \
+ \
+ SC( 36, 13, 11, 1, no, yes, 5, no) \
+ SC( 37, 13, 11, 2, no, yes, 3, no) \
+ SC( 38, 13, 11, 3, no, yes, 7, no) \
+ SC( 39, 13, 11, 4, yes, yes, 1, no) \
+ \
+ SC( 40, 14, 12, 1, no, yes, 5, no) \
+ SC( 41, 14, 12, 2, no, yes, 3, no) \
+ SC( 42, 14, 12, 3, no, yes, 7, no) \
+ SC( 43, 14, 12, 4, yes, yes, 2, no) \
+ \
+ SC( 44, 15, 13, 1, no, yes, 5, no) \
+ SC( 45, 15, 13, 2, yes, yes, 3, no) \
+ SC( 46, 15, 13, 3, no, yes, 7, no) \
+ SC( 47, 15, 13, 4, yes, no, 0, no) \
+ \
+ SC( 48, 16, 14, 1, yes, no, 0, no) \
+ SC( 49, 16, 14, 2, yes, no, 0, no) \
+ SC( 50, 16, 14, 3, yes, no, 0, no) \
+ SC( 51, 16, 14, 4, yes, no, 0, no) \
+ \
+ SC( 52, 17, 15, 1, yes, no, 0, no) \
+ SC( 53, 17, 15, 2, yes, no, 0, no) \
+ SC( 54, 17, 15, 3, yes, no, 0, no) \
+ SC( 55, 17, 15, 4, yes, no, 0, no) \
+ \
+ SC( 56, 18, 16, 1, yes, no, 0, no) \
+ SC( 57, 18, 16, 2, yes, no, 0, no) \
+ SC( 58, 18, 16, 3, yes, no, 0, no) \
+ SC( 59, 18, 16, 4, yes, no, 0, no) \
+ \
+ SC( 60, 19, 17, 1, yes, no, 0, no) \
+ SC( 61, 19, 17, 2, yes, no, 0, no) \
+ SC( 62, 19, 17, 3, yes, no, 0, no) \
+ SC( 63, 19, 17, 4, yes, no, 0, no) \
+ \
+ SC( 64, 20, 18, 1, yes, no, 0, no) \
+ SC( 65, 20, 18, 2, yes, no, 0, no) \
+ SC( 66, 20, 18, 3, yes, no, 0, no) \
+ SC( 67, 20, 18, 4, yes, no, 0, no) \
+ \
+ SC( 68, 21, 19, 1, yes, no, 0, no) \
+ SC( 69, 21, 19, 2, yes, no, 0, no) \
+ SC( 70, 21, 19, 3, yes, no, 0, no) \
+ SC( 71, 21, 19, 4, yes, no, 0, no) \
+ \
+ SC( 72, 22, 20, 1, yes, no, 0, no) \
+ SC( 73, 22, 20, 2, yes, no, 0, no) \
+ SC( 74, 22, 20, 3, yes, no, 0, no) \
+ SC( 75, 22, 20, 4, yes, no, 0, no) \
+ \
+ SC( 76, 23, 21, 1, yes, no, 0, no) \
+ SC( 77, 23, 21, 2, yes, no, 0, no) \
+ SC( 78, 23, 21, 3, yes, no, 0, no) \
+ SC( 79, 23, 21, 4, yes, no, 0, no) \
+ \
+ SC( 80, 24, 22, 1, yes, no, 0, no) \
+ SC( 81, 24, 22, 2, yes, no, 0, no) \
+ SC( 82, 24, 22, 3, yes, no, 0, no) \
+ SC( 83, 24, 22, 4, yes, no, 0, no) \
+ \
+ SC( 84, 25, 23, 1, yes, no, 0, no) \
+ SC( 85, 25, 23, 2, yes, no, 0, no) \
+ SC( 86, 25, 23, 3, yes, no, 0, no) \
+ SC( 87, 25, 23, 4, yes, no, 0, no) \
+ \
+ SC( 88, 26, 24, 1, yes, no, 0, no) \
+ SC( 89, 26, 24, 2, yes, no, 0, no) \
+ SC( 90, 26, 24, 3, yes, no, 0, no) \
+ SC( 91, 26, 24, 4, yes, no, 0, no) \
+ \
+ SC( 92, 27, 25, 1, yes, no, 0, no) \
+ SC( 93, 27, 25, 2, yes, no, 0, no) \
+ SC( 94, 27, 25, 3, yes, no, 0, no) \
+ SC( 95, 27, 25, 4, yes, no, 0, no) \
+ \
+ SC( 96, 28, 26, 1, yes, no, 0, no) \
+ SC( 97, 28, 26, 2, yes, no, 0, no) \
+ SC( 98, 28, 26, 3, yes, no, 0, no) \
+ SC( 99, 28, 26, 4, yes, no, 0, no) \
+ \
+ SC(100, 29, 27, 1, yes, no, 0, no) \
+ SC(101, 29, 27, 2, yes, no, 0, no) \
+ SC(102, 29, 27, 3, yes, no, 0, no) \
+ SC(103, 29, 27, 4, yes, no, 0, no) \
+ \
+ SC(104, 30, 28, 1, yes, no, 0, no) \
+ SC(105, 30, 28, 2, yes, no, 0, no) \
+ SC(106, 30, 28, 3, yes, no, 0, no) \
+ SC(107, 30, 28, 4, yes, no, 0, no) \
+ \
+ SC(108, 31, 29, 1, yes, no, 0, no) \
+ SC(109, 31, 29, 2, yes, no, 0, no) \
+ SC(110, 31, 29, 3, yes, no, 0, no) \
+ SC(111, 31, 29, 4, yes, no, 0, no) \
+ \
+ SC(112, 32, 30, 1, yes, no, 0, no) \
+ SC(113, 32, 30, 2, yes, no, 0, no) \
+ SC(114, 32, 30, 3, yes, no, 0, no) \
+ SC(115, 32, 30, 4, yes, no, 0, no) \
+ \
+ SC(116, 33, 31, 1, yes, no, 0, no) \
+ SC(117, 33, 31, 2, yes, no, 0, no) \
+ SC(118, 33, 31, 3, yes, no, 0, no) \
+ SC(119, 33, 31, 4, yes, no, 0, no) \
+ \
+ SC(120, 34, 32, 1, yes, no, 0, no) \
+ SC(121, 34, 32, 2, yes, no, 0, no) \
+ SC(122, 34, 32, 3, yes, no, 0, no) \
+ SC(123, 34, 32, 4, yes, no, 0, no) \
+ \
+ SC(124, 35, 33, 1, yes, no, 0, no) \
+ SC(125, 35, 33, 2, yes, no, 0, no) \
+ SC(126, 35, 33, 3, yes, no, 0, no) \
+ SC(127, 35, 33, 4, yes, no, 0, no) \
+ \
+ SC(128, 36, 34, 1, yes, no, 0, no) \
+ SC(129, 36, 34, 2, yes, no, 0, no) \
+ SC(130, 36, 34, 3, yes, no, 0, no) \
+ SC(131, 36, 34, 4, yes, no, 0, no) \
+ \
+ SC(132, 37, 35, 1, yes, no, 0, no) \
+ SC(133, 37, 35, 2, yes, no, 0, no) \
+ SC(134, 37, 35, 3, yes, no, 0, no) \
+ SC(135, 37, 35, 4, yes, no, 0, no) \
+ \
+ SC(136, 38, 36, 1, yes, no, 0, no) \
+ SC(137, 38, 36, 2, yes, no, 0, no) \
+ SC(138, 38, 36, 3, yes, no, 0, no) \
+ SC(139, 38, 36, 4, yes, no, 0, no) \
+ \
+ SC(140, 39, 37, 1, yes, no, 0, no) \
+ SC(141, 39, 37, 2, yes, no, 0, no) \
+ SC(142, 39, 37, 3, yes, no, 0, no) \
+ SC(143, 39, 37, 4, yes, no, 0, no) \
+ \
+ SC(144, 40, 38, 1, yes, no, 0, no) \
+ SC(145, 40, 38, 2, yes, no, 0, no) \
+ SC(146, 40, 38, 3, yes, no, 0, no) \
+ SC(147, 40, 38, 4, yes, no, 0, no) \
+ \
+ SC(148, 41, 39, 1, yes, no, 0, no) \
+ SC(149, 41, 39, 2, yes, no, 0, no) \
+ SC(150, 41, 39, 3, yes, no, 0, no) \
+ SC(151, 41, 39, 4, yes, no, 0, no) \
+ \
+ SC(152, 42, 40, 1, yes, no, 0, no) \
+ SC(153, 42, 40, 2, yes, no, 0, no) \
+ SC(154, 42, 40, 3, yes, no, 0, no) \
+ SC(155, 42, 40, 4, yes, no, 0, no) \
+ \
+ SC(156, 43, 41, 1, yes, no, 0, no) \
+ SC(157, 43, 41, 2, yes, no, 0, no) \
+ SC(158, 43, 41, 3, yes, no, 0, no) \
+ SC(159, 43, 41, 4, yes, no, 0, no) \
+ \
+ SC(160, 44, 42, 1, yes, no, 0, no) \
+ SC(161, 44, 42, 2, yes, no, 0, no) \
+ SC(162, 44, 42, 3, yes, no, 0, no) \
+ SC(163, 44, 42, 4, yes, no, 0, no) \
+ \
+ SC(164, 45, 43, 1, yes, no, 0, no) \
+ SC(165, 45, 43, 2, yes, no, 0, no) \
+ SC(166, 45, 43, 3, yes, no, 0, no) \
+ SC(167, 45, 43, 4, yes, no, 0, no) \
+ \
+ SC(168, 46, 44, 1, yes, no, 0, no) \
+ SC(169, 46, 44, 2, yes, no, 0, no) \
+ SC(170, 46, 44, 3, yes, no, 0, no) \
+ SC(171, 46, 44, 4, yes, no, 0, no) \
+ \
+ SC(172, 47, 45, 1, yes, no, 0, no) \
+ SC(173, 47, 45, 2, yes, no, 0, no) \
+ SC(174, 47, 45, 3, yes, no, 0, no) \
+ SC(175, 47, 45, 4, yes, no, 0, no) \
+ \
+ SC(176, 48, 46, 1, yes, no, 0, no) \
+ SC(177, 48, 46, 2, yes, no, 0, no) \
+ SC(178, 48, 46, 3, yes, no, 0, no) \
+ SC(179, 48, 46, 4, yes, no, 0, no) \
+ \
+ SC(180, 49, 47, 1, yes, no, 0, no) \
+ SC(181, 49, 47, 2, yes, no, 0, no) \
+ SC(182, 49, 47, 3, yes, no, 0, no) \
+ SC(183, 49, 47, 4, yes, no, 0, no) \
+ \
+ SC(184, 50, 48, 1, yes, no, 0, no) \
+ SC(185, 50, 48, 2, yes, no, 0, no) \
+ SC(186, 50, 48, 3, yes, no, 0, no) \
+ SC(187, 50, 48, 4, yes, no, 0, no) \
+ \
+ SC(188, 51, 49, 1, yes, no, 0, no) \
+ SC(189, 51, 49, 2, yes, no, 0, no) \
+ SC(190, 51, 49, 3, yes, no, 0, no) \
+ SC(191, 51, 49, 4, yes, no, 0, no) \
+ \
+ SC(192, 52, 50, 1, yes, no, 0, no) \
+ SC(193, 52, 50, 2, yes, no, 0, no) \
+ SC(194, 52, 50, 3, yes, no, 0, no) \
+ SC(195, 52, 50, 4, yes, no, 0, no) \
+ \
+ SC(196, 53, 51, 1, yes, no, 0, no) \
+ SC(197, 53, 51, 2, yes, no, 0, no) \
+ SC(198, 53, 51, 3, yes, no, 0, no) \
+ SC(199, 53, 51, 4, yes, no, 0, no) \
+ \
+ SC(200, 54, 52, 1, yes, no, 0, no) \
+ SC(201, 54, 52, 2, yes, no, 0, no) \
+ SC(202, 54, 52, 3, yes, no, 0, no) \
+ SC(203, 54, 52, 4, yes, no, 0, no) \
+ \
+ SC(204, 55, 53, 1, yes, no, 0, no) \
+ SC(205, 55, 53, 2, yes, no, 0, no) \
+ SC(206, 55, 53, 3, yes, no, 0, no) \
+ SC(207, 55, 53, 4, yes, no, 0, no) \
+ \
+ SC(208, 56, 54, 1, yes, no, 0, no) \
+ SC(209, 56, 54, 2, yes, no, 0, no) \
+ SC(210, 56, 54, 3, yes, no, 0, no) \
+ SC(211, 56, 54, 4, yes, no, 0, no) \
+ \
+ SC(212, 57, 55, 1, yes, no, 0, no) \
+ SC(213, 57, 55, 2, yes, no, 0, no) \
+ SC(214, 57, 55, 3, yes, no, 0, no) \
+ SC(215, 57, 55, 4, yes, no, 0, no) \
+ \
+ SC(216, 58, 56, 1, yes, no, 0, no) \
+ SC(217, 58, 56, 2, yes, no, 0, no) \
+ SC(218, 58, 56, 3, yes, no, 0, no) \
+ SC(219, 58, 56, 4, yes, no, 0, no) \
+ \
+ SC(220, 59, 57, 1, yes, no, 0, no) \
+ SC(221, 59, 57, 2, yes, no, 0, no) \
+ SC(222, 59, 57, 3, yes, no, 0, no) \
+ SC(223, 59, 57, 4, yes, no, 0, no) \
+ \
+ SC(224, 60, 58, 1, yes, no, 0, no) \
+ SC(225, 60, 58, 2, yes, no, 0, no) \
+ SC(226, 60, 58, 3, yes, no, 0, no) \
+ SC(227, 60, 58, 4, yes, no, 0, no) \
+ \
+ SC(228, 61, 59, 1, yes, no, 0, no) \
+ SC(229, 61, 59, 2, yes, no, 0, no) \
+ SC(230, 61, 59, 3, yes, no, 0, no) \
+ SC(231, 61, 59, 4, yes, no, 0, no) \
+ \
+ SC(232, 62, 60, 1, yes, no, 0, no) \
+ SC(233, 62, 60, 2, yes, no, 0, no) \
+ SC(234, 62, 60, 3, yes, no, 0, no) \
-#define SIZE_CLASSES_DEFINED
-#define NTBINS 0
-#define NLBINS 32
-#define NBINS 47
-#define NSIZES 235
-#define NPSIZES 191
-#define LG_TINY_MAXCLASS "NA"
-#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
-#define SMALL_MAXCLASS ((((size_t)1) << 15) + (((size_t)3) << 13))
-#define LG_LARGE_MINCLASS 16
-#define HUGE_MAXCLASS ((((size_t)1) << 62) + (((size_t)3) << 60))
+#define SIZE_CLASSES_DEFINED
+#define NTBINS 0
+#define NLBINS 32
+#define NBINS 47
+#define NSIZES 235
+#define LG_CEIL_NSIZES 8
+#define NPSIZES 191
+#define LG_TINY_MAXCLASS "NA"
+#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
+#define SMALL_MAXCLASS ((((size_t)1) << 15) + (((size_t)3) << 13))
+#define LG_LARGE_MINCLASS 16
+#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)
+#define LARGE_MAXCLASS ((((size_t)1) << 62) + (((size_t)3) << 60))
#endif
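
A reading aid for the generated tables above and below, not part of the header or the diff itself: each SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) row appears to encode a size class of (1 << lg_grp) + ndelta * (1 << lg_delta) bytes, which is consistent with the derived constants at the end of each block (for the block just above, SMALL_MAXCLASS matches row 46 and LOOKUP_MAXCLASS matches row 31). The sketch below is a minimal illustration under that assumption; the helper name sc_size and the standalone main() are hypothetical and do not exist in jemalloc.

#include <assert.h>
#include <stddef.h>

/* Byte size encoded by one SC(index, lg_grp, lg_delta, ndelta, ...) row. */
static inline size_t
sc_size(unsigned lg_grp, unsigned lg_delta, unsigned ndelta) {
	return (((size_t)1) << lg_grp) + ((size_t)ndelta << lg_delta);
}

int
main(void) {
	/* Row 46 above, SC(46, 15, 13, 3, ...), is the largest row with
	 * bin == yes (NBINS == 47) and equals the SMALL_MAXCLASS expression. */
	assert(sc_size(15, 13, 3) == ((((size_t)1) << 15) + (((size_t)3) << 13)));
	/* Row 31, SC(31, 11, 9, 4, ...), is the largest row with a lookup
	 * delta and equals the LOOKUP_MAXCLASS expression. */
	assert(sc_size(11, 9, 4) == ((((size_t)1) << 11) + (((size_t)4) << 9)));
	return 0;
}
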
#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 16)
-#define SIZE_CLASSES \
- /* index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup */ \
- SC( 0, 3, 3, 0, no, yes, 3) \
- SC( 1, 3, 3, 1, no, yes, 3) \
- SC( 2, 3, 3, 2, no, yes, 3) \
- SC( 3, 3, 3, 3, no, yes, 3) \
- \
- SC( 4, 5, 3, 1, no, yes, 3) \
- SC( 5, 5, 3, 2, no, yes, 3) \
- SC( 6, 5, 3, 3, no, yes, 3) \
- SC( 7, 5, 3, 4, no, yes, 3) \
- \
- SC( 8, 6, 4, 1, no, yes, 4) \
- SC( 9, 6, 4, 2, no, yes, 4) \
- SC( 10, 6, 4, 3, no, yes, 4) \
- SC( 11, 6, 4, 4, no, yes, 4) \
- \
- SC( 12, 7, 5, 1, no, yes, 5) \
- SC( 13, 7, 5, 2, no, yes, 5) \
- SC( 14, 7, 5, 3, no, yes, 5) \
- SC( 15, 7, 5, 4, no, yes, 5) \
- \
- SC( 16, 8, 6, 1, no, yes, 6) \
- SC( 17, 8, 6, 2, no, yes, 6) \
- SC( 18, 8, 6, 3, no, yes, 6) \
- SC( 19, 8, 6, 4, no, yes, 6) \
- \
- SC( 20, 9, 7, 1, no, yes, 7) \
- SC( 21, 9, 7, 2, no, yes, 7) \
- SC( 22, 9, 7, 3, no, yes, 7) \
- SC( 23, 9, 7, 4, no, yes, 7) \
- \
- SC( 24, 10, 8, 1, no, yes, 8) \
- SC( 25, 10, 8, 2, no, yes, 8) \
- SC( 26, 10, 8, 3, no, yes, 8) \
- SC( 27, 10, 8, 4, no, yes, 8) \
- \
- SC( 28, 11, 9, 1, no, yes, 9) \
- SC( 29, 11, 9, 2, no, yes, 9) \
- SC( 30, 11, 9, 3, no, yes, 9) \
- SC( 31, 11, 9, 4, no, yes, 9) \
- \
- SC( 32, 12, 10, 1, no, yes, no) \
- SC( 33, 12, 10, 2, no, yes, no) \
- SC( 34, 12, 10, 3, no, yes, no) \
- SC( 35, 12, 10, 4, no, yes, no) \
- \
- SC( 36, 13, 11, 1, no, yes, no) \
- SC( 37, 13, 11, 2, no, yes, no) \
- SC( 38, 13, 11, 3, no, yes, no) \
- SC( 39, 13, 11, 4, no, yes, no) \
- \
- SC( 40, 14, 12, 1, no, yes, no) \
- SC( 41, 14, 12, 2, no, yes, no) \
- SC( 42, 14, 12, 3, no, yes, no) \
- SC( 43, 14, 12, 4, no, yes, no) \
- \
- SC( 44, 15, 13, 1, no, yes, no) \
- SC( 45, 15, 13, 2, no, yes, no) \
- SC( 46, 15, 13, 3, no, yes, no) \
- SC( 47, 15, 13, 4, yes, yes, no) \
- \
- SC( 48, 16, 14, 1, no, yes, no) \
- SC( 49, 16, 14, 2, no, yes, no) \
- SC( 50, 16, 14, 3, no, yes, no) \
- SC( 51, 16, 14, 4, yes, yes, no) \
- \
- SC( 52, 17, 15, 1, no, yes, no) \
- SC( 53, 17, 15, 2, yes, yes, no) \
- SC( 54, 17, 15, 3, no, yes, no) \
- SC( 55, 17, 15, 4, yes, no, no) \
- \
- SC( 56, 18, 16, 1, yes, no, no) \
- SC( 57, 18, 16, 2, yes, no, no) \
- SC( 58, 18, 16, 3, yes, no, no) \
- SC( 59, 18, 16, 4, yes, no, no) \
- \
- SC( 60, 19, 17, 1, yes, no, no) \
- SC( 61, 19, 17, 2, yes, no, no) \
- SC( 62, 19, 17, 3, yes, no, no) \
- SC( 63, 19, 17, 4, yes, no, no) \
- \
- SC( 64, 20, 18, 1, yes, no, no) \
- SC( 65, 20, 18, 2, yes, no, no) \
- SC( 66, 20, 18, 3, yes, no, no) \
- SC( 67, 20, 18, 4, yes, no, no) \
- \
- SC( 68, 21, 19, 1, yes, no, no) \
- SC( 69, 21, 19, 2, yes, no, no) \
- SC( 70, 21, 19, 3, yes, no, no) \
- SC( 71, 21, 19, 4, yes, no, no) \
- \
- SC( 72, 22, 20, 1, yes, no, no) \
- SC( 73, 22, 20, 2, yes, no, no) \
- SC( 74, 22, 20, 3, yes, no, no) \
- SC( 75, 22, 20, 4, yes, no, no) \
- \
- SC( 76, 23, 21, 1, yes, no, no) \
- SC( 77, 23, 21, 2, yes, no, no) \
- SC( 78, 23, 21, 3, yes, no, no) \
- SC( 79, 23, 21, 4, yes, no, no) \
- \
- SC( 80, 24, 22, 1, yes, no, no) \
- SC( 81, 24, 22, 2, yes, no, no) \
- SC( 82, 24, 22, 3, yes, no, no) \
- SC( 83, 24, 22, 4, yes, no, no) \
- \
- SC( 84, 25, 23, 1, yes, no, no) \
- SC( 85, 25, 23, 2, yes, no, no) \
- SC( 86, 25, 23, 3, yes, no, no) \
- SC( 87, 25, 23, 4, yes, no, no) \
- \
- SC( 88, 26, 24, 1, yes, no, no) \
- SC( 89, 26, 24, 2, yes, no, no) \
- SC( 90, 26, 24, 3, yes, no, no) \
- SC( 91, 26, 24, 4, yes, no, no) \
- \
- SC( 92, 27, 25, 1, yes, no, no) \
- SC( 93, 27, 25, 2, yes, no, no) \
- SC( 94, 27, 25, 3, yes, no, no) \
- SC( 95, 27, 25, 4, yes, no, no) \
- \
- SC( 96, 28, 26, 1, yes, no, no) \
- SC( 97, 28, 26, 2, yes, no, no) \
- SC( 98, 28, 26, 3, yes, no, no) \
- SC( 99, 28, 26, 4, yes, no, no) \
- \
- SC(100, 29, 27, 1, yes, no, no) \
- SC(101, 29, 27, 2, yes, no, no) \
- SC(102, 29, 27, 3, yes, no, no) \
- SC(103, 29, 27, 4, yes, no, no) \
- \
- SC(104, 30, 28, 1, yes, no, no) \
- SC(105, 30, 28, 2, yes, no, no) \
- SC(106, 30, 28, 3, yes, no, no) \
- SC(107, 30, 28, 4, yes, no, no) \
- \
- SC(108, 31, 29, 1, yes, no, no) \
- SC(109, 31, 29, 2, yes, no, no) \
- SC(110, 31, 29, 3, yes, no, no) \
- SC(111, 31, 29, 4, yes, no, no) \
- \
- SC(112, 32, 30, 1, yes, no, no) \
- SC(113, 32, 30, 2, yes, no, no) \
- SC(114, 32, 30, 3, yes, no, no) \
- SC(115, 32, 30, 4, yes, no, no) \
- \
- SC(116, 33, 31, 1, yes, no, no) \
- SC(117, 33, 31, 2, yes, no, no) \
- SC(118, 33, 31, 3, yes, no, no) \
- SC(119, 33, 31, 4, yes, no, no) \
- \
- SC(120, 34, 32, 1, yes, no, no) \
- SC(121, 34, 32, 2, yes, no, no) \
- SC(122, 34, 32, 3, yes, no, no) \
- SC(123, 34, 32, 4, yes, no, no) \
- \
- SC(124, 35, 33, 1, yes, no, no) \
- SC(125, 35, 33, 2, yes, no, no) \
- SC(126, 35, 33, 3, yes, no, no) \
- SC(127, 35, 33, 4, yes, no, no) \
- \
- SC(128, 36, 34, 1, yes, no, no) \
- SC(129, 36, 34, 2, yes, no, no) \
- SC(130, 36, 34, 3, yes, no, no) \
- SC(131, 36, 34, 4, yes, no, no) \
- \
- SC(132, 37, 35, 1, yes, no, no) \
- SC(133, 37, 35, 2, yes, no, no) \
- SC(134, 37, 35, 3, yes, no, no) \
- SC(135, 37, 35, 4, yes, no, no) \
- \
- SC(136, 38, 36, 1, yes, no, no) \
- SC(137, 38, 36, 2, yes, no, no) \
- SC(138, 38, 36, 3, yes, no, no) \
- SC(139, 38, 36, 4, yes, no, no) \
- \
- SC(140, 39, 37, 1, yes, no, no) \
- SC(141, 39, 37, 2, yes, no, no) \
- SC(142, 39, 37, 3, yes, no, no) \
- SC(143, 39, 37, 4, yes, no, no) \
- \
- SC(144, 40, 38, 1, yes, no, no) \
- SC(145, 40, 38, 2, yes, no, no) \
- SC(146, 40, 38, 3, yes, no, no) \
- SC(147, 40, 38, 4, yes, no, no) \
- \
- SC(148, 41, 39, 1, yes, no, no) \
- SC(149, 41, 39, 2, yes, no, no) \
- SC(150, 41, 39, 3, yes, no, no) \
- SC(151, 41, 39, 4, yes, no, no) \
- \
- SC(152, 42, 40, 1, yes, no, no) \
- SC(153, 42, 40, 2, yes, no, no) \
- SC(154, 42, 40, 3, yes, no, no) \
- SC(155, 42, 40, 4, yes, no, no) \
- \
- SC(156, 43, 41, 1, yes, no, no) \
- SC(157, 43, 41, 2, yes, no, no) \
- SC(158, 43, 41, 3, yes, no, no) \
- SC(159, 43, 41, 4, yes, no, no) \
- \
- SC(160, 44, 42, 1, yes, no, no) \
- SC(161, 44, 42, 2, yes, no, no) \
- SC(162, 44, 42, 3, yes, no, no) \
- SC(163, 44, 42, 4, yes, no, no) \
- \
- SC(164, 45, 43, 1, yes, no, no) \
- SC(165, 45, 43, 2, yes, no, no) \
- SC(166, 45, 43, 3, yes, no, no) \
- SC(167, 45, 43, 4, yes, no, no) \
- \
- SC(168, 46, 44, 1, yes, no, no) \
- SC(169, 46, 44, 2, yes, no, no) \
- SC(170, 46, 44, 3, yes, no, no) \
- SC(171, 46, 44, 4, yes, no, no) \
- \
- SC(172, 47, 45, 1, yes, no, no) \
- SC(173, 47, 45, 2, yes, no, no) \
- SC(174, 47, 45, 3, yes, no, no) \
- SC(175, 47, 45, 4, yes, no, no) \
- \
- SC(176, 48, 46, 1, yes, no, no) \
- SC(177, 48, 46, 2, yes, no, no) \
- SC(178, 48, 46, 3, yes, no, no) \
- SC(179, 48, 46, 4, yes, no, no) \
- \
- SC(180, 49, 47, 1, yes, no, no) \
- SC(181, 49, 47, 2, yes, no, no) \
- SC(182, 49, 47, 3, yes, no, no) \
- SC(183, 49, 47, 4, yes, no, no) \
- \
- SC(184, 50, 48, 1, yes, no, no) \
- SC(185, 50, 48, 2, yes, no, no) \
- SC(186, 50, 48, 3, yes, no, no) \
- SC(187, 50, 48, 4, yes, no, no) \
- \
- SC(188, 51, 49, 1, yes, no, no) \
- SC(189, 51, 49, 2, yes, no, no) \
- SC(190, 51, 49, 3, yes, no, no) \
- SC(191, 51, 49, 4, yes, no, no) \
- \
- SC(192, 52, 50, 1, yes, no, no) \
- SC(193, 52, 50, 2, yes, no, no) \
- SC(194, 52, 50, 3, yes, no, no) \
- SC(195, 52, 50, 4, yes, no, no) \
- \
- SC(196, 53, 51, 1, yes, no, no) \
- SC(197, 53, 51, 2, yes, no, no) \
- SC(198, 53, 51, 3, yes, no, no) \
- SC(199, 53, 51, 4, yes, no, no) \
- \
- SC(200, 54, 52, 1, yes, no, no) \
- SC(201, 54, 52, 2, yes, no, no) \
- SC(202, 54, 52, 3, yes, no, no) \
- SC(203, 54, 52, 4, yes, no, no) \
- \
- SC(204, 55, 53, 1, yes, no, no) \
- SC(205, 55, 53, 2, yes, no, no) \
- SC(206, 55, 53, 3, yes, no, no) \
- SC(207, 55, 53, 4, yes, no, no) \
- \
- SC(208, 56, 54, 1, yes, no, no) \
- SC(209, 56, 54, 2, yes, no, no) \
- SC(210, 56, 54, 3, yes, no, no) \
- SC(211, 56, 54, 4, yes, no, no) \
- \
- SC(212, 57, 55, 1, yes, no, no) \
- SC(213, 57, 55, 2, yes, no, no) \
- SC(214, 57, 55, 3, yes, no, no) \
- SC(215, 57, 55, 4, yes, no, no) \
- \
- SC(216, 58, 56, 1, yes, no, no) \
- SC(217, 58, 56, 2, yes, no, no) \
- SC(218, 58, 56, 3, yes, no, no) \
- SC(219, 58, 56, 4, yes, no, no) \
- \
- SC(220, 59, 57, 1, yes, no, no) \
- SC(221, 59, 57, 2, yes, no, no) \
- SC(222, 59, 57, 3, yes, no, no) \
- SC(223, 59, 57, 4, yes, no, no) \
- \
- SC(224, 60, 58, 1, yes, no, no) \
- SC(225, 60, 58, 2, yes, no, no) \
- SC(226, 60, 58, 3, yes, no, no) \
- SC(227, 60, 58, 4, yes, no, no) \
- \
- SC(228, 61, 59, 1, yes, no, no) \
- SC(229, 61, 59, 2, yes, no, no) \
- SC(230, 61, 59, 3, yes, no, no) \
- SC(231, 61, 59, 4, yes, no, no) \
- \
- SC(232, 62, 60, 1, yes, no, no) \
- SC(233, 62, 60, 2, yes, no, no) \
- SC(234, 62, 60, 3, yes, no, no) \
+#define SIZE_CLASSES \
+ /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \
+ SC( 0, 3, 3, 0, no, yes, 1, 3) \
+ SC( 1, 3, 3, 1, no, yes, 1, 3) \
+ SC( 2, 3, 3, 2, no, yes, 3, 3) \
+ SC( 3, 3, 3, 3, no, yes, 1, 3) \
+ \
+ SC( 4, 5, 3, 1, no, yes, 5, 3) \
+ SC( 5, 5, 3, 2, no, yes, 3, 3) \
+ SC( 6, 5, 3, 3, no, yes, 7, 3) \
+ SC( 7, 5, 3, 4, no, yes, 1, 3) \
+ \
+ SC( 8, 6, 4, 1, no, yes, 5, 4) \
+ SC( 9, 6, 4, 2, no, yes, 3, 4) \
+ SC( 10, 6, 4, 3, no, yes, 7, 4) \
+ SC( 11, 6, 4, 4, no, yes, 1, 4) \
+ \
+ SC( 12, 7, 5, 1, no, yes, 5, 5) \
+ SC( 13, 7, 5, 2, no, yes, 3, 5) \
+ SC( 14, 7, 5, 3, no, yes, 7, 5) \
+ SC( 15, 7, 5, 4, no, yes, 1, 5) \
+ \
+ SC( 16, 8, 6, 1, no, yes, 5, 6) \
+ SC( 17, 8, 6, 2, no, yes, 3, 6) \
+ SC( 18, 8, 6, 3, no, yes, 7, 6) \
+ SC( 19, 8, 6, 4, no, yes, 1, 6) \
+ \
+ SC( 20, 9, 7, 1, no, yes, 5, 7) \
+ SC( 21, 9, 7, 2, no, yes, 3, 7) \
+ SC( 22, 9, 7, 3, no, yes, 7, 7) \
+ SC( 23, 9, 7, 4, no, yes, 1, 7) \
+ \
+ SC( 24, 10, 8, 1, no, yes, 5, 8) \
+ SC( 25, 10, 8, 2, no, yes, 3, 8) \
+ SC( 26, 10, 8, 3, no, yes, 7, 8) \
+ SC( 27, 10, 8, 4, no, yes, 1, 8) \
+ \
+ SC( 28, 11, 9, 1, no, yes, 5, 9) \
+ SC( 29, 11, 9, 2, no, yes, 3, 9) \
+ SC( 30, 11, 9, 3, no, yes, 7, 9) \
+ SC( 31, 11, 9, 4, no, yes, 1, 9) \
+ \
+ SC( 32, 12, 10, 1, no, yes, 5, no) \
+ SC( 33, 12, 10, 2, no, yes, 3, no) \
+ SC( 34, 12, 10, 3, no, yes, 7, no) \
+ SC( 35, 12, 10, 4, no, yes, 1, no) \
+ \
+ SC( 36, 13, 11, 1, no, yes, 5, no) \
+ SC( 37, 13, 11, 2, no, yes, 3, no) \
+ SC( 38, 13, 11, 3, no, yes, 7, no) \
+ SC( 39, 13, 11, 4, no, yes, 1, no) \
+ \
+ SC( 40, 14, 12, 1, no, yes, 5, no) \
+ SC( 41, 14, 12, 2, no, yes, 3, no) \
+ SC( 42, 14, 12, 3, no, yes, 7, no) \
+ SC( 43, 14, 12, 4, no, yes, 1, no) \
+ \
+ SC( 44, 15, 13, 1, no, yes, 5, no) \
+ SC( 45, 15, 13, 2, no, yes, 3, no) \
+ SC( 46, 15, 13, 3, no, yes, 7, no) \
+ SC( 47, 15, 13, 4, yes, yes, 1, no) \
+ \
+ SC( 48, 16, 14, 1, no, yes, 5, no) \
+ SC( 49, 16, 14, 2, no, yes, 3, no) \
+ SC( 50, 16, 14, 3, no, yes, 7, no) \
+ SC( 51, 16, 14, 4, yes, yes, 2, no) \
+ \
+ SC( 52, 17, 15, 1, no, yes, 5, no) \
+ SC( 53, 17, 15, 2, yes, yes, 3, no) \
+ SC( 54, 17, 15, 3, no, yes, 7, no) \
+ SC( 55, 17, 15, 4, yes, no, 0, no) \
+ \
+ SC( 56, 18, 16, 1, yes, no, 0, no) \
+ SC( 57, 18, 16, 2, yes, no, 0, no) \
+ SC( 58, 18, 16, 3, yes, no, 0, no) \
+ SC( 59, 18, 16, 4, yes, no, 0, no) \
+ \
+ SC( 60, 19, 17, 1, yes, no, 0, no) \
+ SC( 61, 19, 17, 2, yes, no, 0, no) \
+ SC( 62, 19, 17, 3, yes, no, 0, no) \
+ SC( 63, 19, 17, 4, yes, no, 0, no) \
+ \
+ SC( 64, 20, 18, 1, yes, no, 0, no) \
+ SC( 65, 20, 18, 2, yes, no, 0, no) \
+ SC( 66, 20, 18, 3, yes, no, 0, no) \
+ SC( 67, 20, 18, 4, yes, no, 0, no) \
+ \
+ SC( 68, 21, 19, 1, yes, no, 0, no) \
+ SC( 69, 21, 19, 2, yes, no, 0, no) \
+ SC( 70, 21, 19, 3, yes, no, 0, no) \
+ SC( 71, 21, 19, 4, yes, no, 0, no) \
+ \
+ SC( 72, 22, 20, 1, yes, no, 0, no) \
+ SC( 73, 22, 20, 2, yes, no, 0, no) \
+ SC( 74, 22, 20, 3, yes, no, 0, no) \
+ SC( 75, 22, 20, 4, yes, no, 0, no) \
+ \
+ SC( 76, 23, 21, 1, yes, no, 0, no) \
+ SC( 77, 23, 21, 2, yes, no, 0, no) \
+ SC( 78, 23, 21, 3, yes, no, 0, no) \
+ SC( 79, 23, 21, 4, yes, no, 0, no) \
+ \
+ SC( 80, 24, 22, 1, yes, no, 0, no) \
+ SC( 81, 24, 22, 2, yes, no, 0, no) \
+ SC( 82, 24, 22, 3, yes, no, 0, no) \
+ SC( 83, 24, 22, 4, yes, no, 0, no) \
+ \
+ SC( 84, 25, 23, 1, yes, no, 0, no) \
+ SC( 85, 25, 23, 2, yes, no, 0, no) \
+ SC( 86, 25, 23, 3, yes, no, 0, no) \
+ SC( 87, 25, 23, 4, yes, no, 0, no) \
+ \
+ SC( 88, 26, 24, 1, yes, no, 0, no) \
+ SC( 89, 26, 24, 2, yes, no, 0, no) \
+ SC( 90, 26, 24, 3, yes, no, 0, no) \
+ SC( 91, 26, 24, 4, yes, no, 0, no) \
+ \
+ SC( 92, 27, 25, 1, yes, no, 0, no) \
+ SC( 93, 27, 25, 2, yes, no, 0, no) \
+ SC( 94, 27, 25, 3, yes, no, 0, no) \
+ SC( 95, 27, 25, 4, yes, no, 0, no) \
+ \
+ SC( 96, 28, 26, 1, yes, no, 0, no) \
+ SC( 97, 28, 26, 2, yes, no, 0, no) \
+ SC( 98, 28, 26, 3, yes, no, 0, no) \
+ SC( 99, 28, 26, 4, yes, no, 0, no) \
+ \
+ SC(100, 29, 27, 1, yes, no, 0, no) \
+ SC(101, 29, 27, 2, yes, no, 0, no) \
+ SC(102, 29, 27, 3, yes, no, 0, no) \
+ SC(103, 29, 27, 4, yes, no, 0, no) \
+ \
+ SC(104, 30, 28, 1, yes, no, 0, no) \
+ SC(105, 30, 28, 2, yes, no, 0, no) \
+ SC(106, 30, 28, 3, yes, no, 0, no) \
+ SC(107, 30, 28, 4, yes, no, 0, no) \
+ \
+ SC(108, 31, 29, 1, yes, no, 0, no) \
+ SC(109, 31, 29, 2, yes, no, 0, no) \
+ SC(110, 31, 29, 3, yes, no, 0, no) \
+ SC(111, 31, 29, 4, yes, no, 0, no) \
+ \
+ SC(112, 32, 30, 1, yes, no, 0, no) \
+ SC(113, 32, 30, 2, yes, no, 0, no) \
+ SC(114, 32, 30, 3, yes, no, 0, no) \
+ SC(115, 32, 30, 4, yes, no, 0, no) \
+ \
+ SC(116, 33, 31, 1, yes, no, 0, no) \
+ SC(117, 33, 31, 2, yes, no, 0, no) \
+ SC(118, 33, 31, 3, yes, no, 0, no) \
+ SC(119, 33, 31, 4, yes, no, 0, no) \
+ \
+ SC(120, 34, 32, 1, yes, no, 0, no) \
+ SC(121, 34, 32, 2, yes, no, 0, no) \
+ SC(122, 34, 32, 3, yes, no, 0, no) \
+ SC(123, 34, 32, 4, yes, no, 0, no) \
+ \
+ SC(124, 35, 33, 1, yes, no, 0, no) \
+ SC(125, 35, 33, 2, yes, no, 0, no) \
+ SC(126, 35, 33, 3, yes, no, 0, no) \
+ SC(127, 35, 33, 4, yes, no, 0, no) \
+ \
+ SC(128, 36, 34, 1, yes, no, 0, no) \
+ SC(129, 36, 34, 2, yes, no, 0, no) \
+ SC(130, 36, 34, 3, yes, no, 0, no) \
+ SC(131, 36, 34, 4, yes, no, 0, no) \
+ \
+ SC(132, 37, 35, 1, yes, no, 0, no) \
+ SC(133, 37, 35, 2, yes, no, 0, no) \
+ SC(134, 37, 35, 3, yes, no, 0, no) \
+ SC(135, 37, 35, 4, yes, no, 0, no) \
+ \
+ SC(136, 38, 36, 1, yes, no, 0, no) \
+ SC(137, 38, 36, 2, yes, no, 0, no) \
+ SC(138, 38, 36, 3, yes, no, 0, no) \
+ SC(139, 38, 36, 4, yes, no, 0, no) \
+ \
+ SC(140, 39, 37, 1, yes, no, 0, no) \
+ SC(141, 39, 37, 2, yes, no, 0, no) \
+ SC(142, 39, 37, 3, yes, no, 0, no) \
+ SC(143, 39, 37, 4, yes, no, 0, no) \
+ \
+ SC(144, 40, 38, 1, yes, no, 0, no) \
+ SC(145, 40, 38, 2, yes, no, 0, no) \
+ SC(146, 40, 38, 3, yes, no, 0, no) \
+ SC(147, 40, 38, 4, yes, no, 0, no) \
+ \
+ SC(148, 41, 39, 1, yes, no, 0, no) \
+ SC(149, 41, 39, 2, yes, no, 0, no) \
+ SC(150, 41, 39, 3, yes, no, 0, no) \
+ SC(151, 41, 39, 4, yes, no, 0, no) \
+ \
+ SC(152, 42, 40, 1, yes, no, 0, no) \
+ SC(153, 42, 40, 2, yes, no, 0, no) \
+ SC(154, 42, 40, 3, yes, no, 0, no) \
+ SC(155, 42, 40, 4, yes, no, 0, no) \
+ \
+ SC(156, 43, 41, 1, yes, no, 0, no) \
+ SC(157, 43, 41, 2, yes, no, 0, no) \
+ SC(158, 43, 41, 3, yes, no, 0, no) \
+ SC(159, 43, 41, 4, yes, no, 0, no) \
+ \
+ SC(160, 44, 42, 1, yes, no, 0, no) \
+ SC(161, 44, 42, 2, yes, no, 0, no) \
+ SC(162, 44, 42, 3, yes, no, 0, no) \
+ SC(163, 44, 42, 4, yes, no, 0, no) \
+ \
+ SC(164, 45, 43, 1, yes, no, 0, no) \
+ SC(165, 45, 43, 2, yes, no, 0, no) \
+ SC(166, 45, 43, 3, yes, no, 0, no) \
+ SC(167, 45, 43, 4, yes, no, 0, no) \
+ \
+ SC(168, 46, 44, 1, yes, no, 0, no) \
+ SC(169, 46, 44, 2, yes, no, 0, no) \
+ SC(170, 46, 44, 3, yes, no, 0, no) \
+ SC(171, 46, 44, 4, yes, no, 0, no) \
+ \
+ SC(172, 47, 45, 1, yes, no, 0, no) \
+ SC(173, 47, 45, 2, yes, no, 0, no) \
+ SC(174, 47, 45, 3, yes, no, 0, no) \
+ SC(175, 47, 45, 4, yes, no, 0, no) \
+ \
+ SC(176, 48, 46, 1, yes, no, 0, no) \
+ SC(177, 48, 46, 2, yes, no, 0, no) \
+ SC(178, 48, 46, 3, yes, no, 0, no) \
+ SC(179, 48, 46, 4, yes, no, 0, no) \
+ \
+ SC(180, 49, 47, 1, yes, no, 0, no) \
+ SC(181, 49, 47, 2, yes, no, 0, no) \
+ SC(182, 49, 47, 3, yes, no, 0, no) \
+ SC(183, 49, 47, 4, yes, no, 0, no) \
+ \
+ SC(184, 50, 48, 1, yes, no, 0, no) \
+ SC(185, 50, 48, 2, yes, no, 0, no) \
+ SC(186, 50, 48, 3, yes, no, 0, no) \
+ SC(187, 50, 48, 4, yes, no, 0, no) \
+ \
+ SC(188, 51, 49, 1, yes, no, 0, no) \
+ SC(189, 51, 49, 2, yes, no, 0, no) \
+ SC(190, 51, 49, 3, yes, no, 0, no) \
+ SC(191, 51, 49, 4, yes, no, 0, no) \
+ \
+ SC(192, 52, 50, 1, yes, no, 0, no) \
+ SC(193, 52, 50, 2, yes, no, 0, no) \
+ SC(194, 52, 50, 3, yes, no, 0, no) \
+ SC(195, 52, 50, 4, yes, no, 0, no) \
+ \
+ SC(196, 53, 51, 1, yes, no, 0, no) \
+ SC(197, 53, 51, 2, yes, no, 0, no) \
+ SC(198, 53, 51, 3, yes, no, 0, no) \
+ SC(199, 53, 51, 4, yes, no, 0, no) \
+ \
+ SC(200, 54, 52, 1, yes, no, 0, no) \
+ SC(201, 54, 52, 2, yes, no, 0, no) \
+ SC(202, 54, 52, 3, yes, no, 0, no) \
+ SC(203, 54, 52, 4, yes, no, 0, no) \
+ \
+ SC(204, 55, 53, 1, yes, no, 0, no) \
+ SC(205, 55, 53, 2, yes, no, 0, no) \
+ SC(206, 55, 53, 3, yes, no, 0, no) \
+ SC(207, 55, 53, 4, yes, no, 0, no) \
+ \
+ SC(208, 56, 54, 1, yes, no, 0, no) \
+ SC(209, 56, 54, 2, yes, no, 0, no) \
+ SC(210, 56, 54, 3, yes, no, 0, no) \
+ SC(211, 56, 54, 4, yes, no, 0, no) \
+ \
+ SC(212, 57, 55, 1, yes, no, 0, no) \
+ SC(213, 57, 55, 2, yes, no, 0, no) \
+ SC(214, 57, 55, 3, yes, no, 0, no) \
+ SC(215, 57, 55, 4, yes, no, 0, no) \
+ \
+ SC(216, 58, 56, 1, yes, no, 0, no) \
+ SC(217, 58, 56, 2, yes, no, 0, no) \
+ SC(218, 58, 56, 3, yes, no, 0, no) \
+ SC(219, 58, 56, 4, yes, no, 0, no) \
+ \
+ SC(220, 59, 57, 1, yes, no, 0, no) \
+ SC(221, 59, 57, 2, yes, no, 0, no) \
+ SC(222, 59, 57, 3, yes, no, 0, no) \
+ SC(223, 59, 57, 4, yes, no, 0, no) \
+ \
+ SC(224, 60, 58, 1, yes, no, 0, no) \
+ SC(225, 60, 58, 2, yes, no, 0, no) \
+ SC(226, 60, 58, 3, yes, no, 0, no) \
+ SC(227, 60, 58, 4, yes, no, 0, no) \
+ \
+ SC(228, 61, 59, 1, yes, no, 0, no) \
+ SC(229, 61, 59, 2, yes, no, 0, no) \
+ SC(230, 61, 59, 3, yes, no, 0, no) \
+ SC(231, 61, 59, 4, yes, no, 0, no) \
+ \
+ SC(232, 62, 60, 1, yes, no, 0, no) \
+ SC(233, 62, 60, 2, yes, no, 0, no) \
+ SC(234, 62, 60, 3, yes, no, 0, no) \
-#define SIZE_CLASSES_DEFINED
-#define NTBINS 0
-#define NLBINS 32
-#define NBINS 55
-#define NSIZES 235
-#define NPSIZES 183
-#define LG_TINY_MAXCLASS "NA"
-#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
-#define SMALL_MAXCLASS ((((size_t)1) << 17) + (((size_t)3) << 15))
-#define LG_LARGE_MINCLASS 18
-#define HUGE_MAXCLASS ((((size_t)1) << 62) + (((size_t)3) << 60))
+#define SIZE_CLASSES_DEFINED
+#define NTBINS 0
+#define NLBINS 32
+#define NBINS 55
+#define NSIZES 235
+#define LG_CEIL_NSIZES 8
+#define NPSIZES 183
+#define LG_TINY_MAXCLASS "NA"
+#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
+#define SMALL_MAXCLASS ((((size_t)1) << 17) + (((size_t)3) << 15))
+#define LG_LARGE_MINCLASS 18
+#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)
+#define LARGE_MAXCLASS ((((size_t)1) << 62) + (((size_t)3) << 60))
#endif
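
A further hedged note on the constants this revision adds at the end of each block (LG_CEIL_NSIZES, LARGE_MINCLASS, and LARGE_MAXCLASS carrying the expression formerly named HUGE_MAXCLASS): LARGE_MINCLASS is defined directly above as ZU(1) << LG_LARGE_MINCLASS, and the values are consistent with LG_CEIL_NSIZES being the ceiling of lg(NSIZES). The checks below restate that with literal values copied from the block above (NSIZES == 235, LG_CEIL_NSIZES == 8, LG_LARGE_MINCLASS == 18); they are illustrative only and not part of the generated header.

#include <assert.h>

int
main(void) {
	/* 2^7 < 235 <= 2^8, so a ceiling-of-lg reading gives
	 * LG_CEIL_NSIZES == 8 for NSIZES == 235. */
	assert((1 << 7) < 235 && 235 <= (1 << 8));
	/* LARGE_MINCLASS for LG_LARGE_MINCLASS == 18 is 1 << 18 == 256 KiB. */
	assert((1UL << 18) == 262144);
	return 0;
}
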
#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 12)
-#define SIZE_CLASSES \
- /* index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup */ \
- SC( 0, 3, 3, 0, no, yes, 3) \
- \
- SC( 1, 3, 3, 1, no, yes, 3) \
- SC( 2, 4, 4, 1, no, yes, 4) \
- SC( 3, 4, 4, 2, no, yes, 4) \
- SC( 4, 4, 4, 3, no, yes, 4) \
- \
- SC( 5, 6, 4, 1, no, yes, 4) \
- SC( 6, 6, 4, 2, no, yes, 4) \
- SC( 7, 6, 4, 3, no, yes, 4) \
- SC( 8, 6, 4, 4, no, yes, 4) \
- \
- SC( 9, 7, 5, 1, no, yes, 5) \
- SC( 10, 7, 5, 2, no, yes, 5) \
- SC( 11, 7, 5, 3, no, yes, 5) \
- SC( 12, 7, 5, 4, no, yes, 5) \
- \
- SC( 13, 8, 6, 1, no, yes, 6) \
- SC( 14, 8, 6, 2, no, yes, 6) \
- SC( 15, 8, 6, 3, no, yes, 6) \
- SC( 16, 8, 6, 4, no, yes, 6) \
- \
- SC( 17, 9, 7, 1, no, yes, 7) \
- SC( 18, 9, 7, 2, no, yes, 7) \
- SC( 19, 9, 7, 3, no, yes, 7) \
- SC( 20, 9, 7, 4, no, yes, 7) \
- \
- SC( 21, 10, 8, 1, no, yes, 8) \
- SC( 22, 10, 8, 2, no, yes, 8) \
- SC( 23, 10, 8, 3, no, yes, 8) \
- SC( 24, 10, 8, 4, no, yes, 8) \
- \
- SC( 25, 11, 9, 1, no, yes, 9) \
- SC( 26, 11, 9, 2, no, yes, 9) \
- SC( 27, 11, 9, 3, no, yes, 9) \
- SC( 28, 11, 9, 4, yes, yes, 9) \
- \
- SC( 29, 12, 10, 1, no, yes, no) \
- SC( 30, 12, 10, 2, no, yes, no) \
- SC( 31, 12, 10, 3, no, yes, no) \
- SC( 32, 12, 10, 4, yes, yes, no) \
- \
- SC( 33, 13, 11, 1, no, yes, no) \
- SC( 34, 13, 11, 2, yes, yes, no) \
- SC( 35, 13, 11, 3, no, yes, no) \
- SC( 36, 13, 11, 4, yes, no, no) \
- \
- SC( 37, 14, 12, 1, yes, no, no) \
- SC( 38, 14, 12, 2, yes, no, no) \
- SC( 39, 14, 12, 3, yes, no, no) \
- SC( 40, 14, 12, 4, yes, no, no) \
- \
- SC( 41, 15, 13, 1, yes, no, no) \
- SC( 42, 15, 13, 2, yes, no, no) \
- SC( 43, 15, 13, 3, yes, no, no) \
- SC( 44, 15, 13, 4, yes, no, no) \
- \
- SC( 45, 16, 14, 1, yes, no, no) \
- SC( 46, 16, 14, 2, yes, no, no) \
- SC( 47, 16, 14, 3, yes, no, no) \
- SC( 48, 16, 14, 4, yes, no, no) \
- \
- SC( 49, 17, 15, 1, yes, no, no) \
- SC( 50, 17, 15, 2, yes, no, no) \
- SC( 51, 17, 15, 3, yes, no, no) \
- SC( 52, 17, 15, 4, yes, no, no) \
- \
- SC( 53, 18, 16, 1, yes, no, no) \
- SC( 54, 18, 16, 2, yes, no, no) \
- SC( 55, 18, 16, 3, yes, no, no) \
- SC( 56, 18, 16, 4, yes, no, no) \
- \
- SC( 57, 19, 17, 1, yes, no, no) \
- SC( 58, 19, 17, 2, yes, no, no) \
- SC( 59, 19, 17, 3, yes, no, no) \
- SC( 60, 19, 17, 4, yes, no, no) \
- \
- SC( 61, 20, 18, 1, yes, no, no) \
- SC( 62, 20, 18, 2, yes, no, no) \
- SC( 63, 20, 18, 3, yes, no, no) \
- SC( 64, 20, 18, 4, yes, no, no) \
- \
- SC( 65, 21, 19, 1, yes, no, no) \
- SC( 66, 21, 19, 2, yes, no, no) \
- SC( 67, 21, 19, 3, yes, no, no) \
- SC( 68, 21, 19, 4, yes, no, no) \
- \
- SC( 69, 22, 20, 1, yes, no, no) \
- SC( 70, 22, 20, 2, yes, no, no) \
- SC( 71, 22, 20, 3, yes, no, no) \
- SC( 72, 22, 20, 4, yes, no, no) \
- \
- SC( 73, 23, 21, 1, yes, no, no) \
- SC( 74, 23, 21, 2, yes, no, no) \
- SC( 75, 23, 21, 3, yes, no, no) \
- SC( 76, 23, 21, 4, yes, no, no) \
- \
- SC( 77, 24, 22, 1, yes, no, no) \
- SC( 78, 24, 22, 2, yes, no, no) \
- SC( 79, 24, 22, 3, yes, no, no) \
- SC( 80, 24, 22, 4, yes, no, no) \
- \
- SC( 81, 25, 23, 1, yes, no, no) \
- SC( 82, 25, 23, 2, yes, no, no) \
- SC( 83, 25, 23, 3, yes, no, no) \
- SC( 84, 25, 23, 4, yes, no, no) \
- \
- SC( 85, 26, 24, 1, yes, no, no) \
- SC( 86, 26, 24, 2, yes, no, no) \
- SC( 87, 26, 24, 3, yes, no, no) \
- SC( 88, 26, 24, 4, yes, no, no) \
- \
- SC( 89, 27, 25, 1, yes, no, no) \
- SC( 90, 27, 25, 2, yes, no, no) \
- SC( 91, 27, 25, 3, yes, no, no) \
- SC( 92, 27, 25, 4, yes, no, no) \
- \
- SC( 93, 28, 26, 1, yes, no, no) \
- SC( 94, 28, 26, 2, yes, no, no) \
- SC( 95, 28, 26, 3, yes, no, no) \
- SC( 96, 28, 26, 4, yes, no, no) \
- \
- SC( 97, 29, 27, 1, yes, no, no) \
- SC( 98, 29, 27, 2, yes, no, no) \
- SC( 99, 29, 27, 3, yes, no, no) \
- SC(100, 29, 27, 4, yes, no, no) \
- \
- SC(101, 30, 28, 1, yes, no, no) \
- SC(102, 30, 28, 2, yes, no, no) \
- SC(103, 30, 28, 3, yes, no, no) \
- SC(104, 30, 28, 4, yes, no, no) \
- \
- SC(105, 31, 29, 1, yes, no, no) \
- SC(106, 31, 29, 2, yes, no, no) \
- SC(107, 31, 29, 3, yes, no, no) \
- SC(108, 31, 29, 4, yes, no, no) \
- \
- SC(109, 32, 30, 1, yes, no, no) \
- SC(110, 32, 30, 2, yes, no, no) \
- SC(111, 32, 30, 3, yes, no, no) \
- SC(112, 32, 30, 4, yes, no, no) \
- \
- SC(113, 33, 31, 1, yes, no, no) \
- SC(114, 33, 31, 2, yes, no, no) \
- SC(115, 33, 31, 3, yes, no, no) \
- SC(116, 33, 31, 4, yes, no, no) \
- \
- SC(117, 34, 32, 1, yes, no, no) \
- SC(118, 34, 32, 2, yes, no, no) \
- SC(119, 34, 32, 3, yes, no, no) \
- SC(120, 34, 32, 4, yes, no, no) \
- \
- SC(121, 35, 33, 1, yes, no, no) \
- SC(122, 35, 33, 2, yes, no, no) \
- SC(123, 35, 33, 3, yes, no, no) \
- SC(124, 35, 33, 4, yes, no, no) \
- \
- SC(125, 36, 34, 1, yes, no, no) \
- SC(126, 36, 34, 2, yes, no, no) \
- SC(127, 36, 34, 3, yes, no, no) \
- SC(128, 36, 34, 4, yes, no, no) \
- \
- SC(129, 37, 35, 1, yes, no, no) \
- SC(130, 37, 35, 2, yes, no, no) \
- SC(131, 37, 35, 3, yes, no, no) \
- SC(132, 37, 35, 4, yes, no, no) \
- \
- SC(133, 38, 36, 1, yes, no, no) \
- SC(134, 38, 36, 2, yes, no, no) \
- SC(135, 38, 36, 3, yes, no, no) \
- SC(136, 38, 36, 4, yes, no, no) \
- \
- SC(137, 39, 37, 1, yes, no, no) \
- SC(138, 39, 37, 2, yes, no, no) \
- SC(139, 39, 37, 3, yes, no, no) \
- SC(140, 39, 37, 4, yes, no, no) \
- \
- SC(141, 40, 38, 1, yes, no, no) \
- SC(142, 40, 38, 2, yes, no, no) \
- SC(143, 40, 38, 3, yes, no, no) \
- SC(144, 40, 38, 4, yes, no, no) \
- \
- SC(145, 41, 39, 1, yes, no, no) \
- SC(146, 41, 39, 2, yes, no, no) \
- SC(147, 41, 39, 3, yes, no, no) \
- SC(148, 41, 39, 4, yes, no, no) \
- \
- SC(149, 42, 40, 1, yes, no, no) \
- SC(150, 42, 40, 2, yes, no, no) \
- SC(151, 42, 40, 3, yes, no, no) \
- SC(152, 42, 40, 4, yes, no, no) \
- \
- SC(153, 43, 41, 1, yes, no, no) \
- SC(154, 43, 41, 2, yes, no, no) \
- SC(155, 43, 41, 3, yes, no, no) \
- SC(156, 43, 41, 4, yes, no, no) \
- \
- SC(157, 44, 42, 1, yes, no, no) \
- SC(158, 44, 42, 2, yes, no, no) \
- SC(159, 44, 42, 3, yes, no, no) \
- SC(160, 44, 42, 4, yes, no, no) \
- \
- SC(161, 45, 43, 1, yes, no, no) \
- SC(162, 45, 43, 2, yes, no, no) \
- SC(163, 45, 43, 3, yes, no, no) \
- SC(164, 45, 43, 4, yes, no, no) \
- \
- SC(165, 46, 44, 1, yes, no, no) \
- SC(166, 46, 44, 2, yes, no, no) \
- SC(167, 46, 44, 3, yes, no, no) \
- SC(168, 46, 44, 4, yes, no, no) \
- \
- SC(169, 47, 45, 1, yes, no, no) \
- SC(170, 47, 45, 2, yes, no, no) \
- SC(171, 47, 45, 3, yes, no, no) \
- SC(172, 47, 45, 4, yes, no, no) \
- \
- SC(173, 48, 46, 1, yes, no, no) \
- SC(174, 48, 46, 2, yes, no, no) \
- SC(175, 48, 46, 3, yes, no, no) \
- SC(176, 48, 46, 4, yes, no, no) \
- \
- SC(177, 49, 47, 1, yes, no, no) \
- SC(178, 49, 47, 2, yes, no, no) \
- SC(179, 49, 47, 3, yes, no, no) \
- SC(180, 49, 47, 4, yes, no, no) \
- \
- SC(181, 50, 48, 1, yes, no, no) \
- SC(182, 50, 48, 2, yes, no, no) \
- SC(183, 50, 48, 3, yes, no, no) \
- SC(184, 50, 48, 4, yes, no, no) \
- \
- SC(185, 51, 49, 1, yes, no, no) \
- SC(186, 51, 49, 2, yes, no, no) \
- SC(187, 51, 49, 3, yes, no, no) \
- SC(188, 51, 49, 4, yes, no, no) \
- \
- SC(189, 52, 50, 1, yes, no, no) \
- SC(190, 52, 50, 2, yes, no, no) \
- SC(191, 52, 50, 3, yes, no, no) \
- SC(192, 52, 50, 4, yes, no, no) \
- \
- SC(193, 53, 51, 1, yes, no, no) \
- SC(194, 53, 51, 2, yes, no, no) \
- SC(195, 53, 51, 3, yes, no, no) \
- SC(196, 53, 51, 4, yes, no, no) \
- \
- SC(197, 54, 52, 1, yes, no, no) \
- SC(198, 54, 52, 2, yes, no, no) \
- SC(199, 54, 52, 3, yes, no, no) \
- SC(200, 54, 52, 4, yes, no, no) \
- \
- SC(201, 55, 53, 1, yes, no, no) \
- SC(202, 55, 53, 2, yes, no, no) \
- SC(203, 55, 53, 3, yes, no, no) \
- SC(204, 55, 53, 4, yes, no, no) \
- \
- SC(205, 56, 54, 1, yes, no, no) \
- SC(206, 56, 54, 2, yes, no, no) \
- SC(207, 56, 54, 3, yes, no, no) \
- SC(208, 56, 54, 4, yes, no, no) \
- \
- SC(209, 57, 55, 1, yes, no, no) \
- SC(210, 57, 55, 2, yes, no, no) \
- SC(211, 57, 55, 3, yes, no, no) \
- SC(212, 57, 55, 4, yes, no, no) \
- \
- SC(213, 58, 56, 1, yes, no, no) \
- SC(214, 58, 56, 2, yes, no, no) \
- SC(215, 58, 56, 3, yes, no, no) \
- SC(216, 58, 56, 4, yes, no, no) \
- \
- SC(217, 59, 57, 1, yes, no, no) \
- SC(218, 59, 57, 2, yes, no, no) \
- SC(219, 59, 57, 3, yes, no, no) \
- SC(220, 59, 57, 4, yes, no, no) \
- \
- SC(221, 60, 58, 1, yes, no, no) \
- SC(222, 60, 58, 2, yes, no, no) \
- SC(223, 60, 58, 3, yes, no, no) \
- SC(224, 60, 58, 4, yes, no, no) \
- \
- SC(225, 61, 59, 1, yes, no, no) \
- SC(226, 61, 59, 2, yes, no, no) \
- SC(227, 61, 59, 3, yes, no, no) \
- SC(228, 61, 59, 4, yes, no, no) \
- \
- SC(229, 62, 60, 1, yes, no, no) \
- SC(230, 62, 60, 2, yes, no, no) \
- SC(231, 62, 60, 3, yes, no, no) \
+#define SIZE_CLASSES \
+ /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \
+ SC( 0, 3, 3, 0, no, yes, 1, 3) \
+ \
+ SC( 1, 3, 3, 1, no, yes, 1, 3) \
+ SC( 2, 4, 4, 1, no, yes, 1, 4) \
+ SC( 3, 4, 4, 2, no, yes, 3, 4) \
+ SC( 4, 4, 4, 3, no, yes, 1, 4) \
+ \
+ SC( 5, 6, 4, 1, no, yes, 5, 4) \
+ SC( 6, 6, 4, 2, no, yes, 3, 4) \
+ SC( 7, 6, 4, 3, no, yes, 7, 4) \
+ SC( 8, 6, 4, 4, no, yes, 1, 4) \
+ \
+ SC( 9, 7, 5, 1, no, yes, 5, 5) \
+ SC( 10, 7, 5, 2, no, yes, 3, 5) \
+ SC( 11, 7, 5, 3, no, yes, 7, 5) \
+ SC( 12, 7, 5, 4, no, yes, 1, 5) \
+ \
+ SC( 13, 8, 6, 1, no, yes, 5, 6) \
+ SC( 14, 8, 6, 2, no, yes, 3, 6) \
+ SC( 15, 8, 6, 3, no, yes, 7, 6) \
+ SC( 16, 8, 6, 4, no, yes, 1, 6) \
+ \
+ SC( 17, 9, 7, 1, no, yes, 5, 7) \
+ SC( 18, 9, 7, 2, no, yes, 3, 7) \
+ SC( 19, 9, 7, 3, no, yes, 7, 7) \
+ SC( 20, 9, 7, 4, no, yes, 1, 7) \
+ \
+ SC( 21, 10, 8, 1, no, yes, 5, 8) \
+ SC( 22, 10, 8, 2, no, yes, 3, 8) \
+ SC( 23, 10, 8, 3, no, yes, 7, 8) \
+ SC( 24, 10, 8, 4, no, yes, 1, 8) \
+ \
+ SC( 25, 11, 9, 1, no, yes, 5, 9) \
+ SC( 26, 11, 9, 2, no, yes, 3, 9) \
+ SC( 27, 11, 9, 3, no, yes, 7, 9) \
+ SC( 28, 11, 9, 4, yes, yes, 1, 9) \
+ \
+ SC( 29, 12, 10, 1, no, yes, 5, no) \
+ SC( 30, 12, 10, 2, no, yes, 3, no) \
+ SC( 31, 12, 10, 3, no, yes, 7, no) \
+ SC( 32, 12, 10, 4, yes, yes, 2, no) \
+ \
+ SC( 33, 13, 11, 1, no, yes, 5, no) \
+ SC( 34, 13, 11, 2, yes, yes, 3, no) \
+ SC( 35, 13, 11, 3, no, yes, 7, no) \
+ SC( 36, 13, 11, 4, yes, no, 0, no) \
+ \
+ SC( 37, 14, 12, 1, yes, no, 0, no) \
+ SC( 38, 14, 12, 2, yes, no, 0, no) \
+ SC( 39, 14, 12, 3, yes, no, 0, no) \
+ SC( 40, 14, 12, 4, yes, no, 0, no) \
+ \
+ SC( 41, 15, 13, 1, yes, no, 0, no) \
+ SC( 42, 15, 13, 2, yes, no, 0, no) \
+ SC( 43, 15, 13, 3, yes, no, 0, no) \
+ SC( 44, 15, 13, 4, yes, no, 0, no) \
+ \
+ SC( 45, 16, 14, 1, yes, no, 0, no) \
+ SC( 46, 16, 14, 2, yes, no, 0, no) \
+ SC( 47, 16, 14, 3, yes, no, 0, no) \
+ SC( 48, 16, 14, 4, yes, no, 0, no) \
+ \
+ SC( 49, 17, 15, 1, yes, no, 0, no) \
+ SC( 50, 17, 15, 2, yes, no, 0, no) \
+ SC( 51, 17, 15, 3, yes, no, 0, no) \
+ SC( 52, 17, 15, 4, yes, no, 0, no) \
+ \
+ SC( 53, 18, 16, 1, yes, no, 0, no) \
+ SC( 54, 18, 16, 2, yes, no, 0, no) \
+ SC( 55, 18, 16, 3, yes, no, 0, no) \
+ SC( 56, 18, 16, 4, yes, no, 0, no) \
+ \
+ SC( 57, 19, 17, 1, yes, no, 0, no) \
+ SC( 58, 19, 17, 2, yes, no, 0, no) \
+ SC( 59, 19, 17, 3, yes, no, 0, no) \
+ SC( 60, 19, 17, 4, yes, no, 0, no) \
+ \
+ SC( 61, 20, 18, 1, yes, no, 0, no) \
+ SC( 62, 20, 18, 2, yes, no, 0, no) \
+ SC( 63, 20, 18, 3, yes, no, 0, no) \
+ SC( 64, 20, 18, 4, yes, no, 0, no) \
+ \
+ SC( 65, 21, 19, 1, yes, no, 0, no) \
+ SC( 66, 21, 19, 2, yes, no, 0, no) \
+ SC( 67, 21, 19, 3, yes, no, 0, no) \
+ SC( 68, 21, 19, 4, yes, no, 0, no) \
+ \
+ SC( 69, 22, 20, 1, yes, no, 0, no) \
+ SC( 70, 22, 20, 2, yes, no, 0, no) \
+ SC( 71, 22, 20, 3, yes, no, 0, no) \
+ SC( 72, 22, 20, 4, yes, no, 0, no) \
+ \
+ SC( 73, 23, 21, 1, yes, no, 0, no) \
+ SC( 74, 23, 21, 2, yes, no, 0, no) \
+ SC( 75, 23, 21, 3, yes, no, 0, no) \
+ SC( 76, 23, 21, 4, yes, no, 0, no) \
+ \
+ SC( 77, 24, 22, 1, yes, no, 0, no) \
+ SC( 78, 24, 22, 2, yes, no, 0, no) \
+ SC( 79, 24, 22, 3, yes, no, 0, no) \
+ SC( 80, 24, 22, 4, yes, no, 0, no) \
+ \
+ SC( 81, 25, 23, 1, yes, no, 0, no) \
+ SC( 82, 25, 23, 2, yes, no, 0, no) \
+ SC( 83, 25, 23, 3, yes, no, 0, no) \
+ SC( 84, 25, 23, 4, yes, no, 0, no) \
+ \
+ SC( 85, 26, 24, 1, yes, no, 0, no) \
+ SC( 86, 26, 24, 2, yes, no, 0, no) \
+ SC( 87, 26, 24, 3, yes, no, 0, no) \
+ SC( 88, 26, 24, 4, yes, no, 0, no) \
+ \
+ SC( 89, 27, 25, 1, yes, no, 0, no) \
+ SC( 90, 27, 25, 2, yes, no, 0, no) \
+ SC( 91, 27, 25, 3, yes, no, 0, no) \
+ SC( 92, 27, 25, 4, yes, no, 0, no) \
+ \
+ SC( 93, 28, 26, 1, yes, no, 0, no) \
+ SC( 94, 28, 26, 2, yes, no, 0, no) \
+ SC( 95, 28, 26, 3, yes, no, 0, no) \
+ SC( 96, 28, 26, 4, yes, no, 0, no) \
+ \
+ SC( 97, 29, 27, 1, yes, no, 0, no) \
+ SC( 98, 29, 27, 2, yes, no, 0, no) \
+ SC( 99, 29, 27, 3, yes, no, 0, no) \
+ SC(100, 29, 27, 4, yes, no, 0, no) \
+ \
+ SC(101, 30, 28, 1, yes, no, 0, no) \
+ SC(102, 30, 28, 2, yes, no, 0, no) \
+ SC(103, 30, 28, 3, yes, no, 0, no) \
+ SC(104, 30, 28, 4, yes, no, 0, no) \
+ \
+ SC(105, 31, 29, 1, yes, no, 0, no) \
+ SC(106, 31, 29, 2, yes, no, 0, no) \
+ SC(107, 31, 29, 3, yes, no, 0, no) \
+ SC(108, 31, 29, 4, yes, no, 0, no) \
+ \
+ SC(109, 32, 30, 1, yes, no, 0, no) \
+ SC(110, 32, 30, 2, yes, no, 0, no) \
+ SC(111, 32, 30, 3, yes, no, 0, no) \
+ SC(112, 32, 30, 4, yes, no, 0, no) \
+ \
+ SC(113, 33, 31, 1, yes, no, 0, no) \
+ SC(114, 33, 31, 2, yes, no, 0, no) \
+ SC(115, 33, 31, 3, yes, no, 0, no) \
+ SC(116, 33, 31, 4, yes, no, 0, no) \
+ \
+ SC(117, 34, 32, 1, yes, no, 0, no) \
+ SC(118, 34, 32, 2, yes, no, 0, no) \
+ SC(119, 34, 32, 3, yes, no, 0, no) \
+ SC(120, 34, 32, 4, yes, no, 0, no) \
+ \
+ SC(121, 35, 33, 1, yes, no, 0, no) \
+ SC(122, 35, 33, 2, yes, no, 0, no) \
+ SC(123, 35, 33, 3, yes, no, 0, no) \
+ SC(124, 35, 33, 4, yes, no, 0, no) \
+ \
+ SC(125, 36, 34, 1, yes, no, 0, no) \
+ SC(126, 36, 34, 2, yes, no, 0, no) \
+ SC(127, 36, 34, 3, yes, no, 0, no) \
+ SC(128, 36, 34, 4, yes, no, 0, no) \
+ \
+ SC(129, 37, 35, 1, yes, no, 0, no) \
+ SC(130, 37, 35, 2, yes, no, 0, no) \
+ SC(131, 37, 35, 3, yes, no, 0, no) \
+ SC(132, 37, 35, 4, yes, no, 0, no) \
+ \
+ SC(133, 38, 36, 1, yes, no, 0, no) \
+ SC(134, 38, 36, 2, yes, no, 0, no) \
+ SC(135, 38, 36, 3, yes, no, 0, no) \
+ SC(136, 38, 36, 4, yes, no, 0, no) \
+ \
+ SC(137, 39, 37, 1, yes, no, 0, no) \
+ SC(138, 39, 37, 2, yes, no, 0, no) \
+ SC(139, 39, 37, 3, yes, no, 0, no) \
+ SC(140, 39, 37, 4, yes, no, 0, no) \
+ \
+ SC(141, 40, 38, 1, yes, no, 0, no) \
+ SC(142, 40, 38, 2, yes, no, 0, no) \
+ SC(143, 40, 38, 3, yes, no, 0, no) \
+ SC(144, 40, 38, 4, yes, no, 0, no) \
+ \
+ SC(145, 41, 39, 1, yes, no, 0, no) \
+ SC(146, 41, 39, 2, yes, no, 0, no) \
+ SC(147, 41, 39, 3, yes, no, 0, no) \
+ SC(148, 41, 39, 4, yes, no, 0, no) \
+ \
+ SC(149, 42, 40, 1, yes, no, 0, no) \
+ SC(150, 42, 40, 2, yes, no, 0, no) \
+ SC(151, 42, 40, 3, yes, no, 0, no) \
+ SC(152, 42, 40, 4, yes, no, 0, no) \
+ \
+ SC(153, 43, 41, 1, yes, no, 0, no) \
+ SC(154, 43, 41, 2, yes, no, 0, no) \
+ SC(155, 43, 41, 3, yes, no, 0, no) \
+ SC(156, 43, 41, 4, yes, no, 0, no) \
+ \
+ SC(157, 44, 42, 1, yes, no, 0, no) \
+ SC(158, 44, 42, 2, yes, no, 0, no) \
+ SC(159, 44, 42, 3, yes, no, 0, no) \
+ SC(160, 44, 42, 4, yes, no, 0, no) \
+ \
+ SC(161, 45, 43, 1, yes, no, 0, no) \
+ SC(162, 45, 43, 2, yes, no, 0, no) \
+ SC(163, 45, 43, 3, yes, no, 0, no) \
+ SC(164, 45, 43, 4, yes, no, 0, no) \
+ \
+ SC(165, 46, 44, 1, yes, no, 0, no) \
+ SC(166, 46, 44, 2, yes, no, 0, no) \
+ SC(167, 46, 44, 3, yes, no, 0, no) \
+ SC(168, 46, 44, 4, yes, no, 0, no) \
+ \
+ SC(169, 47, 45, 1, yes, no, 0, no) \
+ SC(170, 47, 45, 2, yes, no, 0, no) \
+ SC(171, 47, 45, 3, yes, no, 0, no) \
+ SC(172, 47, 45, 4, yes, no, 0, no) \
+ \
+ SC(173, 48, 46, 1, yes, no, 0, no) \
+ SC(174, 48, 46, 2, yes, no, 0, no) \
+ SC(175, 48, 46, 3, yes, no, 0, no) \
+ SC(176, 48, 46, 4, yes, no, 0, no) \
+ \
+ SC(177, 49, 47, 1, yes, no, 0, no) \
+ SC(178, 49, 47, 2, yes, no, 0, no) \
+ SC(179, 49, 47, 3, yes, no, 0, no) \
+ SC(180, 49, 47, 4, yes, no, 0, no) \
+ \
+ SC(181, 50, 48, 1, yes, no, 0, no) \
+ SC(182, 50, 48, 2, yes, no, 0, no) \
+ SC(183, 50, 48, 3, yes, no, 0, no) \
+ SC(184, 50, 48, 4, yes, no, 0, no) \
+ \
+ SC(185, 51, 49, 1, yes, no, 0, no) \
+ SC(186, 51, 49, 2, yes, no, 0, no) \
+ SC(187, 51, 49, 3, yes, no, 0, no) \
+ SC(188, 51, 49, 4, yes, no, 0, no) \
+ \
+ SC(189, 52, 50, 1, yes, no, 0, no) \
+ SC(190, 52, 50, 2, yes, no, 0, no) \
+ SC(191, 52, 50, 3, yes, no, 0, no) \
+ SC(192, 52, 50, 4, yes, no, 0, no) \
+ \
+ SC(193, 53, 51, 1, yes, no, 0, no) \
+ SC(194, 53, 51, 2, yes, no, 0, no) \
+ SC(195, 53, 51, 3, yes, no, 0, no) \
+ SC(196, 53, 51, 4, yes, no, 0, no) \
+ \
+ SC(197, 54, 52, 1, yes, no, 0, no) \
+ SC(198, 54, 52, 2, yes, no, 0, no) \
+ SC(199, 54, 52, 3, yes, no, 0, no) \
+ SC(200, 54, 52, 4, yes, no, 0, no) \
+ \
+ SC(201, 55, 53, 1, yes, no, 0, no) \
+ SC(202, 55, 53, 2, yes, no, 0, no) \
+ SC(203, 55, 53, 3, yes, no, 0, no) \
+ SC(204, 55, 53, 4, yes, no, 0, no) \
+ \
+ SC(205, 56, 54, 1, yes, no, 0, no) \
+ SC(206, 56, 54, 2, yes, no, 0, no) \
+ SC(207, 56, 54, 3, yes, no, 0, no) \
+ SC(208, 56, 54, 4, yes, no, 0, no) \
+ \
+ SC(209, 57, 55, 1, yes, no, 0, no) \
+ SC(210, 57, 55, 2, yes, no, 0, no) \
+ SC(211, 57, 55, 3, yes, no, 0, no) \
+ SC(212, 57, 55, 4, yes, no, 0, no) \
+ \
+ SC(213, 58, 56, 1, yes, no, 0, no) \
+ SC(214, 58, 56, 2, yes, no, 0, no) \
+ SC(215, 58, 56, 3, yes, no, 0, no) \
+ SC(216, 58, 56, 4, yes, no, 0, no) \
+ \
+ SC(217, 59, 57, 1, yes, no, 0, no) \
+ SC(218, 59, 57, 2, yes, no, 0, no) \
+ SC(219, 59, 57, 3, yes, no, 0, no) \
+ SC(220, 59, 57, 4, yes, no, 0, no) \
+ \
+ SC(221, 60, 58, 1, yes, no, 0, no) \
+ SC(222, 60, 58, 2, yes, no, 0, no) \
+ SC(223, 60, 58, 3, yes, no, 0, no) \
+ SC(224, 60, 58, 4, yes, no, 0, no) \
+ \
+ SC(225, 61, 59, 1, yes, no, 0, no) \
+ SC(226, 61, 59, 2, yes, no, 0, no) \
+ SC(227, 61, 59, 3, yes, no, 0, no) \
+ SC(228, 61, 59, 4, yes, no, 0, no) \
+ \
+ SC(229, 62, 60, 1, yes, no, 0, no) \
+ SC(230, 62, 60, 2, yes, no, 0, no) \
+ SC(231, 62, 60, 3, yes, no, 0, no) \
-#define SIZE_CLASSES_DEFINED
-#define NTBINS 1
-#define NLBINS 29
-#define NBINS 36
-#define NSIZES 232
-#define NPSIZES 199
-#define LG_TINY_MAXCLASS 3
-#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
-#define SMALL_MAXCLASS ((((size_t)1) << 13) + (((size_t)3) << 11))
-#define LG_LARGE_MINCLASS 14
-#define HUGE_MAXCLASS ((((size_t)1) << 62) + (((size_t)3) << 60))
+#define SIZE_CLASSES_DEFINED
+#define NTBINS 1
+#define NLBINS 29
+#define NBINS 36
+#define NSIZES 232
+#define LG_CEIL_NSIZES 8
+#define NPSIZES 199
+#define LG_TINY_MAXCLASS 3
+#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
+#define SMALL_MAXCLASS ((((size_t)1) << 13) + (((size_t)3) << 11))
+#define LG_LARGE_MINCLASS 14
+#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)
+#define LARGE_MAXCLASS ((((size_t)1) << 62) + (((size_t)3) << 60))
#endif
#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 13)
-#define SIZE_CLASSES \
- /* index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup */ \
- SC( 0, 3, 3, 0, no, yes, 3) \
- \
- SC( 1, 3, 3, 1, no, yes, 3) \
- SC( 2, 4, 4, 1, no, yes, 4) \
- SC( 3, 4, 4, 2, no, yes, 4) \
- SC( 4, 4, 4, 3, no, yes, 4) \
- \
- SC( 5, 6, 4, 1, no, yes, 4) \
- SC( 6, 6, 4, 2, no, yes, 4) \
- SC( 7, 6, 4, 3, no, yes, 4) \
- SC( 8, 6, 4, 4, no, yes, 4) \
- \
- SC( 9, 7, 5, 1, no, yes, 5) \
- SC( 10, 7, 5, 2, no, yes, 5) \
- SC( 11, 7, 5, 3, no, yes, 5) \
- SC( 12, 7, 5, 4, no, yes, 5) \
- \
- SC( 13, 8, 6, 1, no, yes, 6) \
- SC( 14, 8, 6, 2, no, yes, 6) \
- SC( 15, 8, 6, 3, no, yes, 6) \
- SC( 16, 8, 6, 4, no, yes, 6) \
- \
- SC( 17, 9, 7, 1, no, yes, 7) \
- SC( 18, 9, 7, 2, no, yes, 7) \
- SC( 19, 9, 7, 3, no, yes, 7) \
- SC( 20, 9, 7, 4, no, yes, 7) \
- \
- SC( 21, 10, 8, 1, no, yes, 8) \
- SC( 22, 10, 8, 2, no, yes, 8) \
- SC( 23, 10, 8, 3, no, yes, 8) \
- SC( 24, 10, 8, 4, no, yes, 8) \
- \
- SC( 25, 11, 9, 1, no, yes, 9) \
- SC( 26, 11, 9, 2, no, yes, 9) \
- SC( 27, 11, 9, 3, no, yes, 9) \
- SC( 28, 11, 9, 4, no, yes, 9) \
- \
- SC( 29, 12, 10, 1, no, yes, no) \
- SC( 30, 12, 10, 2, no, yes, no) \
- SC( 31, 12, 10, 3, no, yes, no) \
- SC( 32, 12, 10, 4, yes, yes, no) \
- \
- SC( 33, 13, 11, 1, no, yes, no) \
- SC( 34, 13, 11, 2, no, yes, no) \
- SC( 35, 13, 11, 3, no, yes, no) \
- SC( 36, 13, 11, 4, yes, yes, no) \
- \
- SC( 37, 14, 12, 1, no, yes, no) \
- SC( 38, 14, 12, 2, yes, yes, no) \
- SC( 39, 14, 12, 3, no, yes, no) \
- SC( 40, 14, 12, 4, yes, no, no) \
- \
- SC( 41, 15, 13, 1, yes, no, no) \
- SC( 42, 15, 13, 2, yes, no, no) \
- SC( 43, 15, 13, 3, yes, no, no) \
- SC( 44, 15, 13, 4, yes, no, no) \
- \
- SC( 45, 16, 14, 1, yes, no, no) \
- SC( 46, 16, 14, 2, yes, no, no) \
- SC( 47, 16, 14, 3, yes, no, no) \
- SC( 48, 16, 14, 4, yes, no, no) \
- \
- SC( 49, 17, 15, 1, yes, no, no) \
- SC( 50, 17, 15, 2, yes, no, no) \
- SC( 51, 17, 15, 3, yes, no, no) \
- SC( 52, 17, 15, 4, yes, no, no) \
- \
- SC( 53, 18, 16, 1, yes, no, no) \
- SC( 54, 18, 16, 2, yes, no, no) \
- SC( 55, 18, 16, 3, yes, no, no) \
- SC( 56, 18, 16, 4, yes, no, no) \
- \
- SC( 57, 19, 17, 1, yes, no, no) \
- SC( 58, 19, 17, 2, yes, no, no) \
- SC( 59, 19, 17, 3, yes, no, no) \
- SC( 60, 19, 17, 4, yes, no, no) \
- \
- SC( 61, 20, 18, 1, yes, no, no) \
- SC( 62, 20, 18, 2, yes, no, no) \
- SC( 63, 20, 18, 3, yes, no, no) \
- SC( 64, 20, 18, 4, yes, no, no) \
- \
- SC( 65, 21, 19, 1, yes, no, no) \
- SC( 66, 21, 19, 2, yes, no, no) \
- SC( 67, 21, 19, 3, yes, no, no) \
- SC( 68, 21, 19, 4, yes, no, no) \
- \
- SC( 69, 22, 20, 1, yes, no, no) \
- SC( 70, 22, 20, 2, yes, no, no) \
- SC( 71, 22, 20, 3, yes, no, no) \
- SC( 72, 22, 20, 4, yes, no, no) \
- \
- SC( 73, 23, 21, 1, yes, no, no) \
- SC( 74, 23, 21, 2, yes, no, no) \
- SC( 75, 23, 21, 3, yes, no, no) \
- SC( 76, 23, 21, 4, yes, no, no) \
- \
- SC( 77, 24, 22, 1, yes, no, no) \
- SC( 78, 24, 22, 2, yes, no, no) \
- SC( 79, 24, 22, 3, yes, no, no) \
- SC( 80, 24, 22, 4, yes, no, no) \
- \
- SC( 81, 25, 23, 1, yes, no, no) \
- SC( 82, 25, 23, 2, yes, no, no) \
- SC( 83, 25, 23, 3, yes, no, no) \
- SC( 84, 25, 23, 4, yes, no, no) \
- \
- SC( 85, 26, 24, 1, yes, no, no) \
- SC( 86, 26, 24, 2, yes, no, no) \
- SC( 87, 26, 24, 3, yes, no, no) \
- SC( 88, 26, 24, 4, yes, no, no) \
- \
- SC( 89, 27, 25, 1, yes, no, no) \
- SC( 90, 27, 25, 2, yes, no, no) \
- SC( 91, 27, 25, 3, yes, no, no) \
- SC( 92, 27, 25, 4, yes, no, no) \
- \
- SC( 93, 28, 26, 1, yes, no, no) \
- SC( 94, 28, 26, 2, yes, no, no) \
- SC( 95, 28, 26, 3, yes, no, no) \
- SC( 96, 28, 26, 4, yes, no, no) \
- \
- SC( 97, 29, 27, 1, yes, no, no) \
- SC( 98, 29, 27, 2, yes, no, no) \
- SC( 99, 29, 27, 3, yes, no, no) \
- SC(100, 29, 27, 4, yes, no, no) \
- \
- SC(101, 30, 28, 1, yes, no, no) \
- SC(102, 30, 28, 2, yes, no, no) \
- SC(103, 30, 28, 3, yes, no, no) \
- SC(104, 30, 28, 4, yes, no, no) \
- \
- SC(105, 31, 29, 1, yes, no, no) \
- SC(106, 31, 29, 2, yes, no, no) \
- SC(107, 31, 29, 3, yes, no, no) \
- SC(108, 31, 29, 4, yes, no, no) \
- \
- SC(109, 32, 30, 1, yes, no, no) \
- SC(110, 32, 30, 2, yes, no, no) \
- SC(111, 32, 30, 3, yes, no, no) \
- SC(112, 32, 30, 4, yes, no, no) \
- \
- SC(113, 33, 31, 1, yes, no, no) \
- SC(114, 33, 31, 2, yes, no, no) \
- SC(115, 33, 31, 3, yes, no, no) \
- SC(116, 33, 31, 4, yes, no, no) \
- \
- SC(117, 34, 32, 1, yes, no, no) \
- SC(118, 34, 32, 2, yes, no, no) \
- SC(119, 34, 32, 3, yes, no, no) \
- SC(120, 34, 32, 4, yes, no, no) \
- \
- SC(121, 35, 33, 1, yes, no, no) \
- SC(122, 35, 33, 2, yes, no, no) \
- SC(123, 35, 33, 3, yes, no, no) \
- SC(124, 35, 33, 4, yes, no, no) \
- \
- SC(125, 36, 34, 1, yes, no, no) \
- SC(126, 36, 34, 2, yes, no, no) \
- SC(127, 36, 34, 3, yes, no, no) \
- SC(128, 36, 34, 4, yes, no, no) \
- \
- SC(129, 37, 35, 1, yes, no, no) \
- SC(130, 37, 35, 2, yes, no, no) \
- SC(131, 37, 35, 3, yes, no, no) \
- SC(132, 37, 35, 4, yes, no, no) \
- \
- SC(133, 38, 36, 1, yes, no, no) \
- SC(134, 38, 36, 2, yes, no, no) \
- SC(135, 38, 36, 3, yes, no, no) \
- SC(136, 38, 36, 4, yes, no, no) \
- \
- SC(137, 39, 37, 1, yes, no, no) \
- SC(138, 39, 37, 2, yes, no, no) \
- SC(139, 39, 37, 3, yes, no, no) \
- SC(140, 39, 37, 4, yes, no, no) \
- \
- SC(141, 40, 38, 1, yes, no, no) \
- SC(142, 40, 38, 2, yes, no, no) \
- SC(143, 40, 38, 3, yes, no, no) \
- SC(144, 40, 38, 4, yes, no, no) \
- \
- SC(145, 41, 39, 1, yes, no, no) \
- SC(146, 41, 39, 2, yes, no, no) \
- SC(147, 41, 39, 3, yes, no, no) \
- SC(148, 41, 39, 4, yes, no, no) \
- \
- SC(149, 42, 40, 1, yes, no, no) \
- SC(150, 42, 40, 2, yes, no, no) \
- SC(151, 42, 40, 3, yes, no, no) \
- SC(152, 42, 40, 4, yes, no, no) \
- \
- SC(153, 43, 41, 1, yes, no, no) \
- SC(154, 43, 41, 2, yes, no, no) \
- SC(155, 43, 41, 3, yes, no, no) \
- SC(156, 43, 41, 4, yes, no, no) \
- \
- SC(157, 44, 42, 1, yes, no, no) \
- SC(158, 44, 42, 2, yes, no, no) \
- SC(159, 44, 42, 3, yes, no, no) \
- SC(160, 44, 42, 4, yes, no, no) \
- \
- SC(161, 45, 43, 1, yes, no, no) \
- SC(162, 45, 43, 2, yes, no, no) \
- SC(163, 45, 43, 3, yes, no, no) \
- SC(164, 45, 43, 4, yes, no, no) \
- \
- SC(165, 46, 44, 1, yes, no, no) \
- SC(166, 46, 44, 2, yes, no, no) \
- SC(167, 46, 44, 3, yes, no, no) \
- SC(168, 46, 44, 4, yes, no, no) \
- \
- SC(169, 47, 45, 1, yes, no, no) \
- SC(170, 47, 45, 2, yes, no, no) \
- SC(171, 47, 45, 3, yes, no, no) \
- SC(172, 47, 45, 4, yes, no, no) \
- \
- SC(173, 48, 46, 1, yes, no, no) \
- SC(174, 48, 46, 2, yes, no, no) \
- SC(175, 48, 46, 3, yes, no, no) \
- SC(176, 48, 46, 4, yes, no, no) \
- \
- SC(177, 49, 47, 1, yes, no, no) \
- SC(178, 49, 47, 2, yes, no, no) \
- SC(179, 49, 47, 3, yes, no, no) \
- SC(180, 49, 47, 4, yes, no, no) \
- \
- SC(181, 50, 48, 1, yes, no, no) \
- SC(182, 50, 48, 2, yes, no, no) \
- SC(183, 50, 48, 3, yes, no, no) \
- SC(184, 50, 48, 4, yes, no, no) \
- \
- SC(185, 51, 49, 1, yes, no, no) \
- SC(186, 51, 49, 2, yes, no, no) \
- SC(187, 51, 49, 3, yes, no, no) \
- SC(188, 51, 49, 4, yes, no, no) \
- \
- SC(189, 52, 50, 1, yes, no, no) \
- SC(190, 52, 50, 2, yes, no, no) \
- SC(191, 52, 50, 3, yes, no, no) \
- SC(192, 52, 50, 4, yes, no, no) \
- \
- SC(193, 53, 51, 1, yes, no, no) \
- SC(194, 53, 51, 2, yes, no, no) \
- SC(195, 53, 51, 3, yes, no, no) \
- SC(196, 53, 51, 4, yes, no, no) \
- \
- SC(197, 54, 52, 1, yes, no, no) \
- SC(198, 54, 52, 2, yes, no, no) \
- SC(199, 54, 52, 3, yes, no, no) \
- SC(200, 54, 52, 4, yes, no, no) \
- \
- SC(201, 55, 53, 1, yes, no, no) \
- SC(202, 55, 53, 2, yes, no, no) \
- SC(203, 55, 53, 3, yes, no, no) \
- SC(204, 55, 53, 4, yes, no, no) \
- \
- SC(205, 56, 54, 1, yes, no, no) \
- SC(206, 56, 54, 2, yes, no, no) \
- SC(207, 56, 54, 3, yes, no, no) \
- SC(208, 56, 54, 4, yes, no, no) \
- \
- SC(209, 57, 55, 1, yes, no, no) \
- SC(210, 57, 55, 2, yes, no, no) \
- SC(211, 57, 55, 3, yes, no, no) \
- SC(212, 57, 55, 4, yes, no, no) \
- \
- SC(213, 58, 56, 1, yes, no, no) \
- SC(214, 58, 56, 2, yes, no, no) \
- SC(215, 58, 56, 3, yes, no, no) \
- SC(216, 58, 56, 4, yes, no, no) \
- \
- SC(217, 59, 57, 1, yes, no, no) \
- SC(218, 59, 57, 2, yes, no, no) \
- SC(219, 59, 57, 3, yes, no, no) \
- SC(220, 59, 57, 4, yes, no, no) \
- \
- SC(221, 60, 58, 1, yes, no, no) \
- SC(222, 60, 58, 2, yes, no, no) \
- SC(223, 60, 58, 3, yes, no, no) \
- SC(224, 60, 58, 4, yes, no, no) \
- \
- SC(225, 61, 59, 1, yes, no, no) \
- SC(226, 61, 59, 2, yes, no, no) \
- SC(227, 61, 59, 3, yes, no, no) \
- SC(228, 61, 59, 4, yes, no, no) \
- \
- SC(229, 62, 60, 1, yes, no, no) \
- SC(230, 62, 60, 2, yes, no, no) \
- SC(231, 62, 60, 3, yes, no, no) \
+#define SIZE_CLASSES \
+ /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \
+ SC( 0, 3, 3, 0, no, yes, 1, 3) \
+ \
+ SC( 1, 3, 3, 1, no, yes, 1, 3) \
+ SC( 2, 4, 4, 1, no, yes, 1, 4) \
+ SC( 3, 4, 4, 2, no, yes, 3, 4) \
+ SC( 4, 4, 4, 3, no, yes, 1, 4) \
+ \
+ SC( 5, 6, 4, 1, no, yes, 5, 4) \
+ SC( 6, 6, 4, 2, no, yes, 3, 4) \
+ SC( 7, 6, 4, 3, no, yes, 7, 4) \
+ SC( 8, 6, 4, 4, no, yes, 1, 4) \
+ \
+ SC( 9, 7, 5, 1, no, yes, 5, 5) \
+ SC( 10, 7, 5, 2, no, yes, 3, 5) \
+ SC( 11, 7, 5, 3, no, yes, 7, 5) \
+ SC( 12, 7, 5, 4, no, yes, 1, 5) \
+ \
+ SC( 13, 8, 6, 1, no, yes, 5, 6) \
+ SC( 14, 8, 6, 2, no, yes, 3, 6) \
+ SC( 15, 8, 6, 3, no, yes, 7, 6) \
+ SC( 16, 8, 6, 4, no, yes, 1, 6) \
+ \
+ SC( 17, 9, 7, 1, no, yes, 5, 7) \
+ SC( 18, 9, 7, 2, no, yes, 3, 7) \
+ SC( 19, 9, 7, 3, no, yes, 7, 7) \
+ SC( 20, 9, 7, 4, no, yes, 1, 7) \
+ \
+ SC( 21, 10, 8, 1, no, yes, 5, 8) \
+ SC( 22, 10, 8, 2, no, yes, 3, 8) \
+ SC( 23, 10, 8, 3, no, yes, 7, 8) \
+ SC( 24, 10, 8, 4, no, yes, 1, 8) \
+ \
+ SC( 25, 11, 9, 1, no, yes, 5, 9) \
+ SC( 26, 11, 9, 2, no, yes, 3, 9) \
+ SC( 27, 11, 9, 3, no, yes, 7, 9) \
+ SC( 28, 11, 9, 4, no, yes, 1, 9) \
+ \
+ SC( 29, 12, 10, 1, no, yes, 5, no) \
+ SC( 30, 12, 10, 2, no, yes, 3, no) \
+ SC( 31, 12, 10, 3, no, yes, 7, no) \
+ SC( 32, 12, 10, 4, yes, yes, 1, no) \
+ \
+ SC( 33, 13, 11, 1, no, yes, 5, no) \
+ SC( 34, 13, 11, 2, no, yes, 3, no) \
+ SC( 35, 13, 11, 3, no, yes, 7, no) \
+ SC( 36, 13, 11, 4, yes, yes, 2, no) \
+ \
+ SC( 37, 14, 12, 1, no, yes, 5, no) \
+ SC( 38, 14, 12, 2, yes, yes, 3, no) \
+ SC( 39, 14, 12, 3, no, yes, 7, no) \
+ SC( 40, 14, 12, 4, yes, no, 0, no) \
+ \
+ SC( 41, 15, 13, 1, yes, no, 0, no) \
+ SC( 42, 15, 13, 2, yes, no, 0, no) \
+ SC( 43, 15, 13, 3, yes, no, 0, no) \
+ SC( 44, 15, 13, 4, yes, no, 0, no) \
+ \
+ SC( 45, 16, 14, 1, yes, no, 0, no) \
+ SC( 46, 16, 14, 2, yes, no, 0, no) \
+ SC( 47, 16, 14, 3, yes, no, 0, no) \
+ SC( 48, 16, 14, 4, yes, no, 0, no) \
+ \
+ SC( 49, 17, 15, 1, yes, no, 0, no) \
+ SC( 50, 17, 15, 2, yes, no, 0, no) \
+ SC( 51, 17, 15, 3, yes, no, 0, no) \
+ SC( 52, 17, 15, 4, yes, no, 0, no) \
+ \
+ SC( 53, 18, 16, 1, yes, no, 0, no) \
+ SC( 54, 18, 16, 2, yes, no, 0, no) \
+ SC( 55, 18, 16, 3, yes, no, 0, no) \
+ SC( 56, 18, 16, 4, yes, no, 0, no) \
+ \
+ SC( 57, 19, 17, 1, yes, no, 0, no) \
+ SC( 58, 19, 17, 2, yes, no, 0, no) \
+ SC( 59, 19, 17, 3, yes, no, 0, no) \
+ SC( 60, 19, 17, 4, yes, no, 0, no) \
+ \
+ SC( 61, 20, 18, 1, yes, no, 0, no) \
+ SC( 62, 20, 18, 2, yes, no, 0, no) \
+ SC( 63, 20, 18, 3, yes, no, 0, no) \
+ SC( 64, 20, 18, 4, yes, no, 0, no) \
+ \
+ SC( 65, 21, 19, 1, yes, no, 0, no) \
+ SC( 66, 21, 19, 2, yes, no, 0, no) \
+ SC( 67, 21, 19, 3, yes, no, 0, no) \
+ SC( 68, 21, 19, 4, yes, no, 0, no) \
+ \
+ SC( 69, 22, 20, 1, yes, no, 0, no) \
+ SC( 70, 22, 20, 2, yes, no, 0, no) \
+ SC( 71, 22, 20, 3, yes, no, 0, no) \
+ SC( 72, 22, 20, 4, yes, no, 0, no) \
+ \
+ SC( 73, 23, 21, 1, yes, no, 0, no) \
+ SC( 74, 23, 21, 2, yes, no, 0, no) \
+ SC( 75, 23, 21, 3, yes, no, 0, no) \
+ SC( 76, 23, 21, 4, yes, no, 0, no) \
+ \
+ SC( 77, 24, 22, 1, yes, no, 0, no) \
+ SC( 78, 24, 22, 2, yes, no, 0, no) \
+ SC( 79, 24, 22, 3, yes, no, 0, no) \
+ SC( 80, 24, 22, 4, yes, no, 0, no) \
+ \
+ SC( 81, 25, 23, 1, yes, no, 0, no) \
+ SC( 82, 25, 23, 2, yes, no, 0, no) \
+ SC( 83, 25, 23, 3, yes, no, 0, no) \
+ SC( 84, 25, 23, 4, yes, no, 0, no) \
+ \
+ SC( 85, 26, 24, 1, yes, no, 0, no) \
+ SC( 86, 26, 24, 2, yes, no, 0, no) \
+ SC( 87, 26, 24, 3, yes, no, 0, no) \
+ SC( 88, 26, 24, 4, yes, no, 0, no) \
+ \
+ SC( 89, 27, 25, 1, yes, no, 0, no) \
+ SC( 90, 27, 25, 2, yes, no, 0, no) \
+ SC( 91, 27, 25, 3, yes, no, 0, no) \
+ SC( 92, 27, 25, 4, yes, no, 0, no) \
+ \
+ SC( 93, 28, 26, 1, yes, no, 0, no) \
+ SC( 94, 28, 26, 2, yes, no, 0, no) \
+ SC( 95, 28, 26, 3, yes, no, 0, no) \
+ SC( 96, 28, 26, 4, yes, no, 0, no) \
+ \
+ SC( 97, 29, 27, 1, yes, no, 0, no) \
+ SC( 98, 29, 27, 2, yes, no, 0, no) \
+ SC( 99, 29, 27, 3, yes, no, 0, no) \
+ SC(100, 29, 27, 4, yes, no, 0, no) \
+ \
+ SC(101, 30, 28, 1, yes, no, 0, no) \
+ SC(102, 30, 28, 2, yes, no, 0, no) \
+ SC(103, 30, 28, 3, yes, no, 0, no) \
+ SC(104, 30, 28, 4, yes, no, 0, no) \
+ \
+ SC(105, 31, 29, 1, yes, no, 0, no) \
+ SC(106, 31, 29, 2, yes, no, 0, no) \
+ SC(107, 31, 29, 3, yes, no, 0, no) \
+ SC(108, 31, 29, 4, yes, no, 0, no) \
+ \
+ SC(109, 32, 30, 1, yes, no, 0, no) \
+ SC(110, 32, 30, 2, yes, no, 0, no) \
+ SC(111, 32, 30, 3, yes, no, 0, no) \
+ SC(112, 32, 30, 4, yes, no, 0, no) \
+ \
+ SC(113, 33, 31, 1, yes, no, 0, no) \
+ SC(114, 33, 31, 2, yes, no, 0, no) \
+ SC(115, 33, 31, 3, yes, no, 0, no) \
+ SC(116, 33, 31, 4, yes, no, 0, no) \
+ \
+ SC(117, 34, 32, 1, yes, no, 0, no) \
+ SC(118, 34, 32, 2, yes, no, 0, no) \
+ SC(119, 34, 32, 3, yes, no, 0, no) \
+ SC(120, 34, 32, 4, yes, no, 0, no) \
+ \
+ SC(121, 35, 33, 1, yes, no, 0, no) \
+ SC(122, 35, 33, 2, yes, no, 0, no) \
+ SC(123, 35, 33, 3, yes, no, 0, no) \
+ SC(124, 35, 33, 4, yes, no, 0, no) \
+ \
+ SC(125, 36, 34, 1, yes, no, 0, no) \
+ SC(126, 36, 34, 2, yes, no, 0, no) \
+ SC(127, 36, 34, 3, yes, no, 0, no) \
+ SC(128, 36, 34, 4, yes, no, 0, no) \
+ \
+ SC(129, 37, 35, 1, yes, no, 0, no) \
+ SC(130, 37, 35, 2, yes, no, 0, no) \
+ SC(131, 37, 35, 3, yes, no, 0, no) \
+ SC(132, 37, 35, 4, yes, no, 0, no) \
+ \
+ SC(133, 38, 36, 1, yes, no, 0, no) \
+ SC(134, 38, 36, 2, yes, no, 0, no) \
+ SC(135, 38, 36, 3, yes, no, 0, no) \
+ SC(136, 38, 36, 4, yes, no, 0, no) \
+ \
+ SC(137, 39, 37, 1, yes, no, 0, no) \
+ SC(138, 39, 37, 2, yes, no, 0, no) \
+ SC(139, 39, 37, 3, yes, no, 0, no) \
+ SC(140, 39, 37, 4, yes, no, 0, no) \
+ \
+ SC(141, 40, 38, 1, yes, no, 0, no) \
+ SC(142, 40, 38, 2, yes, no, 0, no) \
+ SC(143, 40, 38, 3, yes, no, 0, no) \
+ SC(144, 40, 38, 4, yes, no, 0, no) \
+ \
+ SC(145, 41, 39, 1, yes, no, 0, no) \
+ SC(146, 41, 39, 2, yes, no, 0, no) \
+ SC(147, 41, 39, 3, yes, no, 0, no) \
+ SC(148, 41, 39, 4, yes, no, 0, no) \
+ \
+ SC(149, 42, 40, 1, yes, no, 0, no) \
+ SC(150, 42, 40, 2, yes, no, 0, no) \
+ SC(151, 42, 40, 3, yes, no, 0, no) \
+ SC(152, 42, 40, 4, yes, no, 0, no) \
+ \
+ SC(153, 43, 41, 1, yes, no, 0, no) \
+ SC(154, 43, 41, 2, yes, no, 0, no) \
+ SC(155, 43, 41, 3, yes, no, 0, no) \
+ SC(156, 43, 41, 4, yes, no, 0, no) \
+ \
+ SC(157, 44, 42, 1, yes, no, 0, no) \
+ SC(158, 44, 42, 2, yes, no, 0, no) \
+ SC(159, 44, 42, 3, yes, no, 0, no) \
+ SC(160, 44, 42, 4, yes, no, 0, no) \
+ \
+ SC(161, 45, 43, 1, yes, no, 0, no) \
+ SC(162, 45, 43, 2, yes, no, 0, no) \
+ SC(163, 45, 43, 3, yes, no, 0, no) \
+ SC(164, 45, 43, 4, yes, no, 0, no) \
+ \
+ SC(165, 46, 44, 1, yes, no, 0, no) \
+ SC(166, 46, 44, 2, yes, no, 0, no) \
+ SC(167, 46, 44, 3, yes, no, 0, no) \
+ SC(168, 46, 44, 4, yes, no, 0, no) \
+ \
+ SC(169, 47, 45, 1, yes, no, 0, no) \
+ SC(170, 47, 45, 2, yes, no, 0, no) \
+ SC(171, 47, 45, 3, yes, no, 0, no) \
+ SC(172, 47, 45, 4, yes, no, 0, no) \
+ \
+ SC(173, 48, 46, 1, yes, no, 0, no) \
+ SC(174, 48, 46, 2, yes, no, 0, no) \
+ SC(175, 48, 46, 3, yes, no, 0, no) \
+ SC(176, 48, 46, 4, yes, no, 0, no) \
+ \
+ SC(177, 49, 47, 1, yes, no, 0, no) \
+ SC(178, 49, 47, 2, yes, no, 0, no) \
+ SC(179, 49, 47, 3, yes, no, 0, no) \
+ SC(180, 49, 47, 4, yes, no, 0, no) \
+ \
+ SC(181, 50, 48, 1, yes, no, 0, no) \
+ SC(182, 50, 48, 2, yes, no, 0, no) \
+ SC(183, 50, 48, 3, yes, no, 0, no) \
+ SC(184, 50, 48, 4, yes, no, 0, no) \
+ \
+ SC(185, 51, 49, 1, yes, no, 0, no) \
+ SC(186, 51, 49, 2, yes, no, 0, no) \
+ SC(187, 51, 49, 3, yes, no, 0, no) \
+ SC(188, 51, 49, 4, yes, no, 0, no) \
+ \
+ SC(189, 52, 50, 1, yes, no, 0, no) \
+ SC(190, 52, 50, 2, yes, no, 0, no) \
+ SC(191, 52, 50, 3, yes, no, 0, no) \
+ SC(192, 52, 50, 4, yes, no, 0, no) \
+ \
+ SC(193, 53, 51, 1, yes, no, 0, no) \
+ SC(194, 53, 51, 2, yes, no, 0, no) \
+ SC(195, 53, 51, 3, yes, no, 0, no) \
+ SC(196, 53, 51, 4, yes, no, 0, no) \
+ \
+ SC(197, 54, 52, 1, yes, no, 0, no) \
+ SC(198, 54, 52, 2, yes, no, 0, no) \
+ SC(199, 54, 52, 3, yes, no, 0, no) \
+ SC(200, 54, 52, 4, yes, no, 0, no) \
+ \
+ SC(201, 55, 53, 1, yes, no, 0, no) \
+ SC(202, 55, 53, 2, yes, no, 0, no) \
+ SC(203, 55, 53, 3, yes, no, 0, no) \
+ SC(204, 55, 53, 4, yes, no, 0, no) \
+ \
+ SC(205, 56, 54, 1, yes, no, 0, no) \
+ SC(206, 56, 54, 2, yes, no, 0, no) \
+ SC(207, 56, 54, 3, yes, no, 0, no) \
+ SC(208, 56, 54, 4, yes, no, 0, no) \
+ \
+ SC(209, 57, 55, 1, yes, no, 0, no) \
+ SC(210, 57, 55, 2, yes, no, 0, no) \
+ SC(211, 57, 55, 3, yes, no, 0, no) \
+ SC(212, 57, 55, 4, yes, no, 0, no) \
+ \
+ SC(213, 58, 56, 1, yes, no, 0, no) \
+ SC(214, 58, 56, 2, yes, no, 0, no) \
+ SC(215, 58, 56, 3, yes, no, 0, no) \
+ SC(216, 58, 56, 4, yes, no, 0, no) \
+ \
+ SC(217, 59, 57, 1, yes, no, 0, no) \
+ SC(218, 59, 57, 2, yes, no, 0, no) \
+ SC(219, 59, 57, 3, yes, no, 0, no) \
+ SC(220, 59, 57, 4, yes, no, 0, no) \
+ \
+ SC(221, 60, 58, 1, yes, no, 0, no) \
+ SC(222, 60, 58, 2, yes, no, 0, no) \
+ SC(223, 60, 58, 3, yes, no, 0, no) \
+ SC(224, 60, 58, 4, yes, no, 0, no) \
+ \
+ SC(225, 61, 59, 1, yes, no, 0, no) \
+ SC(226, 61, 59, 2, yes, no, 0, no) \
+ SC(227, 61, 59, 3, yes, no, 0, no) \
+ SC(228, 61, 59, 4, yes, no, 0, no) \
+ \
+ SC(229, 62, 60, 1, yes, no, 0, no) \
+ SC(230, 62, 60, 2, yes, no, 0, no) \
+ SC(231, 62, 60, 3, yes, no, 0, no) \
-#define SIZE_CLASSES_DEFINED
-#define NTBINS 1
-#define NLBINS 29
-#define NBINS 40
-#define NSIZES 232
-#define NPSIZES 195
-#define LG_TINY_MAXCLASS 3
-#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
-#define SMALL_MAXCLASS ((((size_t)1) << 14) + (((size_t)3) << 12))
-#define LG_LARGE_MINCLASS 15
-#define HUGE_MAXCLASS ((((size_t)1) << 62) + (((size_t)3) << 60))
+#define SIZE_CLASSES_DEFINED
+#define NTBINS 1
+#define NLBINS 29
+#define NBINS 40
+#define NSIZES 232
+#define LG_CEIL_NSIZES 8
+#define NPSIZES 195
+#define LG_TINY_MAXCLASS 3
+#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
+#define SMALL_MAXCLASS ((((size_t)1) << 14) + (((size_t)3) << 12))
+#define LG_LARGE_MINCLASS 15
+#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)
+#define LARGE_MAXCLASS ((((size_t)1) << 62) + (((size_t)3) << 60))
#endif
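
/*
 * Illustrative note, not part of the diff above: the table this hunk rewrites
 * is jemalloc's generated size-class header (produced by size_classes.sh), and
 * each SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) row
 * encodes a class whose byte size is (1 << lg_grp) + (ndelta << lg_delta).
 * The standalone C sketch below is my own cross-check of that relationship
 * against the constants defined just above (SMALL_MAXCLASS for this
 * configuration); the helper name sc_size is hypothetical, not a jemalloc
 * symbol.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Byte size encoded by one SC() row: group base plus ndelta deltas. */
static size_t
sc_size(unsigned lg_grp, unsigned lg_delta, unsigned ndelta) {
	return (((size_t)1) << lg_grp) + (((size_t)ndelta) << lg_delta);
}

int
main(void) {
	/* Last bin row of the block above: SC(39, 14, 12, 3, ...). */
	size_t small_max = sc_size(14, 12, 3);

	/* Matches SMALL_MAXCLASS == ((size_t)1 << 14) + ((size_t)3 << 12). */
	assert(small_max == (((size_t)1) << 14) + (((size_t)3) << 12));

	/* Last lookup-table row: SC(28, 11, 9, 4, ...), cf. LOOKUP_MAXCLASS. */
	assert(sc_size(11, 9, 4) ==
	    (((size_t)1) << 11) + (((size_t)4) << 9));

	printf("SMALL_MAXCLASS for this configuration: %zu bytes\n", small_max);
	return 0;
}
/*
 * The same arithmetic applies to every configuration block in this diff; only
 * which rows are page-sized (psz), binned (bin), and how many slab pages (pgs)
 * they use change with LG_PAGE, which is why NBINS, NPSIZES, SMALL_MAXCLASS,
 * and LG_LARGE_MINCLASS differ between the #if blocks while NSIZES stays 232.
 */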
#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 14)
-#define SIZE_CLASSES \
- /* index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup */ \
- SC( 0, 3, 3, 0, no, yes, 3) \
- \
- SC( 1, 3, 3, 1, no, yes, 3) \
- SC( 2, 4, 4, 1, no, yes, 4) \
- SC( 3, 4, 4, 2, no, yes, 4) \
- SC( 4, 4, 4, 3, no, yes, 4) \
- \
- SC( 5, 6, 4, 1, no, yes, 4) \
- SC( 6, 6, 4, 2, no, yes, 4) \
- SC( 7, 6, 4, 3, no, yes, 4) \
- SC( 8, 6, 4, 4, no, yes, 4) \
- \
- SC( 9, 7, 5, 1, no, yes, 5) \
- SC( 10, 7, 5, 2, no, yes, 5) \
- SC( 11, 7, 5, 3, no, yes, 5) \
- SC( 12, 7, 5, 4, no, yes, 5) \
- \
- SC( 13, 8, 6, 1, no, yes, 6) \
- SC( 14, 8, 6, 2, no, yes, 6) \
- SC( 15, 8, 6, 3, no, yes, 6) \
- SC( 16, 8, 6, 4, no, yes, 6) \
- \
- SC( 17, 9, 7, 1, no, yes, 7) \
- SC( 18, 9, 7, 2, no, yes, 7) \
- SC( 19, 9, 7, 3, no, yes, 7) \
- SC( 20, 9, 7, 4, no, yes, 7) \
- \
- SC( 21, 10, 8, 1, no, yes, 8) \
- SC( 22, 10, 8, 2, no, yes, 8) \
- SC( 23, 10, 8, 3, no, yes, 8) \
- SC( 24, 10, 8, 4, no, yes, 8) \
- \
- SC( 25, 11, 9, 1, no, yes, 9) \
- SC( 26, 11, 9, 2, no, yes, 9) \
- SC( 27, 11, 9, 3, no, yes, 9) \
- SC( 28, 11, 9, 4, no, yes, 9) \
- \
- SC( 29, 12, 10, 1, no, yes, no) \
- SC( 30, 12, 10, 2, no, yes, no) \
- SC( 31, 12, 10, 3, no, yes, no) \
- SC( 32, 12, 10, 4, no, yes, no) \
- \
- SC( 33, 13, 11, 1, no, yes, no) \
- SC( 34, 13, 11, 2, no, yes, no) \
- SC( 35, 13, 11, 3, no, yes, no) \
- SC( 36, 13, 11, 4, yes, yes, no) \
- \
- SC( 37, 14, 12, 1, no, yes, no) \
- SC( 38, 14, 12, 2, no, yes, no) \
- SC( 39, 14, 12, 3, no, yes, no) \
- SC( 40, 14, 12, 4, yes, yes, no) \
- \
- SC( 41, 15, 13, 1, no, yes, no) \
- SC( 42, 15, 13, 2, yes, yes, no) \
- SC( 43, 15, 13, 3, no, yes, no) \
- SC( 44, 15, 13, 4, yes, no, no) \
- \
- SC( 45, 16, 14, 1, yes, no, no) \
- SC( 46, 16, 14, 2, yes, no, no) \
- SC( 47, 16, 14, 3, yes, no, no) \
- SC( 48, 16, 14, 4, yes, no, no) \
- \
- SC( 49, 17, 15, 1, yes, no, no) \
- SC( 50, 17, 15, 2, yes, no, no) \
- SC( 51, 17, 15, 3, yes, no, no) \
- SC( 52, 17, 15, 4, yes, no, no) \
- \
- SC( 53, 18, 16, 1, yes, no, no) \
- SC( 54, 18, 16, 2, yes, no, no) \
- SC( 55, 18, 16, 3, yes, no, no) \
- SC( 56, 18, 16, 4, yes, no, no) \
- \
- SC( 57, 19, 17, 1, yes, no, no) \
- SC( 58, 19, 17, 2, yes, no, no) \
- SC( 59, 19, 17, 3, yes, no, no) \
- SC( 60, 19, 17, 4, yes, no, no) \
- \
- SC( 61, 20, 18, 1, yes, no, no) \
- SC( 62, 20, 18, 2, yes, no, no) \
- SC( 63, 20, 18, 3, yes, no, no) \
- SC( 64, 20, 18, 4, yes, no, no) \
- \
- SC( 65, 21, 19, 1, yes, no, no) \
- SC( 66, 21, 19, 2, yes, no, no) \
- SC( 67, 21, 19, 3, yes, no, no) \
- SC( 68, 21, 19, 4, yes, no, no) \
- \
- SC( 69, 22, 20, 1, yes, no, no) \
- SC( 70, 22, 20, 2, yes, no, no) \
- SC( 71, 22, 20, 3, yes, no, no) \
- SC( 72, 22, 20, 4, yes, no, no) \
- \
- SC( 73, 23, 21, 1, yes, no, no) \
- SC( 74, 23, 21, 2, yes, no, no) \
- SC( 75, 23, 21, 3, yes, no, no) \
- SC( 76, 23, 21, 4, yes, no, no) \
- \
- SC( 77, 24, 22, 1, yes, no, no) \
- SC( 78, 24, 22, 2, yes, no, no) \
- SC( 79, 24, 22, 3, yes, no, no) \
- SC( 80, 24, 22, 4, yes, no, no) \
- \
- SC( 81, 25, 23, 1, yes, no, no) \
- SC( 82, 25, 23, 2, yes, no, no) \
- SC( 83, 25, 23, 3, yes, no, no) \
- SC( 84, 25, 23, 4, yes, no, no) \
- \
- SC( 85, 26, 24, 1, yes, no, no) \
- SC( 86, 26, 24, 2, yes, no, no) \
- SC( 87, 26, 24, 3, yes, no, no) \
- SC( 88, 26, 24, 4, yes, no, no) \
- \
- SC( 89, 27, 25, 1, yes, no, no) \
- SC( 90, 27, 25, 2, yes, no, no) \
- SC( 91, 27, 25, 3, yes, no, no) \
- SC( 92, 27, 25, 4, yes, no, no) \
- \
- SC( 93, 28, 26, 1, yes, no, no) \
- SC( 94, 28, 26, 2, yes, no, no) \
- SC( 95, 28, 26, 3, yes, no, no) \
- SC( 96, 28, 26, 4, yes, no, no) \
- \
- SC( 97, 29, 27, 1, yes, no, no) \
- SC( 98, 29, 27, 2, yes, no, no) \
- SC( 99, 29, 27, 3, yes, no, no) \
- SC(100, 29, 27, 4, yes, no, no) \
- \
- SC(101, 30, 28, 1, yes, no, no) \
- SC(102, 30, 28, 2, yes, no, no) \
- SC(103, 30, 28, 3, yes, no, no) \
- SC(104, 30, 28, 4, yes, no, no) \
- \
- SC(105, 31, 29, 1, yes, no, no) \
- SC(106, 31, 29, 2, yes, no, no) \
- SC(107, 31, 29, 3, yes, no, no) \
- SC(108, 31, 29, 4, yes, no, no) \
- \
- SC(109, 32, 30, 1, yes, no, no) \
- SC(110, 32, 30, 2, yes, no, no) \
- SC(111, 32, 30, 3, yes, no, no) \
- SC(112, 32, 30, 4, yes, no, no) \
- \
- SC(113, 33, 31, 1, yes, no, no) \
- SC(114, 33, 31, 2, yes, no, no) \
- SC(115, 33, 31, 3, yes, no, no) \
- SC(116, 33, 31, 4, yes, no, no) \
- \
- SC(117, 34, 32, 1, yes, no, no) \
- SC(118, 34, 32, 2, yes, no, no) \
- SC(119, 34, 32, 3, yes, no, no) \
- SC(120, 34, 32, 4, yes, no, no) \
- \
- SC(121, 35, 33, 1, yes, no, no) \
- SC(122, 35, 33, 2, yes, no, no) \
- SC(123, 35, 33, 3, yes, no, no) \
- SC(124, 35, 33, 4, yes, no, no) \
- \
- SC(125, 36, 34, 1, yes, no, no) \
- SC(126, 36, 34, 2, yes, no, no) \
- SC(127, 36, 34, 3, yes, no, no) \
- SC(128, 36, 34, 4, yes, no, no) \
- \
- SC(129, 37, 35, 1, yes, no, no) \
- SC(130, 37, 35, 2, yes, no, no) \
- SC(131, 37, 35, 3, yes, no, no) \
- SC(132, 37, 35, 4, yes, no, no) \
- \
- SC(133, 38, 36, 1, yes, no, no) \
- SC(134, 38, 36, 2, yes, no, no) \
- SC(135, 38, 36, 3, yes, no, no) \
- SC(136, 38, 36, 4, yes, no, no) \
- \
- SC(137, 39, 37, 1, yes, no, no) \
- SC(138, 39, 37, 2, yes, no, no) \
- SC(139, 39, 37, 3, yes, no, no) \
- SC(140, 39, 37, 4, yes, no, no) \
- \
- SC(141, 40, 38, 1, yes, no, no) \
- SC(142, 40, 38, 2, yes, no, no) \
- SC(143, 40, 38, 3, yes, no, no) \
- SC(144, 40, 38, 4, yes, no, no) \
- \
- SC(145, 41, 39, 1, yes, no, no) \
- SC(146, 41, 39, 2, yes, no, no) \
- SC(147, 41, 39, 3, yes, no, no) \
- SC(148, 41, 39, 4, yes, no, no) \
- \
- SC(149, 42, 40, 1, yes, no, no) \
- SC(150, 42, 40, 2, yes, no, no) \
- SC(151, 42, 40, 3, yes, no, no) \
- SC(152, 42, 40, 4, yes, no, no) \
- \
- SC(153, 43, 41, 1, yes, no, no) \
- SC(154, 43, 41, 2, yes, no, no) \
- SC(155, 43, 41, 3, yes, no, no) \
- SC(156, 43, 41, 4, yes, no, no) \
- \
- SC(157, 44, 42, 1, yes, no, no) \
- SC(158, 44, 42, 2, yes, no, no) \
- SC(159, 44, 42, 3, yes, no, no) \
- SC(160, 44, 42, 4, yes, no, no) \
- \
- SC(161, 45, 43, 1, yes, no, no) \
- SC(162, 45, 43, 2, yes, no, no) \
- SC(163, 45, 43, 3, yes, no, no) \
- SC(164, 45, 43, 4, yes, no, no) \
- \
- SC(165, 46, 44, 1, yes, no, no) \
- SC(166, 46, 44, 2, yes, no, no) \
- SC(167, 46, 44, 3, yes, no, no) \
- SC(168, 46, 44, 4, yes, no, no) \
- \
- SC(169, 47, 45, 1, yes, no, no) \
- SC(170, 47, 45, 2, yes, no, no) \
- SC(171, 47, 45, 3, yes, no, no) \
- SC(172, 47, 45, 4, yes, no, no) \
- \
- SC(173, 48, 46, 1, yes, no, no) \
- SC(174, 48, 46, 2, yes, no, no) \
- SC(175, 48, 46, 3, yes, no, no) \
- SC(176, 48, 46, 4, yes, no, no) \
- \
- SC(177, 49, 47, 1, yes, no, no) \
- SC(178, 49, 47, 2, yes, no, no) \
- SC(179, 49, 47, 3, yes, no, no) \
- SC(180, 49, 47, 4, yes, no, no) \
- \
- SC(181, 50, 48, 1, yes, no, no) \
- SC(182, 50, 48, 2, yes, no, no) \
- SC(183, 50, 48, 3, yes, no, no) \
- SC(184, 50, 48, 4, yes, no, no) \
- \
- SC(185, 51, 49, 1, yes, no, no) \
- SC(186, 51, 49, 2, yes, no, no) \
- SC(187, 51, 49, 3, yes, no, no) \
- SC(188, 51, 49, 4, yes, no, no) \
- \
- SC(189, 52, 50, 1, yes, no, no) \
- SC(190, 52, 50, 2, yes, no, no) \
- SC(191, 52, 50, 3, yes, no, no) \
- SC(192, 52, 50, 4, yes, no, no) \
- \
- SC(193, 53, 51, 1, yes, no, no) \
- SC(194, 53, 51, 2, yes, no, no) \
- SC(195, 53, 51, 3, yes, no, no) \
- SC(196, 53, 51, 4, yes, no, no) \
- \
- SC(197, 54, 52, 1, yes, no, no) \
- SC(198, 54, 52, 2, yes, no, no) \
- SC(199, 54, 52, 3, yes, no, no) \
- SC(200, 54, 52, 4, yes, no, no) \
- \
- SC(201, 55, 53, 1, yes, no, no) \
- SC(202, 55, 53, 2, yes, no, no) \
- SC(203, 55, 53, 3, yes, no, no) \
- SC(204, 55, 53, 4, yes, no, no) \
- \
- SC(205, 56, 54, 1, yes, no, no) \
- SC(206, 56, 54, 2, yes, no, no) \
- SC(207, 56, 54, 3, yes, no, no) \
- SC(208, 56, 54, 4, yes, no, no) \
- \
- SC(209, 57, 55, 1, yes, no, no) \
- SC(210, 57, 55, 2, yes, no, no) \
- SC(211, 57, 55, 3, yes, no, no) \
- SC(212, 57, 55, 4, yes, no, no) \
- \
- SC(213, 58, 56, 1, yes, no, no) \
- SC(214, 58, 56, 2, yes, no, no) \
- SC(215, 58, 56, 3, yes, no, no) \
- SC(216, 58, 56, 4, yes, no, no) \
- \
- SC(217, 59, 57, 1, yes, no, no) \
- SC(218, 59, 57, 2, yes, no, no) \
- SC(219, 59, 57, 3, yes, no, no) \
- SC(220, 59, 57, 4, yes, no, no) \
- \
- SC(221, 60, 58, 1, yes, no, no) \
- SC(222, 60, 58, 2, yes, no, no) \
- SC(223, 60, 58, 3, yes, no, no) \
- SC(224, 60, 58, 4, yes, no, no) \
- \
- SC(225, 61, 59, 1, yes, no, no) \
- SC(226, 61, 59, 2, yes, no, no) \
- SC(227, 61, 59, 3, yes, no, no) \
- SC(228, 61, 59, 4, yes, no, no) \
- \
- SC(229, 62, 60, 1, yes, no, no) \
- SC(230, 62, 60, 2, yes, no, no) \
- SC(231, 62, 60, 3, yes, no, no) \
+#define SIZE_CLASSES \
+ /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \
+ SC( 0, 3, 3, 0, no, yes, 1, 3) \
+ \
+ SC( 1, 3, 3, 1, no, yes, 1, 3) \
+ SC( 2, 4, 4, 1, no, yes, 1, 4) \
+ SC( 3, 4, 4, 2, no, yes, 3, 4) \
+ SC( 4, 4, 4, 3, no, yes, 1, 4) \
+ \
+ SC( 5, 6, 4, 1, no, yes, 5, 4) \
+ SC( 6, 6, 4, 2, no, yes, 3, 4) \
+ SC( 7, 6, 4, 3, no, yes, 7, 4) \
+ SC( 8, 6, 4, 4, no, yes, 1, 4) \
+ \
+ SC( 9, 7, 5, 1, no, yes, 5, 5) \
+ SC( 10, 7, 5, 2, no, yes, 3, 5) \
+ SC( 11, 7, 5, 3, no, yes, 7, 5) \
+ SC( 12, 7, 5, 4, no, yes, 1, 5) \
+ \
+ SC( 13, 8, 6, 1, no, yes, 5, 6) \
+ SC( 14, 8, 6, 2, no, yes, 3, 6) \
+ SC( 15, 8, 6, 3, no, yes, 7, 6) \
+ SC( 16, 8, 6, 4, no, yes, 1, 6) \
+ \
+ SC( 17, 9, 7, 1, no, yes, 5, 7) \
+ SC( 18, 9, 7, 2, no, yes, 3, 7) \
+ SC( 19, 9, 7, 3, no, yes, 7, 7) \
+ SC( 20, 9, 7, 4, no, yes, 1, 7) \
+ \
+ SC( 21, 10, 8, 1, no, yes, 5, 8) \
+ SC( 22, 10, 8, 2, no, yes, 3, 8) \
+ SC( 23, 10, 8, 3, no, yes, 7, 8) \
+ SC( 24, 10, 8, 4, no, yes, 1, 8) \
+ \
+ SC( 25, 11, 9, 1, no, yes, 5, 9) \
+ SC( 26, 11, 9, 2, no, yes, 3, 9) \
+ SC( 27, 11, 9, 3, no, yes, 7, 9) \
+ SC( 28, 11, 9, 4, no, yes, 1, 9) \
+ \
+ SC( 29, 12, 10, 1, no, yes, 5, no) \
+ SC( 30, 12, 10, 2, no, yes, 3, no) \
+ SC( 31, 12, 10, 3, no, yes, 7, no) \
+ SC( 32, 12, 10, 4, no, yes, 1, no) \
+ \
+ SC( 33, 13, 11, 1, no, yes, 5, no) \
+ SC( 34, 13, 11, 2, no, yes, 3, no) \
+ SC( 35, 13, 11, 3, no, yes, 7, no) \
+ SC( 36, 13, 11, 4, yes, yes, 1, no) \
+ \
+ SC( 37, 14, 12, 1, no, yes, 5, no) \
+ SC( 38, 14, 12, 2, no, yes, 3, no) \
+ SC( 39, 14, 12, 3, no, yes, 7, no) \
+ SC( 40, 14, 12, 4, yes, yes, 2, no) \
+ \
+ SC( 41, 15, 13, 1, no, yes, 5, no) \
+ SC( 42, 15, 13, 2, yes, yes, 3, no) \
+ SC( 43, 15, 13, 3, no, yes, 7, no) \
+ SC( 44, 15, 13, 4, yes, no, 0, no) \
+ \
+ SC( 45, 16, 14, 1, yes, no, 0, no) \
+ SC( 46, 16, 14, 2, yes, no, 0, no) \
+ SC( 47, 16, 14, 3, yes, no, 0, no) \
+ SC( 48, 16, 14, 4, yes, no, 0, no) \
+ \
+ SC( 49, 17, 15, 1, yes, no, 0, no) \
+ SC( 50, 17, 15, 2, yes, no, 0, no) \
+ SC( 51, 17, 15, 3, yes, no, 0, no) \
+ SC( 52, 17, 15, 4, yes, no, 0, no) \
+ \
+ SC( 53, 18, 16, 1, yes, no, 0, no) \
+ SC( 54, 18, 16, 2, yes, no, 0, no) \
+ SC( 55, 18, 16, 3, yes, no, 0, no) \
+ SC( 56, 18, 16, 4, yes, no, 0, no) \
+ \
+ SC( 57, 19, 17, 1, yes, no, 0, no) \
+ SC( 58, 19, 17, 2, yes, no, 0, no) \
+ SC( 59, 19, 17, 3, yes, no, 0, no) \
+ SC( 60, 19, 17, 4, yes, no, 0, no) \
+ \
+ SC( 61, 20, 18, 1, yes, no, 0, no) \
+ SC( 62, 20, 18, 2, yes, no, 0, no) \
+ SC( 63, 20, 18, 3, yes, no, 0, no) \
+ SC( 64, 20, 18, 4, yes, no, 0, no) \
+ \
+ SC( 65, 21, 19, 1, yes, no, 0, no) \
+ SC( 66, 21, 19, 2, yes, no, 0, no) \
+ SC( 67, 21, 19, 3, yes, no, 0, no) \
+ SC( 68, 21, 19, 4, yes, no, 0, no) \
+ \
+ SC( 69, 22, 20, 1, yes, no, 0, no) \
+ SC( 70, 22, 20, 2, yes, no, 0, no) \
+ SC( 71, 22, 20, 3, yes, no, 0, no) \
+ SC( 72, 22, 20, 4, yes, no, 0, no) \
+ \
+ SC( 73, 23, 21, 1, yes, no, 0, no) \
+ SC( 74, 23, 21, 2, yes, no, 0, no) \
+ SC( 75, 23, 21, 3, yes, no, 0, no) \
+ SC( 76, 23, 21, 4, yes, no, 0, no) \
+ \
+ SC( 77, 24, 22, 1, yes, no, 0, no) \
+ SC( 78, 24, 22, 2, yes, no, 0, no) \
+ SC( 79, 24, 22, 3, yes, no, 0, no) \
+ SC( 80, 24, 22, 4, yes, no, 0, no) \
+ \
+ SC( 81, 25, 23, 1, yes, no, 0, no) \
+ SC( 82, 25, 23, 2, yes, no, 0, no) \
+ SC( 83, 25, 23, 3, yes, no, 0, no) \
+ SC( 84, 25, 23, 4, yes, no, 0, no) \
+ \
+ SC( 85, 26, 24, 1, yes, no, 0, no) \
+ SC( 86, 26, 24, 2, yes, no, 0, no) \
+ SC( 87, 26, 24, 3, yes, no, 0, no) \
+ SC( 88, 26, 24, 4, yes, no, 0, no) \
+ \
+ SC( 89, 27, 25, 1, yes, no, 0, no) \
+ SC( 90, 27, 25, 2, yes, no, 0, no) \
+ SC( 91, 27, 25, 3, yes, no, 0, no) \
+ SC( 92, 27, 25, 4, yes, no, 0, no) \
+ \
+ SC( 93, 28, 26, 1, yes, no, 0, no) \
+ SC( 94, 28, 26, 2, yes, no, 0, no) \
+ SC( 95, 28, 26, 3, yes, no, 0, no) \
+ SC( 96, 28, 26, 4, yes, no, 0, no) \
+ \
+ SC( 97, 29, 27, 1, yes, no, 0, no) \
+ SC( 98, 29, 27, 2, yes, no, 0, no) \
+ SC( 99, 29, 27, 3, yes, no, 0, no) \
+ SC(100, 29, 27, 4, yes, no, 0, no) \
+ \
+ SC(101, 30, 28, 1, yes, no, 0, no) \
+ SC(102, 30, 28, 2, yes, no, 0, no) \
+ SC(103, 30, 28, 3, yes, no, 0, no) \
+ SC(104, 30, 28, 4, yes, no, 0, no) \
+ \
+ SC(105, 31, 29, 1, yes, no, 0, no) \
+ SC(106, 31, 29, 2, yes, no, 0, no) \
+ SC(107, 31, 29, 3, yes, no, 0, no) \
+ SC(108, 31, 29, 4, yes, no, 0, no) \
+ \
+ SC(109, 32, 30, 1, yes, no, 0, no) \
+ SC(110, 32, 30, 2, yes, no, 0, no) \
+ SC(111, 32, 30, 3, yes, no, 0, no) \
+ SC(112, 32, 30, 4, yes, no, 0, no) \
+ \
+ SC(113, 33, 31, 1, yes, no, 0, no) \
+ SC(114, 33, 31, 2, yes, no, 0, no) \
+ SC(115, 33, 31, 3, yes, no, 0, no) \
+ SC(116, 33, 31, 4, yes, no, 0, no) \
+ \
+ SC(117, 34, 32, 1, yes, no, 0, no) \
+ SC(118, 34, 32, 2, yes, no, 0, no) \
+ SC(119, 34, 32, 3, yes, no, 0, no) \
+ SC(120, 34, 32, 4, yes, no, 0, no) \
+ \
+ SC(121, 35, 33, 1, yes, no, 0, no) \
+ SC(122, 35, 33, 2, yes, no, 0, no) \
+ SC(123, 35, 33, 3, yes, no, 0, no) \
+ SC(124, 35, 33, 4, yes, no, 0, no) \
+ \
+ SC(125, 36, 34, 1, yes, no, 0, no) \
+ SC(126, 36, 34, 2, yes, no, 0, no) \
+ SC(127, 36, 34, 3, yes, no, 0, no) \
+ SC(128, 36, 34, 4, yes, no, 0, no) \
+ \
+ SC(129, 37, 35, 1, yes, no, 0, no) \
+ SC(130, 37, 35, 2, yes, no, 0, no) \
+ SC(131, 37, 35, 3, yes, no, 0, no) \
+ SC(132, 37, 35, 4, yes, no, 0, no) \
+ \
+ SC(133, 38, 36, 1, yes, no, 0, no) \
+ SC(134, 38, 36, 2, yes, no, 0, no) \
+ SC(135, 38, 36, 3, yes, no, 0, no) \
+ SC(136, 38, 36, 4, yes, no, 0, no) \
+ \
+ SC(137, 39, 37, 1, yes, no, 0, no) \
+ SC(138, 39, 37, 2, yes, no, 0, no) \
+ SC(139, 39, 37, 3, yes, no, 0, no) \
+ SC(140, 39, 37, 4, yes, no, 0, no) \
+ \
+ SC(141, 40, 38, 1, yes, no, 0, no) \
+ SC(142, 40, 38, 2, yes, no, 0, no) \
+ SC(143, 40, 38, 3, yes, no, 0, no) \
+ SC(144, 40, 38, 4, yes, no, 0, no) \
+ \
+ SC(145, 41, 39, 1, yes, no, 0, no) \
+ SC(146, 41, 39, 2, yes, no, 0, no) \
+ SC(147, 41, 39, 3, yes, no, 0, no) \
+ SC(148, 41, 39, 4, yes, no, 0, no) \
+ \
+ SC(149, 42, 40, 1, yes, no, 0, no) \
+ SC(150, 42, 40, 2, yes, no, 0, no) \
+ SC(151, 42, 40, 3, yes, no, 0, no) \
+ SC(152, 42, 40, 4, yes, no, 0, no) \
+ \
+ SC(153, 43, 41, 1, yes, no, 0, no) \
+ SC(154, 43, 41, 2, yes, no, 0, no) \
+ SC(155, 43, 41, 3, yes, no, 0, no) \
+ SC(156, 43, 41, 4, yes, no, 0, no) \
+ \
+ SC(157, 44, 42, 1, yes, no, 0, no) \
+ SC(158, 44, 42, 2, yes, no, 0, no) \
+ SC(159, 44, 42, 3, yes, no, 0, no) \
+ SC(160, 44, 42, 4, yes, no, 0, no) \
+ \
+ SC(161, 45, 43, 1, yes, no, 0, no) \
+ SC(162, 45, 43, 2, yes, no, 0, no) \
+ SC(163, 45, 43, 3, yes, no, 0, no) \
+ SC(164, 45, 43, 4, yes, no, 0, no) \
+ \
+ SC(165, 46, 44, 1, yes, no, 0, no) \
+ SC(166, 46, 44, 2, yes, no, 0, no) \
+ SC(167, 46, 44, 3, yes, no, 0, no) \
+ SC(168, 46, 44, 4, yes, no, 0, no) \
+ \
+ SC(169, 47, 45, 1, yes, no, 0, no) \
+ SC(170, 47, 45, 2, yes, no, 0, no) \
+ SC(171, 47, 45, 3, yes, no, 0, no) \
+ SC(172, 47, 45, 4, yes, no, 0, no) \
+ \
+ SC(173, 48, 46, 1, yes, no, 0, no) \
+ SC(174, 48, 46, 2, yes, no, 0, no) \
+ SC(175, 48, 46, 3, yes, no, 0, no) \
+ SC(176, 48, 46, 4, yes, no, 0, no) \
+ \
+ SC(177, 49, 47, 1, yes, no, 0, no) \
+ SC(178, 49, 47, 2, yes, no, 0, no) \
+ SC(179, 49, 47, 3, yes, no, 0, no) \
+ SC(180, 49, 47, 4, yes, no, 0, no) \
+ \
+ SC(181, 50, 48, 1, yes, no, 0, no) \
+ SC(182, 50, 48, 2, yes, no, 0, no) \
+ SC(183, 50, 48, 3, yes, no, 0, no) \
+ SC(184, 50, 48, 4, yes, no, 0, no) \
+ \
+ SC(185, 51, 49, 1, yes, no, 0, no) \
+ SC(186, 51, 49, 2, yes, no, 0, no) \
+ SC(187, 51, 49, 3, yes, no, 0, no) \
+ SC(188, 51, 49, 4, yes, no, 0, no) \
+ \
+ SC(189, 52, 50, 1, yes, no, 0, no) \
+ SC(190, 52, 50, 2, yes, no, 0, no) \
+ SC(191, 52, 50, 3, yes, no, 0, no) \
+ SC(192, 52, 50, 4, yes, no, 0, no) \
+ \
+ SC(193, 53, 51, 1, yes, no, 0, no) \
+ SC(194, 53, 51, 2, yes, no, 0, no) \
+ SC(195, 53, 51, 3, yes, no, 0, no) \
+ SC(196, 53, 51, 4, yes, no, 0, no) \
+ \
+ SC(197, 54, 52, 1, yes, no, 0, no) \
+ SC(198, 54, 52, 2, yes, no, 0, no) \
+ SC(199, 54, 52, 3, yes, no, 0, no) \
+ SC(200, 54, 52, 4, yes, no, 0, no) \
+ \
+ SC(201, 55, 53, 1, yes, no, 0, no) \
+ SC(202, 55, 53, 2, yes, no, 0, no) \
+ SC(203, 55, 53, 3, yes, no, 0, no) \
+ SC(204, 55, 53, 4, yes, no, 0, no) \
+ \
+ SC(205, 56, 54, 1, yes, no, 0, no) \
+ SC(206, 56, 54, 2, yes, no, 0, no) \
+ SC(207, 56, 54, 3, yes, no, 0, no) \
+ SC(208, 56, 54, 4, yes, no, 0, no) \
+ \
+ SC(209, 57, 55, 1, yes, no, 0, no) \
+ SC(210, 57, 55, 2, yes, no, 0, no) \
+ SC(211, 57, 55, 3, yes, no, 0, no) \
+ SC(212, 57, 55, 4, yes, no, 0, no) \
+ \
+ SC(213, 58, 56, 1, yes, no, 0, no) \
+ SC(214, 58, 56, 2, yes, no, 0, no) \
+ SC(215, 58, 56, 3, yes, no, 0, no) \
+ SC(216, 58, 56, 4, yes, no, 0, no) \
+ \
+ SC(217, 59, 57, 1, yes, no, 0, no) \
+ SC(218, 59, 57, 2, yes, no, 0, no) \
+ SC(219, 59, 57, 3, yes, no, 0, no) \
+ SC(220, 59, 57, 4, yes, no, 0, no) \
+ \
+ SC(221, 60, 58, 1, yes, no, 0, no) \
+ SC(222, 60, 58, 2, yes, no, 0, no) \
+ SC(223, 60, 58, 3, yes, no, 0, no) \
+ SC(224, 60, 58, 4, yes, no, 0, no) \
+ \
+ SC(225, 61, 59, 1, yes, no, 0, no) \
+ SC(226, 61, 59, 2, yes, no, 0, no) \
+ SC(227, 61, 59, 3, yes, no, 0, no) \
+ SC(228, 61, 59, 4, yes, no, 0, no) \
+ \
+ SC(229, 62, 60, 1, yes, no, 0, no) \
+ SC(230, 62, 60, 2, yes, no, 0, no) \
+ SC(231, 62, 60, 3, yes, no, 0, no) \
-#define SIZE_CLASSES_DEFINED
-#define NTBINS 1
-#define NLBINS 29
-#define NBINS 44
-#define NSIZES 232
-#define NPSIZES 191
-#define LG_TINY_MAXCLASS 3
-#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
-#define SMALL_MAXCLASS ((((size_t)1) << 15) + (((size_t)3) << 13))
-#define LG_LARGE_MINCLASS 16
-#define HUGE_MAXCLASS ((((size_t)1) << 62) + (((size_t)3) << 60))
+#define SIZE_CLASSES_DEFINED
+#define NTBINS 1
+#define NLBINS 29
+#define NBINS 44
+#define NSIZES 232
+#define LG_CEIL_NSIZES 8
+#define NPSIZES 191
+#define LG_TINY_MAXCLASS 3
+#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
+#define SMALL_MAXCLASS ((((size_t)1) << 15) + (((size_t)3) << 13))
+#define LG_LARGE_MINCLASS 16
+#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)
+#define LARGE_MAXCLASS ((((size_t)1) << 62) + (((size_t)3) << 60))
#endif
#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 16)
-#define SIZE_CLASSES \
- /* index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup */ \
- SC( 0, 3, 3, 0, no, yes, 3) \
- \
- SC( 1, 3, 3, 1, no, yes, 3) \
- SC( 2, 4, 4, 1, no, yes, 4) \
- SC( 3, 4, 4, 2, no, yes, 4) \
- SC( 4, 4, 4, 3, no, yes, 4) \
- \
- SC( 5, 6, 4, 1, no, yes, 4) \
- SC( 6, 6, 4, 2, no, yes, 4) \
- SC( 7, 6, 4, 3, no, yes, 4) \
- SC( 8, 6, 4, 4, no, yes, 4) \
- \
- SC( 9, 7, 5, 1, no, yes, 5) \
- SC( 10, 7, 5, 2, no, yes, 5) \
- SC( 11, 7, 5, 3, no, yes, 5) \
- SC( 12, 7, 5, 4, no, yes, 5) \
- \
- SC( 13, 8, 6, 1, no, yes, 6) \
- SC( 14, 8, 6, 2, no, yes, 6) \
- SC( 15, 8, 6, 3, no, yes, 6) \
- SC( 16, 8, 6, 4, no, yes, 6) \
- \
- SC( 17, 9, 7, 1, no, yes, 7) \
- SC( 18, 9, 7, 2, no, yes, 7) \
- SC( 19, 9, 7, 3, no, yes, 7) \
- SC( 20, 9, 7, 4, no, yes, 7) \
- \
- SC( 21, 10, 8, 1, no, yes, 8) \
- SC( 22, 10, 8, 2, no, yes, 8) \
- SC( 23, 10, 8, 3, no, yes, 8) \
- SC( 24, 10, 8, 4, no, yes, 8) \
- \
- SC( 25, 11, 9, 1, no, yes, 9) \
- SC( 26, 11, 9, 2, no, yes, 9) \
- SC( 27, 11, 9, 3, no, yes, 9) \
- SC( 28, 11, 9, 4, no, yes, 9) \
- \
- SC( 29, 12, 10, 1, no, yes, no) \
- SC( 30, 12, 10, 2, no, yes, no) \
- SC( 31, 12, 10, 3, no, yes, no) \
- SC( 32, 12, 10, 4, no, yes, no) \
- \
- SC( 33, 13, 11, 1, no, yes, no) \
- SC( 34, 13, 11, 2, no, yes, no) \
- SC( 35, 13, 11, 3, no, yes, no) \
- SC( 36, 13, 11, 4, no, yes, no) \
- \
- SC( 37, 14, 12, 1, no, yes, no) \
- SC( 38, 14, 12, 2, no, yes, no) \
- SC( 39, 14, 12, 3, no, yes, no) \
- SC( 40, 14, 12, 4, no, yes, no) \
- \
- SC( 41, 15, 13, 1, no, yes, no) \
- SC( 42, 15, 13, 2, no, yes, no) \
- SC( 43, 15, 13, 3, no, yes, no) \
- SC( 44, 15, 13, 4, yes, yes, no) \
- \
- SC( 45, 16, 14, 1, no, yes, no) \
- SC( 46, 16, 14, 2, no, yes, no) \
- SC( 47, 16, 14, 3, no, yes, no) \
- SC( 48, 16, 14, 4, yes, yes, no) \
- \
- SC( 49, 17, 15, 1, no, yes, no) \
- SC( 50, 17, 15, 2, yes, yes, no) \
- SC( 51, 17, 15, 3, no, yes, no) \
- SC( 52, 17, 15, 4, yes, no, no) \
- \
- SC( 53, 18, 16, 1, yes, no, no) \
- SC( 54, 18, 16, 2, yes, no, no) \
- SC( 55, 18, 16, 3, yes, no, no) \
- SC( 56, 18, 16, 4, yes, no, no) \
- \
- SC( 57, 19, 17, 1, yes, no, no) \
- SC( 58, 19, 17, 2, yes, no, no) \
- SC( 59, 19, 17, 3, yes, no, no) \
- SC( 60, 19, 17, 4, yes, no, no) \
- \
- SC( 61, 20, 18, 1, yes, no, no) \
- SC( 62, 20, 18, 2, yes, no, no) \
- SC( 63, 20, 18, 3, yes, no, no) \
- SC( 64, 20, 18, 4, yes, no, no) \
- \
- SC( 65, 21, 19, 1, yes, no, no) \
- SC( 66, 21, 19, 2, yes, no, no) \
- SC( 67, 21, 19, 3, yes, no, no) \
- SC( 68, 21, 19, 4, yes, no, no) \
- \
- SC( 69, 22, 20, 1, yes, no, no) \
- SC( 70, 22, 20, 2, yes, no, no) \
- SC( 71, 22, 20, 3, yes, no, no) \
- SC( 72, 22, 20, 4, yes, no, no) \
- \
- SC( 73, 23, 21, 1, yes, no, no) \
- SC( 74, 23, 21, 2, yes, no, no) \
- SC( 75, 23, 21, 3, yes, no, no) \
- SC( 76, 23, 21, 4, yes, no, no) \
- \
- SC( 77, 24, 22, 1, yes, no, no) \
- SC( 78, 24, 22, 2, yes, no, no) \
- SC( 79, 24, 22, 3, yes, no, no) \
- SC( 80, 24, 22, 4, yes, no, no) \
- \
- SC( 81, 25, 23, 1, yes, no, no) \
- SC( 82, 25, 23, 2, yes, no, no) \
- SC( 83, 25, 23, 3, yes, no, no) \
- SC( 84, 25, 23, 4, yes, no, no) \
- \
- SC( 85, 26, 24, 1, yes, no, no) \
- SC( 86, 26, 24, 2, yes, no, no) \
- SC( 87, 26, 24, 3, yes, no, no) \
- SC( 88, 26, 24, 4, yes, no, no) \
- \
- SC( 89, 27, 25, 1, yes, no, no) \
- SC( 90, 27, 25, 2, yes, no, no) \
- SC( 91, 27, 25, 3, yes, no, no) \
- SC( 92, 27, 25, 4, yes, no, no) \
- \
- SC( 93, 28, 26, 1, yes, no, no) \
- SC( 94, 28, 26, 2, yes, no, no) \
- SC( 95, 28, 26, 3, yes, no, no) \
- SC( 96, 28, 26, 4, yes, no, no) \
- \
- SC( 97, 29, 27, 1, yes, no, no) \
- SC( 98, 29, 27, 2, yes, no, no) \
- SC( 99, 29, 27, 3, yes, no, no) \
- SC(100, 29, 27, 4, yes, no, no) \
- \
- SC(101, 30, 28, 1, yes, no, no) \
- SC(102, 30, 28, 2, yes, no, no) \
- SC(103, 30, 28, 3, yes, no, no) \
- SC(104, 30, 28, 4, yes, no, no) \
- \
- SC(105, 31, 29, 1, yes, no, no) \
- SC(106, 31, 29, 2, yes, no, no) \
- SC(107, 31, 29, 3, yes, no, no) \
- SC(108, 31, 29, 4, yes, no, no) \
- \
- SC(109, 32, 30, 1, yes, no, no) \
- SC(110, 32, 30, 2, yes, no, no) \
- SC(111, 32, 30, 3, yes, no, no) \
- SC(112, 32, 30, 4, yes, no, no) \
- \
- SC(113, 33, 31, 1, yes, no, no) \
- SC(114, 33, 31, 2, yes, no, no) \
- SC(115, 33, 31, 3, yes, no, no) \
- SC(116, 33, 31, 4, yes, no, no) \
- \
- SC(117, 34, 32, 1, yes, no, no) \
- SC(118, 34, 32, 2, yes, no, no) \
- SC(119, 34, 32, 3, yes, no, no) \
- SC(120, 34, 32, 4, yes, no, no) \
- \
- SC(121, 35, 33, 1, yes, no, no) \
- SC(122, 35, 33, 2, yes, no, no) \
- SC(123, 35, 33, 3, yes, no, no) \
- SC(124, 35, 33, 4, yes, no, no) \
- \
- SC(125, 36, 34, 1, yes, no, no) \
- SC(126, 36, 34, 2, yes, no, no) \
- SC(127, 36, 34, 3, yes, no, no) \
- SC(128, 36, 34, 4, yes, no, no) \
- \
- SC(129, 37, 35, 1, yes, no, no) \
- SC(130, 37, 35, 2, yes, no, no) \
- SC(131, 37, 35, 3, yes, no, no) \
- SC(132, 37, 35, 4, yes, no, no) \
- \
- SC(133, 38, 36, 1, yes, no, no) \
- SC(134, 38, 36, 2, yes, no, no) \
- SC(135, 38, 36, 3, yes, no, no) \
- SC(136, 38, 36, 4, yes, no, no) \
- \
- SC(137, 39, 37, 1, yes, no, no) \
- SC(138, 39, 37, 2, yes, no, no) \
- SC(139, 39, 37, 3, yes, no, no) \
- SC(140, 39, 37, 4, yes, no, no) \
- \
- SC(141, 40, 38, 1, yes, no, no) \
- SC(142, 40, 38, 2, yes, no, no) \
- SC(143, 40, 38, 3, yes, no, no) \
- SC(144, 40, 38, 4, yes, no, no) \
- \
- SC(145, 41, 39, 1, yes, no, no) \
- SC(146, 41, 39, 2, yes, no, no) \
- SC(147, 41, 39, 3, yes, no, no) \
- SC(148, 41, 39, 4, yes, no, no) \
- \
- SC(149, 42, 40, 1, yes, no, no) \
- SC(150, 42, 40, 2, yes, no, no) \
- SC(151, 42, 40, 3, yes, no, no) \
- SC(152, 42, 40, 4, yes, no, no) \
- \
- SC(153, 43, 41, 1, yes, no, no) \
- SC(154, 43, 41, 2, yes, no, no) \
- SC(155, 43, 41, 3, yes, no, no) \
- SC(156, 43, 41, 4, yes, no, no) \
- \
- SC(157, 44, 42, 1, yes, no, no) \
- SC(158, 44, 42, 2, yes, no, no) \
- SC(159, 44, 42, 3, yes, no, no) \
- SC(160, 44, 42, 4, yes, no, no) \
- \
- SC(161, 45, 43, 1, yes, no, no) \
- SC(162, 45, 43, 2, yes, no, no) \
- SC(163, 45, 43, 3, yes, no, no) \
- SC(164, 45, 43, 4, yes, no, no) \
- \
- SC(165, 46, 44, 1, yes, no, no) \
- SC(166, 46, 44, 2, yes, no, no) \
- SC(167, 46, 44, 3, yes, no, no) \
- SC(168, 46, 44, 4, yes, no, no) \
- \
- SC(169, 47, 45, 1, yes, no, no) \
- SC(170, 47, 45, 2, yes, no, no) \
- SC(171, 47, 45, 3, yes, no, no) \
- SC(172, 47, 45, 4, yes, no, no) \
- \
- SC(173, 48, 46, 1, yes, no, no) \
- SC(174, 48, 46, 2, yes, no, no) \
- SC(175, 48, 46, 3, yes, no, no) \
- SC(176, 48, 46, 4, yes, no, no) \
- \
- SC(177, 49, 47, 1, yes, no, no) \
- SC(178, 49, 47, 2, yes, no, no) \
- SC(179, 49, 47, 3, yes, no, no) \
- SC(180, 49, 47, 4, yes, no, no) \
- \
- SC(181, 50, 48, 1, yes, no, no) \
- SC(182, 50, 48, 2, yes, no, no) \
- SC(183, 50, 48, 3, yes, no, no) \
- SC(184, 50, 48, 4, yes, no, no) \
- \
- SC(185, 51, 49, 1, yes, no, no) \
- SC(186, 51, 49, 2, yes, no, no) \
- SC(187, 51, 49, 3, yes, no, no) \
- SC(188, 51, 49, 4, yes, no, no) \
- \
- SC(189, 52, 50, 1, yes, no, no) \
- SC(190, 52, 50, 2, yes, no, no) \
- SC(191, 52, 50, 3, yes, no, no) \
- SC(192, 52, 50, 4, yes, no, no) \
- \
- SC(193, 53, 51, 1, yes, no, no) \
- SC(194, 53, 51, 2, yes, no, no) \
- SC(195, 53, 51, 3, yes, no, no) \
- SC(196, 53, 51, 4, yes, no, no) \
- \
- SC(197, 54, 52, 1, yes, no, no) \
- SC(198, 54, 52, 2, yes, no, no) \
- SC(199, 54, 52, 3, yes, no, no) \
- SC(200, 54, 52, 4, yes, no, no) \
- \
- SC(201, 55, 53, 1, yes, no, no) \
- SC(202, 55, 53, 2, yes, no, no) \
- SC(203, 55, 53, 3, yes, no, no) \
- SC(204, 55, 53, 4, yes, no, no) \
- \
- SC(205, 56, 54, 1, yes, no, no) \
- SC(206, 56, 54, 2, yes, no, no) \
- SC(207, 56, 54, 3, yes, no, no) \
- SC(208, 56, 54, 4, yes, no, no) \
- \
- SC(209, 57, 55, 1, yes, no, no) \
- SC(210, 57, 55, 2, yes, no, no) \
- SC(211, 57, 55, 3, yes, no, no) \
- SC(212, 57, 55, 4, yes, no, no) \
- \
- SC(213, 58, 56, 1, yes, no, no) \
- SC(214, 58, 56, 2, yes, no, no) \
- SC(215, 58, 56, 3, yes, no, no) \
- SC(216, 58, 56, 4, yes, no, no) \
- \
- SC(217, 59, 57, 1, yes, no, no) \
- SC(218, 59, 57, 2, yes, no, no) \
- SC(219, 59, 57, 3, yes, no, no) \
- SC(220, 59, 57, 4, yes, no, no) \
- \
- SC(221, 60, 58, 1, yes, no, no) \
- SC(222, 60, 58, 2, yes, no, no) \
- SC(223, 60, 58, 3, yes, no, no) \
- SC(224, 60, 58, 4, yes, no, no) \
- \
- SC(225, 61, 59, 1, yes, no, no) \
- SC(226, 61, 59, 2, yes, no, no) \
- SC(227, 61, 59, 3, yes, no, no) \
- SC(228, 61, 59, 4, yes, no, no) \
- \
- SC(229, 62, 60, 1, yes, no, no) \
- SC(230, 62, 60, 2, yes, no, no) \
- SC(231, 62, 60, 3, yes, no, no) \
+#define SIZE_CLASSES \
+ /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \
+ SC( 0, 3, 3, 0, no, yes, 1, 3) \
+ \
+ SC( 1, 3, 3, 1, no, yes, 1, 3) \
+ SC( 2, 4, 4, 1, no, yes, 1, 4) \
+ SC( 3, 4, 4, 2, no, yes, 3, 4) \
+ SC( 4, 4, 4, 3, no, yes, 1, 4) \
+ \
+ SC( 5, 6, 4, 1, no, yes, 5, 4) \
+ SC( 6, 6, 4, 2, no, yes, 3, 4) \
+ SC( 7, 6, 4, 3, no, yes, 7, 4) \
+ SC( 8, 6, 4, 4, no, yes, 1, 4) \
+ \
+ SC( 9, 7, 5, 1, no, yes, 5, 5) \
+ SC( 10, 7, 5, 2, no, yes, 3, 5) \
+ SC( 11, 7, 5, 3, no, yes, 7, 5) \
+ SC( 12, 7, 5, 4, no, yes, 1, 5) \
+ \
+ SC( 13, 8, 6, 1, no, yes, 5, 6) \
+ SC( 14, 8, 6, 2, no, yes, 3, 6) \
+ SC( 15, 8, 6, 3, no, yes, 7, 6) \
+ SC( 16, 8, 6, 4, no, yes, 1, 6) \
+ \
+ SC( 17, 9, 7, 1, no, yes, 5, 7) \
+ SC( 18, 9, 7, 2, no, yes, 3, 7) \
+ SC( 19, 9, 7, 3, no, yes, 7, 7) \
+ SC( 20, 9, 7, 4, no, yes, 1, 7) \
+ \
+ SC( 21, 10, 8, 1, no, yes, 5, 8) \
+ SC( 22, 10, 8, 2, no, yes, 3, 8) \
+ SC( 23, 10, 8, 3, no, yes, 7, 8) \
+ SC( 24, 10, 8, 4, no, yes, 1, 8) \
+ \
+ SC( 25, 11, 9, 1, no, yes, 5, 9) \
+ SC( 26, 11, 9, 2, no, yes, 3, 9) \
+ SC( 27, 11, 9, 3, no, yes, 7, 9) \
+ SC( 28, 11, 9, 4, no, yes, 1, 9) \
+ \
+ SC( 29, 12, 10, 1, no, yes, 5, no) \
+ SC( 30, 12, 10, 2, no, yes, 3, no) \
+ SC( 31, 12, 10, 3, no, yes, 7, no) \
+ SC( 32, 12, 10, 4, no, yes, 1, no) \
+ \
+ SC( 33, 13, 11, 1, no, yes, 5, no) \
+ SC( 34, 13, 11, 2, no, yes, 3, no) \
+ SC( 35, 13, 11, 3, no, yes, 7, no) \
+ SC( 36, 13, 11, 4, no, yes, 1, no) \
+ \
+ SC( 37, 14, 12, 1, no, yes, 5, no) \
+ SC( 38, 14, 12, 2, no, yes, 3, no) \
+ SC( 39, 14, 12, 3, no, yes, 7, no) \
+ SC( 40, 14, 12, 4, no, yes, 1, no) \
+ \
+ SC( 41, 15, 13, 1, no, yes, 5, no) \
+ SC( 42, 15, 13, 2, no, yes, 3, no) \
+ SC( 43, 15, 13, 3, no, yes, 7, no) \
+ SC( 44, 15, 13, 4, yes, yes, 1, no) \
+ \
+ SC( 45, 16, 14, 1, no, yes, 5, no) \
+ SC( 46, 16, 14, 2, no, yes, 3, no) \
+ SC( 47, 16, 14, 3, no, yes, 7, no) \
+ SC( 48, 16, 14, 4, yes, yes, 2, no) \
+ \
+ SC( 49, 17, 15, 1, no, yes, 5, no) \
+ SC( 50, 17, 15, 2, yes, yes, 3, no) \
+ SC( 51, 17, 15, 3, no, yes, 7, no) \
+ SC( 52, 17, 15, 4, yes, no, 0, no) \
+ \
+ SC( 53, 18, 16, 1, yes, no, 0, no) \
+ SC( 54, 18, 16, 2, yes, no, 0, no) \
+ SC( 55, 18, 16, 3, yes, no, 0, no) \
+ SC( 56, 18, 16, 4, yes, no, 0, no) \
+ \
+ SC( 57, 19, 17, 1, yes, no, 0, no) \
+ SC( 58, 19, 17, 2, yes, no, 0, no) \
+ SC( 59, 19, 17, 3, yes, no, 0, no) \
+ SC( 60, 19, 17, 4, yes, no, 0, no) \
+ \
+ SC( 61, 20, 18, 1, yes, no, 0, no) \
+ SC( 62, 20, 18, 2, yes, no, 0, no) \
+ SC( 63, 20, 18, 3, yes, no, 0, no) \
+ SC( 64, 20, 18, 4, yes, no, 0, no) \
+ \
+ SC( 65, 21, 19, 1, yes, no, 0, no) \
+ SC( 66, 21, 19, 2, yes, no, 0, no) \
+ SC( 67, 21, 19, 3, yes, no, 0, no) \
+ SC( 68, 21, 19, 4, yes, no, 0, no) \
+ \
+ SC( 69, 22, 20, 1, yes, no, 0, no) \
+ SC( 70, 22, 20, 2, yes, no, 0, no) \
+ SC( 71, 22, 20, 3, yes, no, 0, no) \
+ SC( 72, 22, 20, 4, yes, no, 0, no) \
+ \
+ SC( 73, 23, 21, 1, yes, no, 0, no) \
+ SC( 74, 23, 21, 2, yes, no, 0, no) \
+ SC( 75, 23, 21, 3, yes, no, 0, no) \
+ SC( 76, 23, 21, 4, yes, no, 0, no) \
+ \
+ SC( 77, 24, 22, 1, yes, no, 0, no) \
+ SC( 78, 24, 22, 2, yes, no, 0, no) \
+ SC( 79, 24, 22, 3, yes, no, 0, no) \
+ SC( 80, 24, 22, 4, yes, no, 0, no) \
+ \
+ SC( 81, 25, 23, 1, yes, no, 0, no) \
+ SC( 82, 25, 23, 2, yes, no, 0, no) \
+ SC( 83, 25, 23, 3, yes, no, 0, no) \
+ SC( 84, 25, 23, 4, yes, no, 0, no) \
+ \
+ SC( 85, 26, 24, 1, yes, no, 0, no) \
+ SC( 86, 26, 24, 2, yes, no, 0, no) \
+ SC( 87, 26, 24, 3, yes, no, 0, no) \
+ SC( 88, 26, 24, 4, yes, no, 0, no) \
+ \
+ SC( 89, 27, 25, 1, yes, no, 0, no) \
+ SC( 90, 27, 25, 2, yes, no, 0, no) \
+ SC( 91, 27, 25, 3, yes, no, 0, no) \
+ SC( 92, 27, 25, 4, yes, no, 0, no) \
+ \
+ SC( 93, 28, 26, 1, yes, no, 0, no) \
+ SC( 94, 28, 26, 2, yes, no, 0, no) \
+ SC( 95, 28, 26, 3, yes, no, 0, no) \
+ SC( 96, 28, 26, 4, yes, no, 0, no) \
+ \
+ SC( 97, 29, 27, 1, yes, no, 0, no) \
+ SC( 98, 29, 27, 2, yes, no, 0, no) \
+ SC( 99, 29, 27, 3, yes, no, 0, no) \
+ SC(100, 29, 27, 4, yes, no, 0, no) \
+ \
+ SC(101, 30, 28, 1, yes, no, 0, no) \
+ SC(102, 30, 28, 2, yes, no, 0, no) \
+ SC(103, 30, 28, 3, yes, no, 0, no) \
+ SC(104, 30, 28, 4, yes, no, 0, no) \
+ \
+ SC(105, 31, 29, 1, yes, no, 0, no) \
+ SC(106, 31, 29, 2, yes, no, 0, no) \
+ SC(107, 31, 29, 3, yes, no, 0, no) \
+ SC(108, 31, 29, 4, yes, no, 0, no) \
+ \
+ SC(109, 32, 30, 1, yes, no, 0, no) \
+ SC(110, 32, 30, 2, yes, no, 0, no) \
+ SC(111, 32, 30, 3, yes, no, 0, no) \
+ SC(112, 32, 30, 4, yes, no, 0, no) \
+ \
+ SC(113, 33, 31, 1, yes, no, 0, no) \
+ SC(114, 33, 31, 2, yes, no, 0, no) \
+ SC(115, 33, 31, 3, yes, no, 0, no) \
+ SC(116, 33, 31, 4, yes, no, 0, no) \
+ \
+ SC(117, 34, 32, 1, yes, no, 0, no) \
+ SC(118, 34, 32, 2, yes, no, 0, no) \
+ SC(119, 34, 32, 3, yes, no, 0, no) \
+ SC(120, 34, 32, 4, yes, no, 0, no) \
+ \
+ SC(121, 35, 33, 1, yes, no, 0, no) \
+ SC(122, 35, 33, 2, yes, no, 0, no) \
+ SC(123, 35, 33, 3, yes, no, 0, no) \
+ SC(124, 35, 33, 4, yes, no, 0, no) \
+ \
+ SC(125, 36, 34, 1, yes, no, 0, no) \
+ SC(126, 36, 34, 2, yes, no, 0, no) \
+ SC(127, 36, 34, 3, yes, no, 0, no) \
+ SC(128, 36, 34, 4, yes, no, 0, no) \
+ \
+ SC(129, 37, 35, 1, yes, no, 0, no) \
+ SC(130, 37, 35, 2, yes, no, 0, no) \
+ SC(131, 37, 35, 3, yes, no, 0, no) \
+ SC(132, 37, 35, 4, yes, no, 0, no) \
+ \
+ SC(133, 38, 36, 1, yes, no, 0, no) \
+ SC(134, 38, 36, 2, yes, no, 0, no) \
+ SC(135, 38, 36, 3, yes, no, 0, no) \
+ SC(136, 38, 36, 4, yes, no, 0, no) \
+ \
+ SC(137, 39, 37, 1, yes, no, 0, no) \
+ SC(138, 39, 37, 2, yes, no, 0, no) \
+ SC(139, 39, 37, 3, yes, no, 0, no) \
+ SC(140, 39, 37, 4, yes, no, 0, no) \
+ \
+ SC(141, 40, 38, 1, yes, no, 0, no) \
+ SC(142, 40, 38, 2, yes, no, 0, no) \
+ SC(143, 40, 38, 3, yes, no, 0, no) \
+ SC(144, 40, 38, 4, yes, no, 0, no) \
+ \
+ SC(145, 41, 39, 1, yes, no, 0, no) \
+ SC(146, 41, 39, 2, yes, no, 0, no) \
+ SC(147, 41, 39, 3, yes, no, 0, no) \
+ SC(148, 41, 39, 4, yes, no, 0, no) \
+ \
+ SC(149, 42, 40, 1, yes, no, 0, no) \
+ SC(150, 42, 40, 2, yes, no, 0, no) \
+ SC(151, 42, 40, 3, yes, no, 0, no) \
+ SC(152, 42, 40, 4, yes, no, 0, no) \
+ \
+ SC(153, 43, 41, 1, yes, no, 0, no) \
+ SC(154, 43, 41, 2, yes, no, 0, no) \
+ SC(155, 43, 41, 3, yes, no, 0, no) \
+ SC(156, 43, 41, 4, yes, no, 0, no) \
+ \
+ SC(157, 44, 42, 1, yes, no, 0, no) \
+ SC(158, 44, 42, 2, yes, no, 0, no) \
+ SC(159, 44, 42, 3, yes, no, 0, no) \
+ SC(160, 44, 42, 4, yes, no, 0, no) \
+ \
+ SC(161, 45, 43, 1, yes, no, 0, no) \
+ SC(162, 45, 43, 2, yes, no, 0, no) \
+ SC(163, 45, 43, 3, yes, no, 0, no) \
+ SC(164, 45, 43, 4, yes, no, 0, no) \
+ \
+ SC(165, 46, 44, 1, yes, no, 0, no) \
+ SC(166, 46, 44, 2, yes, no, 0, no) \
+ SC(167, 46, 44, 3, yes, no, 0, no) \
+ SC(168, 46, 44, 4, yes, no, 0, no) \
+ \
+ SC(169, 47, 45, 1, yes, no, 0, no) \
+ SC(170, 47, 45, 2, yes, no, 0, no) \
+ SC(171, 47, 45, 3, yes, no, 0, no) \
+ SC(172, 47, 45, 4, yes, no, 0, no) \
+ \
+ SC(173, 48, 46, 1, yes, no, 0, no) \
+ SC(174, 48, 46, 2, yes, no, 0, no) \
+ SC(175, 48, 46, 3, yes, no, 0, no) \
+ SC(176, 48, 46, 4, yes, no, 0, no) \
+ \
+ SC(177, 49, 47, 1, yes, no, 0, no) \
+ SC(178, 49, 47, 2, yes, no, 0, no) \
+ SC(179, 49, 47, 3, yes, no, 0, no) \
+ SC(180, 49, 47, 4, yes, no, 0, no) \
+ \
+ SC(181, 50, 48, 1, yes, no, 0, no) \
+ SC(182, 50, 48, 2, yes, no, 0, no) \
+ SC(183, 50, 48, 3, yes, no, 0, no) \
+ SC(184, 50, 48, 4, yes, no, 0, no) \
+ \
+ SC(185, 51, 49, 1, yes, no, 0, no) \
+ SC(186, 51, 49, 2, yes, no, 0, no) \
+ SC(187, 51, 49, 3, yes, no, 0, no) \
+ SC(188, 51, 49, 4, yes, no, 0, no) \
+ \
+ SC(189, 52, 50, 1, yes, no, 0, no) \
+ SC(190, 52, 50, 2, yes, no, 0, no) \
+ SC(191, 52, 50, 3, yes, no, 0, no) \
+ SC(192, 52, 50, 4, yes, no, 0, no) \
+ \
+ SC(193, 53, 51, 1, yes, no, 0, no) \
+ SC(194, 53, 51, 2, yes, no, 0, no) \
+ SC(195, 53, 51, 3, yes, no, 0, no) \
+ SC(196, 53, 51, 4, yes, no, 0, no) \
+ \
+ SC(197, 54, 52, 1, yes, no, 0, no) \
+ SC(198, 54, 52, 2, yes, no, 0, no) \
+ SC(199, 54, 52, 3, yes, no, 0, no) \
+ SC(200, 54, 52, 4, yes, no, 0, no) \
+ \
+ SC(201, 55, 53, 1, yes, no, 0, no) \
+ SC(202, 55, 53, 2, yes, no, 0, no) \
+ SC(203, 55, 53, 3, yes, no, 0, no) \
+ SC(204, 55, 53, 4, yes, no, 0, no) \
+ \
+ SC(205, 56, 54, 1, yes, no, 0, no) \
+ SC(206, 56, 54, 2, yes, no, 0, no) \
+ SC(207, 56, 54, 3, yes, no, 0, no) \
+ SC(208, 56, 54, 4, yes, no, 0, no) \
+ \
+ SC(209, 57, 55, 1, yes, no, 0, no) \
+ SC(210, 57, 55, 2, yes, no, 0, no) \
+ SC(211, 57, 55, 3, yes, no, 0, no) \
+ SC(212, 57, 55, 4, yes, no, 0, no) \
+ \
+ SC(213, 58, 56, 1, yes, no, 0, no) \
+ SC(214, 58, 56, 2, yes, no, 0, no) \
+ SC(215, 58, 56, 3, yes, no, 0, no) \
+ SC(216, 58, 56, 4, yes, no, 0, no) \
+ \
+ SC(217, 59, 57, 1, yes, no, 0, no) \
+ SC(218, 59, 57, 2, yes, no, 0, no) \
+ SC(219, 59, 57, 3, yes, no, 0, no) \
+ SC(220, 59, 57, 4, yes, no, 0, no) \
+ \
+ SC(221, 60, 58, 1, yes, no, 0, no) \
+ SC(222, 60, 58, 2, yes, no, 0, no) \
+ SC(223, 60, 58, 3, yes, no, 0, no) \
+ SC(224, 60, 58, 4, yes, no, 0, no) \
+ \
+ SC(225, 61, 59, 1, yes, no, 0, no) \
+ SC(226, 61, 59, 2, yes, no, 0, no) \
+ SC(227, 61, 59, 3, yes, no, 0, no) \
+ SC(228, 61, 59, 4, yes, no, 0, no) \
+ \
+ SC(229, 62, 60, 1, yes, no, 0, no) \
+ SC(230, 62, 60, 2, yes, no, 0, no) \
+ SC(231, 62, 60, 3, yes, no, 0, no) \
-#define SIZE_CLASSES_DEFINED
-#define NTBINS 1
-#define NLBINS 29
-#define NBINS 52
-#define NSIZES 232
-#define NPSIZES 183
-#define LG_TINY_MAXCLASS 3
-#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
-#define SMALL_MAXCLASS ((((size_t)1) << 17) + (((size_t)3) << 15))
-#define LG_LARGE_MINCLASS 18
-#define HUGE_MAXCLASS ((((size_t)1) << 62) + (((size_t)3) << 60))
+#define SIZE_CLASSES_DEFINED
+#define NTBINS 1
+#define NLBINS 29
+#define NBINS 52
+#define NSIZES 232
+#define LG_CEIL_NSIZES 8
+#define NPSIZES 183
+#define LG_TINY_MAXCLASS 3
+#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
+#define SMALL_MAXCLASS ((((size_t)1) << 17) + (((size_t)3) << 15))
+#define LG_LARGE_MINCLASS 18
+#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)
+#define LARGE_MAXCLASS ((((size_t)1) << 62) + (((size_t)3) << 60))
#endif
#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 12)
-#define SIZE_CLASSES \
- /* index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup */ \
- SC( 0, 4, 4, 0, no, yes, 4) \
- SC( 1, 4, 4, 1, no, yes, 4) \
- SC( 2, 4, 4, 2, no, yes, 4) \
- SC( 3, 4, 4, 3, no, yes, 4) \
- \
- SC( 4, 6, 4, 1, no, yes, 4) \
- SC( 5, 6, 4, 2, no, yes, 4) \
- SC( 6, 6, 4, 3, no, yes, 4) \
- SC( 7, 6, 4, 4, no, yes, 4) \
- \
- SC( 8, 7, 5, 1, no, yes, 5) \
- SC( 9, 7, 5, 2, no, yes, 5) \
- SC( 10, 7, 5, 3, no, yes, 5) \
- SC( 11, 7, 5, 4, no, yes, 5) \
- \
- SC( 12, 8, 6, 1, no, yes, 6) \
- SC( 13, 8, 6, 2, no, yes, 6) \
- SC( 14, 8, 6, 3, no, yes, 6) \
- SC( 15, 8, 6, 4, no, yes, 6) \
- \
- SC( 16, 9, 7, 1, no, yes, 7) \
- SC( 17, 9, 7, 2, no, yes, 7) \
- SC( 18, 9, 7, 3, no, yes, 7) \
- SC( 19, 9, 7, 4, no, yes, 7) \
- \
- SC( 20, 10, 8, 1, no, yes, 8) \
- SC( 21, 10, 8, 2, no, yes, 8) \
- SC( 22, 10, 8, 3, no, yes, 8) \
- SC( 23, 10, 8, 4, no, yes, 8) \
- \
- SC( 24, 11, 9, 1, no, yes, 9) \
- SC( 25, 11, 9, 2, no, yes, 9) \
- SC( 26, 11, 9, 3, no, yes, 9) \
- SC( 27, 11, 9, 4, yes, yes, 9) \
- \
- SC( 28, 12, 10, 1, no, yes, no) \
- SC( 29, 12, 10, 2, no, yes, no) \
- SC( 30, 12, 10, 3, no, yes, no) \
- SC( 31, 12, 10, 4, yes, yes, no) \
- \
- SC( 32, 13, 11, 1, no, yes, no) \
- SC( 33, 13, 11, 2, yes, yes, no) \
- SC( 34, 13, 11, 3, no, yes, no) \
- SC( 35, 13, 11, 4, yes, no, no) \
- \
- SC( 36, 14, 12, 1, yes, no, no) \
- SC( 37, 14, 12, 2, yes, no, no) \
- SC( 38, 14, 12, 3, yes, no, no) \
- SC( 39, 14, 12, 4, yes, no, no) \
- \
- SC( 40, 15, 13, 1, yes, no, no) \
- SC( 41, 15, 13, 2, yes, no, no) \
- SC( 42, 15, 13, 3, yes, no, no) \
- SC( 43, 15, 13, 4, yes, no, no) \
- \
- SC( 44, 16, 14, 1, yes, no, no) \
- SC( 45, 16, 14, 2, yes, no, no) \
- SC( 46, 16, 14, 3, yes, no, no) \
- SC( 47, 16, 14, 4, yes, no, no) \
- \
- SC( 48, 17, 15, 1, yes, no, no) \
- SC( 49, 17, 15, 2, yes, no, no) \
- SC( 50, 17, 15, 3, yes, no, no) \
- SC( 51, 17, 15, 4, yes, no, no) \
- \
- SC( 52, 18, 16, 1, yes, no, no) \
- SC( 53, 18, 16, 2, yes, no, no) \
- SC( 54, 18, 16, 3, yes, no, no) \
- SC( 55, 18, 16, 4, yes, no, no) \
- \
- SC( 56, 19, 17, 1, yes, no, no) \
- SC( 57, 19, 17, 2, yes, no, no) \
- SC( 58, 19, 17, 3, yes, no, no) \
- SC( 59, 19, 17, 4, yes, no, no) \
- \
- SC( 60, 20, 18, 1, yes, no, no) \
- SC( 61, 20, 18, 2, yes, no, no) \
- SC( 62, 20, 18, 3, yes, no, no) \
- SC( 63, 20, 18, 4, yes, no, no) \
- \
- SC( 64, 21, 19, 1, yes, no, no) \
- SC( 65, 21, 19, 2, yes, no, no) \
- SC( 66, 21, 19, 3, yes, no, no) \
- SC( 67, 21, 19, 4, yes, no, no) \
- \
- SC( 68, 22, 20, 1, yes, no, no) \
- SC( 69, 22, 20, 2, yes, no, no) \
- SC( 70, 22, 20, 3, yes, no, no) \
- SC( 71, 22, 20, 4, yes, no, no) \
- \
- SC( 72, 23, 21, 1, yes, no, no) \
- SC( 73, 23, 21, 2, yes, no, no) \
- SC( 74, 23, 21, 3, yes, no, no) \
- SC( 75, 23, 21, 4, yes, no, no) \
- \
- SC( 76, 24, 22, 1, yes, no, no) \
- SC( 77, 24, 22, 2, yes, no, no) \
- SC( 78, 24, 22, 3, yes, no, no) \
- SC( 79, 24, 22, 4, yes, no, no) \
- \
- SC( 80, 25, 23, 1, yes, no, no) \
- SC( 81, 25, 23, 2, yes, no, no) \
- SC( 82, 25, 23, 3, yes, no, no) \
- SC( 83, 25, 23, 4, yes, no, no) \
- \
- SC( 84, 26, 24, 1, yes, no, no) \
- SC( 85, 26, 24, 2, yes, no, no) \
- SC( 86, 26, 24, 3, yes, no, no) \
- SC( 87, 26, 24, 4, yes, no, no) \
- \
- SC( 88, 27, 25, 1, yes, no, no) \
- SC( 89, 27, 25, 2, yes, no, no) \
- SC( 90, 27, 25, 3, yes, no, no) \
- SC( 91, 27, 25, 4, yes, no, no) \
- \
- SC( 92, 28, 26, 1, yes, no, no) \
- SC( 93, 28, 26, 2, yes, no, no) \
- SC( 94, 28, 26, 3, yes, no, no) \
- SC( 95, 28, 26, 4, yes, no, no) \
- \
- SC( 96, 29, 27, 1, yes, no, no) \
- SC( 97, 29, 27, 2, yes, no, no) \
- SC( 98, 29, 27, 3, yes, no, no) \
- SC( 99, 29, 27, 4, yes, no, no) \
- \
- SC(100, 30, 28, 1, yes, no, no) \
- SC(101, 30, 28, 2, yes, no, no) \
- SC(102, 30, 28, 3, yes, no, no) \
- SC(103, 30, 28, 4, yes, no, no) \
- \
- SC(104, 31, 29, 1, yes, no, no) \
- SC(105, 31, 29, 2, yes, no, no) \
- SC(106, 31, 29, 3, yes, no, no) \
- SC(107, 31, 29, 4, yes, no, no) \
- \
- SC(108, 32, 30, 1, yes, no, no) \
- SC(109, 32, 30, 2, yes, no, no) \
- SC(110, 32, 30, 3, yes, no, no) \
- SC(111, 32, 30, 4, yes, no, no) \
- \
- SC(112, 33, 31, 1, yes, no, no) \
- SC(113, 33, 31, 2, yes, no, no) \
- SC(114, 33, 31, 3, yes, no, no) \
- SC(115, 33, 31, 4, yes, no, no) \
- \
- SC(116, 34, 32, 1, yes, no, no) \
- SC(117, 34, 32, 2, yes, no, no) \
- SC(118, 34, 32, 3, yes, no, no) \
- SC(119, 34, 32, 4, yes, no, no) \
- \
- SC(120, 35, 33, 1, yes, no, no) \
- SC(121, 35, 33, 2, yes, no, no) \
- SC(122, 35, 33, 3, yes, no, no) \
- SC(123, 35, 33, 4, yes, no, no) \
- \
- SC(124, 36, 34, 1, yes, no, no) \
- SC(125, 36, 34, 2, yes, no, no) \
- SC(126, 36, 34, 3, yes, no, no) \
- SC(127, 36, 34, 4, yes, no, no) \
- \
- SC(128, 37, 35, 1, yes, no, no) \
- SC(129, 37, 35, 2, yes, no, no) \
- SC(130, 37, 35, 3, yes, no, no) \
- SC(131, 37, 35, 4, yes, no, no) \
- \
- SC(132, 38, 36, 1, yes, no, no) \
- SC(133, 38, 36, 2, yes, no, no) \
- SC(134, 38, 36, 3, yes, no, no) \
- SC(135, 38, 36, 4, yes, no, no) \
- \
- SC(136, 39, 37, 1, yes, no, no) \
- SC(137, 39, 37, 2, yes, no, no) \
- SC(138, 39, 37, 3, yes, no, no) \
- SC(139, 39, 37, 4, yes, no, no) \
- \
- SC(140, 40, 38, 1, yes, no, no) \
- SC(141, 40, 38, 2, yes, no, no) \
- SC(142, 40, 38, 3, yes, no, no) \
- SC(143, 40, 38, 4, yes, no, no) \
- \
- SC(144, 41, 39, 1, yes, no, no) \
- SC(145, 41, 39, 2, yes, no, no) \
- SC(146, 41, 39, 3, yes, no, no) \
- SC(147, 41, 39, 4, yes, no, no) \
- \
- SC(148, 42, 40, 1, yes, no, no) \
- SC(149, 42, 40, 2, yes, no, no) \
- SC(150, 42, 40, 3, yes, no, no) \
- SC(151, 42, 40, 4, yes, no, no) \
- \
- SC(152, 43, 41, 1, yes, no, no) \
- SC(153, 43, 41, 2, yes, no, no) \
- SC(154, 43, 41, 3, yes, no, no) \
- SC(155, 43, 41, 4, yes, no, no) \
- \
- SC(156, 44, 42, 1, yes, no, no) \
- SC(157, 44, 42, 2, yes, no, no) \
- SC(158, 44, 42, 3, yes, no, no) \
- SC(159, 44, 42, 4, yes, no, no) \
- \
- SC(160, 45, 43, 1, yes, no, no) \
- SC(161, 45, 43, 2, yes, no, no) \
- SC(162, 45, 43, 3, yes, no, no) \
- SC(163, 45, 43, 4, yes, no, no) \
- \
- SC(164, 46, 44, 1, yes, no, no) \
- SC(165, 46, 44, 2, yes, no, no) \
- SC(166, 46, 44, 3, yes, no, no) \
- SC(167, 46, 44, 4, yes, no, no) \
- \
- SC(168, 47, 45, 1, yes, no, no) \
- SC(169, 47, 45, 2, yes, no, no) \
- SC(170, 47, 45, 3, yes, no, no) \
- SC(171, 47, 45, 4, yes, no, no) \
- \
- SC(172, 48, 46, 1, yes, no, no) \
- SC(173, 48, 46, 2, yes, no, no) \
- SC(174, 48, 46, 3, yes, no, no) \
- SC(175, 48, 46, 4, yes, no, no) \
- \
- SC(176, 49, 47, 1, yes, no, no) \
- SC(177, 49, 47, 2, yes, no, no) \
- SC(178, 49, 47, 3, yes, no, no) \
- SC(179, 49, 47, 4, yes, no, no) \
- \
- SC(180, 50, 48, 1, yes, no, no) \
- SC(181, 50, 48, 2, yes, no, no) \
- SC(182, 50, 48, 3, yes, no, no) \
- SC(183, 50, 48, 4, yes, no, no) \
- \
- SC(184, 51, 49, 1, yes, no, no) \
- SC(185, 51, 49, 2, yes, no, no) \
- SC(186, 51, 49, 3, yes, no, no) \
- SC(187, 51, 49, 4, yes, no, no) \
- \
- SC(188, 52, 50, 1, yes, no, no) \
- SC(189, 52, 50, 2, yes, no, no) \
- SC(190, 52, 50, 3, yes, no, no) \
- SC(191, 52, 50, 4, yes, no, no) \
- \
- SC(192, 53, 51, 1, yes, no, no) \
- SC(193, 53, 51, 2, yes, no, no) \
- SC(194, 53, 51, 3, yes, no, no) \
- SC(195, 53, 51, 4, yes, no, no) \
- \
- SC(196, 54, 52, 1, yes, no, no) \
- SC(197, 54, 52, 2, yes, no, no) \
- SC(198, 54, 52, 3, yes, no, no) \
- SC(199, 54, 52, 4, yes, no, no) \
- \
- SC(200, 55, 53, 1, yes, no, no) \
- SC(201, 55, 53, 2, yes, no, no) \
- SC(202, 55, 53, 3, yes, no, no) \
- SC(203, 55, 53, 4, yes, no, no) \
- \
- SC(204, 56, 54, 1, yes, no, no) \
- SC(205, 56, 54, 2, yes, no, no) \
- SC(206, 56, 54, 3, yes, no, no) \
- SC(207, 56, 54, 4, yes, no, no) \
- \
- SC(208, 57, 55, 1, yes, no, no) \
- SC(209, 57, 55, 2, yes, no, no) \
- SC(210, 57, 55, 3, yes, no, no) \
- SC(211, 57, 55, 4, yes, no, no) \
- \
- SC(212, 58, 56, 1, yes, no, no) \
- SC(213, 58, 56, 2, yes, no, no) \
- SC(214, 58, 56, 3, yes, no, no) \
- SC(215, 58, 56, 4, yes, no, no) \
- \
- SC(216, 59, 57, 1, yes, no, no) \
- SC(217, 59, 57, 2, yes, no, no) \
- SC(218, 59, 57, 3, yes, no, no) \
- SC(219, 59, 57, 4, yes, no, no) \
- \
- SC(220, 60, 58, 1, yes, no, no) \
- SC(221, 60, 58, 2, yes, no, no) \
- SC(222, 60, 58, 3, yes, no, no) \
- SC(223, 60, 58, 4, yes, no, no) \
- \
- SC(224, 61, 59, 1, yes, no, no) \
- SC(225, 61, 59, 2, yes, no, no) \
- SC(226, 61, 59, 3, yes, no, no) \
- SC(227, 61, 59, 4, yes, no, no) \
- \
- SC(228, 62, 60, 1, yes, no, no) \
- SC(229, 62, 60, 2, yes, no, no) \
- SC(230, 62, 60, 3, yes, no, no) \
+#define SIZE_CLASSES \
+ /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \
+ SC( 0, 4, 4, 0, no, yes, 1, 4) \
+ SC( 1, 4, 4, 1, no, yes, 1, 4) \
+ SC( 2, 4, 4, 2, no, yes, 3, 4) \
+ SC( 3, 4, 4, 3, no, yes, 1, 4) \
+ \
+ SC( 4, 6, 4, 1, no, yes, 5, 4) \
+ SC( 5, 6, 4, 2, no, yes, 3, 4) \
+ SC( 6, 6, 4, 3, no, yes, 7, 4) \
+ SC( 7, 6, 4, 4, no, yes, 1, 4) \
+ \
+ SC( 8, 7, 5, 1, no, yes, 5, 5) \
+ SC( 9, 7, 5, 2, no, yes, 3, 5) \
+ SC( 10, 7, 5, 3, no, yes, 7, 5) \
+ SC( 11, 7, 5, 4, no, yes, 1, 5) \
+ \
+ SC( 12, 8, 6, 1, no, yes, 5, 6) \
+ SC( 13, 8, 6, 2, no, yes, 3, 6) \
+ SC( 14, 8, 6, 3, no, yes, 7, 6) \
+ SC( 15, 8, 6, 4, no, yes, 1, 6) \
+ \
+ SC( 16, 9, 7, 1, no, yes, 5, 7) \
+ SC( 17, 9, 7, 2, no, yes, 3, 7) \
+ SC( 18, 9, 7, 3, no, yes, 7, 7) \
+ SC( 19, 9, 7, 4, no, yes, 1, 7) \
+ \
+ SC( 20, 10, 8, 1, no, yes, 5, 8) \
+ SC( 21, 10, 8, 2, no, yes, 3, 8) \
+ SC( 22, 10, 8, 3, no, yes, 7, 8) \
+ SC( 23, 10, 8, 4, no, yes, 1, 8) \
+ \
+ SC( 24, 11, 9, 1, no, yes, 5, 9) \
+ SC( 25, 11, 9, 2, no, yes, 3, 9) \
+ SC( 26, 11, 9, 3, no, yes, 7, 9) \
+ SC( 27, 11, 9, 4, yes, yes, 1, 9) \
+ \
+ SC( 28, 12, 10, 1, no, yes, 5, no) \
+ SC( 29, 12, 10, 2, no, yes, 3, no) \
+ SC( 30, 12, 10, 3, no, yes, 7, no) \
+ SC( 31, 12, 10, 4, yes, yes, 2, no) \
+ \
+ SC( 32, 13, 11, 1, no, yes, 5, no) \
+ SC( 33, 13, 11, 2, yes, yes, 3, no) \
+ SC( 34, 13, 11, 3, no, yes, 7, no) \
+ SC( 35, 13, 11, 4, yes, no, 0, no) \
+ \
+ SC( 36, 14, 12, 1, yes, no, 0, no) \
+ SC( 37, 14, 12, 2, yes, no, 0, no) \
+ SC( 38, 14, 12, 3, yes, no, 0, no) \
+ SC( 39, 14, 12, 4, yes, no, 0, no) \
+ \
+ SC( 40, 15, 13, 1, yes, no, 0, no) \
+ SC( 41, 15, 13, 2, yes, no, 0, no) \
+ SC( 42, 15, 13, 3, yes, no, 0, no) \
+ SC( 43, 15, 13, 4, yes, no, 0, no) \
+ \
+ SC( 44, 16, 14, 1, yes, no, 0, no) \
+ SC( 45, 16, 14, 2, yes, no, 0, no) \
+ SC( 46, 16, 14, 3, yes, no, 0, no) \
+ SC( 47, 16, 14, 4, yes, no, 0, no) \
+ \
+ SC( 48, 17, 15, 1, yes, no, 0, no) \
+ SC( 49, 17, 15, 2, yes, no, 0, no) \
+ SC( 50, 17, 15, 3, yes, no, 0, no) \
+ SC( 51, 17, 15, 4, yes, no, 0, no) \
+ \
+ SC( 52, 18, 16, 1, yes, no, 0, no) \
+ SC( 53, 18, 16, 2, yes, no, 0, no) \
+ SC( 54, 18, 16, 3, yes, no, 0, no) \
+ SC( 55, 18, 16, 4, yes, no, 0, no) \
+ \
+ SC( 56, 19, 17, 1, yes, no, 0, no) \
+ SC( 57, 19, 17, 2, yes, no, 0, no) \
+ SC( 58, 19, 17, 3, yes, no, 0, no) \
+ SC( 59, 19, 17, 4, yes, no, 0, no) \
+ \
+ SC( 60, 20, 18, 1, yes, no, 0, no) \
+ SC( 61, 20, 18, 2, yes, no, 0, no) \
+ SC( 62, 20, 18, 3, yes, no, 0, no) \
+ SC( 63, 20, 18, 4, yes, no, 0, no) \
+ \
+ SC( 64, 21, 19, 1, yes, no, 0, no) \
+ SC( 65, 21, 19, 2, yes, no, 0, no) \
+ SC( 66, 21, 19, 3, yes, no, 0, no) \
+ SC( 67, 21, 19, 4, yes, no, 0, no) \
+ \
+ SC( 68, 22, 20, 1, yes, no, 0, no) \
+ SC( 69, 22, 20, 2, yes, no, 0, no) \
+ SC( 70, 22, 20, 3, yes, no, 0, no) \
+ SC( 71, 22, 20, 4, yes, no, 0, no) \
+ \
+ SC( 72, 23, 21, 1, yes, no, 0, no) \
+ SC( 73, 23, 21, 2, yes, no, 0, no) \
+ SC( 74, 23, 21, 3, yes, no, 0, no) \
+ SC( 75, 23, 21, 4, yes, no, 0, no) \
+ \
+ SC( 76, 24, 22, 1, yes, no, 0, no) \
+ SC( 77, 24, 22, 2, yes, no, 0, no) \
+ SC( 78, 24, 22, 3, yes, no, 0, no) \
+ SC( 79, 24, 22, 4, yes, no, 0, no) \
+ \
+ SC( 80, 25, 23, 1, yes, no, 0, no) \
+ SC( 81, 25, 23, 2, yes, no, 0, no) \
+ SC( 82, 25, 23, 3, yes, no, 0, no) \
+ SC( 83, 25, 23, 4, yes, no, 0, no) \
+ \
+ SC( 84, 26, 24, 1, yes, no, 0, no) \
+ SC( 85, 26, 24, 2, yes, no, 0, no) \
+ SC( 86, 26, 24, 3, yes, no, 0, no) \
+ SC( 87, 26, 24, 4, yes, no, 0, no) \
+ \
+ SC( 88, 27, 25, 1, yes, no, 0, no) \
+ SC( 89, 27, 25, 2, yes, no, 0, no) \
+ SC( 90, 27, 25, 3, yes, no, 0, no) \
+ SC( 91, 27, 25, 4, yes, no, 0, no) \
+ \
+ SC( 92, 28, 26, 1, yes, no, 0, no) \
+ SC( 93, 28, 26, 2, yes, no, 0, no) \
+ SC( 94, 28, 26, 3, yes, no, 0, no) \
+ SC( 95, 28, 26, 4, yes, no, 0, no) \
+ \
+ SC( 96, 29, 27, 1, yes, no, 0, no) \
+ SC( 97, 29, 27, 2, yes, no, 0, no) \
+ SC( 98, 29, 27, 3, yes, no, 0, no) \
+ SC( 99, 29, 27, 4, yes, no, 0, no) \
+ \
+ SC(100, 30, 28, 1, yes, no, 0, no) \
+ SC(101, 30, 28, 2, yes, no, 0, no) \
+ SC(102, 30, 28, 3, yes, no, 0, no) \
+ SC(103, 30, 28, 4, yes, no, 0, no) \
+ \
+ SC(104, 31, 29, 1, yes, no, 0, no) \
+ SC(105, 31, 29, 2, yes, no, 0, no) \
+ SC(106, 31, 29, 3, yes, no, 0, no) \
+ SC(107, 31, 29, 4, yes, no, 0, no) \
+ \
+ SC(108, 32, 30, 1, yes, no, 0, no) \
+ SC(109, 32, 30, 2, yes, no, 0, no) \
+ SC(110, 32, 30, 3, yes, no, 0, no) \
+ SC(111, 32, 30, 4, yes, no, 0, no) \
+ \
+ SC(112, 33, 31, 1, yes, no, 0, no) \
+ SC(113, 33, 31, 2, yes, no, 0, no) \
+ SC(114, 33, 31, 3, yes, no, 0, no) \
+ SC(115, 33, 31, 4, yes, no, 0, no) \
+ \
+ SC(116, 34, 32, 1, yes, no, 0, no) \
+ SC(117, 34, 32, 2, yes, no, 0, no) \
+ SC(118, 34, 32, 3, yes, no, 0, no) \
+ SC(119, 34, 32, 4, yes, no, 0, no) \
+ \
+ SC(120, 35, 33, 1, yes, no, 0, no) \
+ SC(121, 35, 33, 2, yes, no, 0, no) \
+ SC(122, 35, 33, 3, yes, no, 0, no) \
+ SC(123, 35, 33, 4, yes, no, 0, no) \
+ \
+ SC(124, 36, 34, 1, yes, no, 0, no) \
+ SC(125, 36, 34, 2, yes, no, 0, no) \
+ SC(126, 36, 34, 3, yes, no, 0, no) \
+ SC(127, 36, 34, 4, yes, no, 0, no) \
+ \
+ SC(128, 37, 35, 1, yes, no, 0, no) \
+ SC(129, 37, 35, 2, yes, no, 0, no) \
+ SC(130, 37, 35, 3, yes, no, 0, no) \
+ SC(131, 37, 35, 4, yes, no, 0, no) \
+ \
+ SC(132, 38, 36, 1, yes, no, 0, no) \
+ SC(133, 38, 36, 2, yes, no, 0, no) \
+ SC(134, 38, 36, 3, yes, no, 0, no) \
+ SC(135, 38, 36, 4, yes, no, 0, no) \
+ \
+ SC(136, 39, 37, 1, yes, no, 0, no) \
+ SC(137, 39, 37, 2, yes, no, 0, no) \
+ SC(138, 39, 37, 3, yes, no, 0, no) \
+ SC(139, 39, 37, 4, yes, no, 0, no) \
+ \
+ SC(140, 40, 38, 1, yes, no, 0, no) \
+ SC(141, 40, 38, 2, yes, no, 0, no) \
+ SC(142, 40, 38, 3, yes, no, 0, no) \
+ SC(143, 40, 38, 4, yes, no, 0, no) \
+ \
+ SC(144, 41, 39, 1, yes, no, 0, no) \
+ SC(145, 41, 39, 2, yes, no, 0, no) \
+ SC(146, 41, 39, 3, yes, no, 0, no) \
+ SC(147, 41, 39, 4, yes, no, 0, no) \
+ \
+ SC(148, 42, 40, 1, yes, no, 0, no) \
+ SC(149, 42, 40, 2, yes, no, 0, no) \
+ SC(150, 42, 40, 3, yes, no, 0, no) \
+ SC(151, 42, 40, 4, yes, no, 0, no) \
+ \
+ SC(152, 43, 41, 1, yes, no, 0, no) \
+ SC(153, 43, 41, 2, yes, no, 0, no) \
+ SC(154, 43, 41, 3, yes, no, 0, no) \
+ SC(155, 43, 41, 4, yes, no, 0, no) \
+ \
+ SC(156, 44, 42, 1, yes, no, 0, no) \
+ SC(157, 44, 42, 2, yes, no, 0, no) \
+ SC(158, 44, 42, 3, yes, no, 0, no) \
+ SC(159, 44, 42, 4, yes, no, 0, no) \
+ \
+ SC(160, 45, 43, 1, yes, no, 0, no) \
+ SC(161, 45, 43, 2, yes, no, 0, no) \
+ SC(162, 45, 43, 3, yes, no, 0, no) \
+ SC(163, 45, 43, 4, yes, no, 0, no) \
+ \
+ SC(164, 46, 44, 1, yes, no, 0, no) \
+ SC(165, 46, 44, 2, yes, no, 0, no) \
+ SC(166, 46, 44, 3, yes, no, 0, no) \
+ SC(167, 46, 44, 4, yes, no, 0, no) \
+ \
+ SC(168, 47, 45, 1, yes, no, 0, no) \
+ SC(169, 47, 45, 2, yes, no, 0, no) \
+ SC(170, 47, 45, 3, yes, no, 0, no) \
+ SC(171, 47, 45, 4, yes, no, 0, no) \
+ \
+ SC(172, 48, 46, 1, yes, no, 0, no) \
+ SC(173, 48, 46, 2, yes, no, 0, no) \
+ SC(174, 48, 46, 3, yes, no, 0, no) \
+ SC(175, 48, 46, 4, yes, no, 0, no) \
+ \
+ SC(176, 49, 47, 1, yes, no, 0, no) \
+ SC(177, 49, 47, 2, yes, no, 0, no) \
+ SC(178, 49, 47, 3, yes, no, 0, no) \
+ SC(179, 49, 47, 4, yes, no, 0, no) \
+ \
+ SC(180, 50, 48, 1, yes, no, 0, no) \
+ SC(181, 50, 48, 2, yes, no, 0, no) \
+ SC(182, 50, 48, 3, yes, no, 0, no) \
+ SC(183, 50, 48, 4, yes, no, 0, no) \
+ \
+ SC(184, 51, 49, 1, yes, no, 0, no) \
+ SC(185, 51, 49, 2, yes, no, 0, no) \
+ SC(186, 51, 49, 3, yes, no, 0, no) \
+ SC(187, 51, 49, 4, yes, no, 0, no) \
+ \
+ SC(188, 52, 50, 1, yes, no, 0, no) \
+ SC(189, 52, 50, 2, yes, no, 0, no) \
+ SC(190, 52, 50, 3, yes, no, 0, no) \
+ SC(191, 52, 50, 4, yes, no, 0, no) \
+ \
+ SC(192, 53, 51, 1, yes, no, 0, no) \
+ SC(193, 53, 51, 2, yes, no, 0, no) \
+ SC(194, 53, 51, 3, yes, no, 0, no) \
+ SC(195, 53, 51, 4, yes, no, 0, no) \
+ \
+ SC(196, 54, 52, 1, yes, no, 0, no) \
+ SC(197, 54, 52, 2, yes, no, 0, no) \
+ SC(198, 54, 52, 3, yes, no, 0, no) \
+ SC(199, 54, 52, 4, yes, no, 0, no) \
+ \
+ SC(200, 55, 53, 1, yes, no, 0, no) \
+ SC(201, 55, 53, 2, yes, no, 0, no) \
+ SC(202, 55, 53, 3, yes, no, 0, no) \
+ SC(203, 55, 53, 4, yes, no, 0, no) \
+ \
+ SC(204, 56, 54, 1, yes, no, 0, no) \
+ SC(205, 56, 54, 2, yes, no, 0, no) \
+ SC(206, 56, 54, 3, yes, no, 0, no) \
+ SC(207, 56, 54, 4, yes, no, 0, no) \
+ \
+ SC(208, 57, 55, 1, yes, no, 0, no) \
+ SC(209, 57, 55, 2, yes, no, 0, no) \
+ SC(210, 57, 55, 3, yes, no, 0, no) \
+ SC(211, 57, 55, 4, yes, no, 0, no) \
+ \
+ SC(212, 58, 56, 1, yes, no, 0, no) \
+ SC(213, 58, 56, 2, yes, no, 0, no) \
+ SC(214, 58, 56, 3, yes, no, 0, no) \
+ SC(215, 58, 56, 4, yes, no, 0, no) \
+ \
+ SC(216, 59, 57, 1, yes, no, 0, no) \
+ SC(217, 59, 57, 2, yes, no, 0, no) \
+ SC(218, 59, 57, 3, yes, no, 0, no) \
+ SC(219, 59, 57, 4, yes, no, 0, no) \
+ \
+ SC(220, 60, 58, 1, yes, no, 0, no) \
+ SC(221, 60, 58, 2, yes, no, 0, no) \
+ SC(222, 60, 58, 3, yes, no, 0, no) \
+ SC(223, 60, 58, 4, yes, no, 0, no) \
+ \
+ SC(224, 61, 59, 1, yes, no, 0, no) \
+ SC(225, 61, 59, 2, yes, no, 0, no) \
+ SC(226, 61, 59, 3, yes, no, 0, no) \
+ SC(227, 61, 59, 4, yes, no, 0, no) \
+ \
+ SC(228, 62, 60, 1, yes, no, 0, no) \
+ SC(229, 62, 60, 2, yes, no, 0, no) \
+ SC(230, 62, 60, 3, yes, no, 0, no) \
-#define SIZE_CLASSES_DEFINED
-#define NTBINS 0
-#define NLBINS 28
-#define NBINS 35
-#define NSIZES 231
-#define NPSIZES 199
-#define LG_TINY_MAXCLASS "NA"
-#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
-#define SMALL_MAXCLASS ((((size_t)1) << 13) + (((size_t)3) << 11))
-#define LG_LARGE_MINCLASS 14
-#define HUGE_MAXCLASS ((((size_t)1) << 62) + (((size_t)3) << 60))
+#define SIZE_CLASSES_DEFINED
+#define NTBINS 0
+#define NLBINS 28
+#define NBINS 35
+#define NSIZES 231
+#define LG_CEIL_NSIZES 8
+#define NPSIZES 199
+#define LG_TINY_MAXCLASS "NA"
+#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
+#define SMALL_MAXCLASS ((((size_t)1) << 13) + (((size_t)3) << 11))
+#define LG_LARGE_MINCLASS 14
+#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)
+#define LARGE_MAXCLASS ((((size_t)1) << 62) + (((size_t)3) << 60))
#endif
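
For reference, each SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) row in the regenerated tables encodes a size class of (1 << lg_grp) + ndelta * (1 << lg_delta) bytes; the new pgs column is zero exactly on the non-bin rows and looks like a per-slab page count for the small classes. Under that reading, the derived constants in the block above follow directly from the last lookup row (index 27) and the last bin row (index 34). A minimal, self-contained sketch of that arithmetic (an editorial aside, not part of the generated header), assuming the standard jemalloc encoding:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Size encoded by one SC() row, assuming the usual jemalloc scheme. */
static size_t
sc_size(unsigned lg_grp, unsigned lg_delta, unsigned ndelta) {
	return ((size_t)1 << lg_grp) + ((size_t)ndelta << lg_delta);
}

int
main(void) {
	/* Index 27 is the last row with a lg_delta_lookup value. */
	size_t lookup_max = sc_size(11, 9, 4);
	/* Index 34 is the last row with "yes" in the bin column. */
	size_t small_max = sc_size(13, 11, 3);

	assert(lookup_max == ((((size_t)1) << 11) + (((size_t)4) << 9)));
	assert(small_max == ((((size_t)1) << 13) + (((size_t)3) << 11)));
	printf("LOOKUP_MAXCLASS=%zu SMALL_MAXCLASS=%zu\n", lookup_max, small_max);
	return 0;
}

Both asserts reduce to 4096 and 14336 bytes respectively, matching the LOOKUP_MAXCLASS and SMALL_MAXCLASS expressions in the block that ends just above.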
#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 13)
-#define SIZE_CLASSES \
- /* index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup */ \
- SC( 0, 4, 4, 0, no, yes, 4) \
- SC( 1, 4, 4, 1, no, yes, 4) \
- SC( 2, 4, 4, 2, no, yes, 4) \
- SC( 3, 4, 4, 3, no, yes, 4) \
- \
- SC( 4, 6, 4, 1, no, yes, 4) \
- SC( 5, 6, 4, 2, no, yes, 4) \
- SC( 6, 6, 4, 3, no, yes, 4) \
- SC( 7, 6, 4, 4, no, yes, 4) \
- \
- SC( 8, 7, 5, 1, no, yes, 5) \
- SC( 9, 7, 5, 2, no, yes, 5) \
- SC( 10, 7, 5, 3, no, yes, 5) \
- SC( 11, 7, 5, 4, no, yes, 5) \
- \
- SC( 12, 8, 6, 1, no, yes, 6) \
- SC( 13, 8, 6, 2, no, yes, 6) \
- SC( 14, 8, 6, 3, no, yes, 6) \
- SC( 15, 8, 6, 4, no, yes, 6) \
- \
- SC( 16, 9, 7, 1, no, yes, 7) \
- SC( 17, 9, 7, 2, no, yes, 7) \
- SC( 18, 9, 7, 3, no, yes, 7) \
- SC( 19, 9, 7, 4, no, yes, 7) \
- \
- SC( 20, 10, 8, 1, no, yes, 8) \
- SC( 21, 10, 8, 2, no, yes, 8) \
- SC( 22, 10, 8, 3, no, yes, 8) \
- SC( 23, 10, 8, 4, no, yes, 8) \
- \
- SC( 24, 11, 9, 1, no, yes, 9) \
- SC( 25, 11, 9, 2, no, yes, 9) \
- SC( 26, 11, 9, 3, no, yes, 9) \
- SC( 27, 11, 9, 4, no, yes, 9) \
- \
- SC( 28, 12, 10, 1, no, yes, no) \
- SC( 29, 12, 10, 2, no, yes, no) \
- SC( 30, 12, 10, 3, no, yes, no) \
- SC( 31, 12, 10, 4, yes, yes, no) \
- \
- SC( 32, 13, 11, 1, no, yes, no) \
- SC( 33, 13, 11, 2, no, yes, no) \
- SC( 34, 13, 11, 3, no, yes, no) \
- SC( 35, 13, 11, 4, yes, yes, no) \
- \
- SC( 36, 14, 12, 1, no, yes, no) \
- SC( 37, 14, 12, 2, yes, yes, no) \
- SC( 38, 14, 12, 3, no, yes, no) \
- SC( 39, 14, 12, 4, yes, no, no) \
- \
- SC( 40, 15, 13, 1, yes, no, no) \
- SC( 41, 15, 13, 2, yes, no, no) \
- SC( 42, 15, 13, 3, yes, no, no) \
- SC( 43, 15, 13, 4, yes, no, no) \
- \
- SC( 44, 16, 14, 1, yes, no, no) \
- SC( 45, 16, 14, 2, yes, no, no) \
- SC( 46, 16, 14, 3, yes, no, no) \
- SC( 47, 16, 14, 4, yes, no, no) \
- \
- SC( 48, 17, 15, 1, yes, no, no) \
- SC( 49, 17, 15, 2, yes, no, no) \
- SC( 50, 17, 15, 3, yes, no, no) \
- SC( 51, 17, 15, 4, yes, no, no) \
- \
- SC( 52, 18, 16, 1, yes, no, no) \
- SC( 53, 18, 16, 2, yes, no, no) \
- SC( 54, 18, 16, 3, yes, no, no) \
- SC( 55, 18, 16, 4, yes, no, no) \
- \
- SC( 56, 19, 17, 1, yes, no, no) \
- SC( 57, 19, 17, 2, yes, no, no) \
- SC( 58, 19, 17, 3, yes, no, no) \
- SC( 59, 19, 17, 4, yes, no, no) \
- \
- SC( 60, 20, 18, 1, yes, no, no) \
- SC( 61, 20, 18, 2, yes, no, no) \
- SC( 62, 20, 18, 3, yes, no, no) \
- SC( 63, 20, 18, 4, yes, no, no) \
- \
- SC( 64, 21, 19, 1, yes, no, no) \
- SC( 65, 21, 19, 2, yes, no, no) \
- SC( 66, 21, 19, 3, yes, no, no) \
- SC( 67, 21, 19, 4, yes, no, no) \
- \
- SC( 68, 22, 20, 1, yes, no, no) \
- SC( 69, 22, 20, 2, yes, no, no) \
- SC( 70, 22, 20, 3, yes, no, no) \
- SC( 71, 22, 20, 4, yes, no, no) \
- \
- SC( 72, 23, 21, 1, yes, no, no) \
- SC( 73, 23, 21, 2, yes, no, no) \
- SC( 74, 23, 21, 3, yes, no, no) \
- SC( 75, 23, 21, 4, yes, no, no) \
- \
- SC( 76, 24, 22, 1, yes, no, no) \
- SC( 77, 24, 22, 2, yes, no, no) \
- SC( 78, 24, 22, 3, yes, no, no) \
- SC( 79, 24, 22, 4, yes, no, no) \
- \
- SC( 80, 25, 23, 1, yes, no, no) \
- SC( 81, 25, 23, 2, yes, no, no) \
- SC( 82, 25, 23, 3, yes, no, no) \
- SC( 83, 25, 23, 4, yes, no, no) \
- \
- SC( 84, 26, 24, 1, yes, no, no) \
- SC( 85, 26, 24, 2, yes, no, no) \
- SC( 86, 26, 24, 3, yes, no, no) \
- SC( 87, 26, 24, 4, yes, no, no) \
- \
- SC( 88, 27, 25, 1, yes, no, no) \
- SC( 89, 27, 25, 2, yes, no, no) \
- SC( 90, 27, 25, 3, yes, no, no) \
- SC( 91, 27, 25, 4, yes, no, no) \
- \
- SC( 92, 28, 26, 1, yes, no, no) \
- SC( 93, 28, 26, 2, yes, no, no) \
- SC( 94, 28, 26, 3, yes, no, no) \
- SC( 95, 28, 26, 4, yes, no, no) \
- \
- SC( 96, 29, 27, 1, yes, no, no) \
- SC( 97, 29, 27, 2, yes, no, no) \
- SC( 98, 29, 27, 3, yes, no, no) \
- SC( 99, 29, 27, 4, yes, no, no) \
- \
- SC(100, 30, 28, 1, yes, no, no) \
- SC(101, 30, 28, 2, yes, no, no) \
- SC(102, 30, 28, 3, yes, no, no) \
- SC(103, 30, 28, 4, yes, no, no) \
- \
- SC(104, 31, 29, 1, yes, no, no) \
- SC(105, 31, 29, 2, yes, no, no) \
- SC(106, 31, 29, 3, yes, no, no) \
- SC(107, 31, 29, 4, yes, no, no) \
- \
- SC(108, 32, 30, 1, yes, no, no) \
- SC(109, 32, 30, 2, yes, no, no) \
- SC(110, 32, 30, 3, yes, no, no) \
- SC(111, 32, 30, 4, yes, no, no) \
- \
- SC(112, 33, 31, 1, yes, no, no) \
- SC(113, 33, 31, 2, yes, no, no) \
- SC(114, 33, 31, 3, yes, no, no) \
- SC(115, 33, 31, 4, yes, no, no) \
- \
- SC(116, 34, 32, 1, yes, no, no) \
- SC(117, 34, 32, 2, yes, no, no) \
- SC(118, 34, 32, 3, yes, no, no) \
- SC(119, 34, 32, 4, yes, no, no) \
- \
- SC(120, 35, 33, 1, yes, no, no) \
- SC(121, 35, 33, 2, yes, no, no) \
- SC(122, 35, 33, 3, yes, no, no) \
- SC(123, 35, 33, 4, yes, no, no) \
- \
- SC(124, 36, 34, 1, yes, no, no) \
- SC(125, 36, 34, 2, yes, no, no) \
- SC(126, 36, 34, 3, yes, no, no) \
- SC(127, 36, 34, 4, yes, no, no) \
- \
- SC(128, 37, 35, 1, yes, no, no) \
- SC(129, 37, 35, 2, yes, no, no) \
- SC(130, 37, 35, 3, yes, no, no) \
- SC(131, 37, 35, 4, yes, no, no) \
- \
- SC(132, 38, 36, 1, yes, no, no) \
- SC(133, 38, 36, 2, yes, no, no) \
- SC(134, 38, 36, 3, yes, no, no) \
- SC(135, 38, 36, 4, yes, no, no) \
- \
- SC(136, 39, 37, 1, yes, no, no) \
- SC(137, 39, 37, 2, yes, no, no) \
- SC(138, 39, 37, 3, yes, no, no) \
- SC(139, 39, 37, 4, yes, no, no) \
- \
- SC(140, 40, 38, 1, yes, no, no) \
- SC(141, 40, 38, 2, yes, no, no) \
- SC(142, 40, 38, 3, yes, no, no) \
- SC(143, 40, 38, 4, yes, no, no) \
- \
- SC(144, 41, 39, 1, yes, no, no) \
- SC(145, 41, 39, 2, yes, no, no) \
- SC(146, 41, 39, 3, yes, no, no) \
- SC(147, 41, 39, 4, yes, no, no) \
- \
- SC(148, 42, 40, 1, yes, no, no) \
- SC(149, 42, 40, 2, yes, no, no) \
- SC(150, 42, 40, 3, yes, no, no) \
- SC(151, 42, 40, 4, yes, no, no) \
- \
- SC(152, 43, 41, 1, yes, no, no) \
- SC(153, 43, 41, 2, yes, no, no) \
- SC(154, 43, 41, 3, yes, no, no) \
- SC(155, 43, 41, 4, yes, no, no) \
- \
- SC(156, 44, 42, 1, yes, no, no) \
- SC(157, 44, 42, 2, yes, no, no) \
- SC(158, 44, 42, 3, yes, no, no) \
- SC(159, 44, 42, 4, yes, no, no) \
- \
- SC(160, 45, 43, 1, yes, no, no) \
- SC(161, 45, 43, 2, yes, no, no) \
- SC(162, 45, 43, 3, yes, no, no) \
- SC(163, 45, 43, 4, yes, no, no) \
- \
- SC(164, 46, 44, 1, yes, no, no) \
- SC(165, 46, 44, 2, yes, no, no) \
- SC(166, 46, 44, 3, yes, no, no) \
- SC(167, 46, 44, 4, yes, no, no) \
- \
- SC(168, 47, 45, 1, yes, no, no) \
- SC(169, 47, 45, 2, yes, no, no) \
- SC(170, 47, 45, 3, yes, no, no) \
- SC(171, 47, 45, 4, yes, no, no) \
- \
- SC(172, 48, 46, 1, yes, no, no) \
- SC(173, 48, 46, 2, yes, no, no) \
- SC(174, 48, 46, 3, yes, no, no) \
- SC(175, 48, 46, 4, yes, no, no) \
- \
- SC(176, 49, 47, 1, yes, no, no) \
- SC(177, 49, 47, 2, yes, no, no) \
- SC(178, 49, 47, 3, yes, no, no) \
- SC(179, 49, 47, 4, yes, no, no) \
- \
- SC(180, 50, 48, 1, yes, no, no) \
- SC(181, 50, 48, 2, yes, no, no) \
- SC(182, 50, 48, 3, yes, no, no) \
- SC(183, 50, 48, 4, yes, no, no) \
- \
- SC(184, 51, 49, 1, yes, no, no) \
- SC(185, 51, 49, 2, yes, no, no) \
- SC(186, 51, 49, 3, yes, no, no) \
- SC(187, 51, 49, 4, yes, no, no) \
- \
- SC(188, 52, 50, 1, yes, no, no) \
- SC(189, 52, 50, 2, yes, no, no) \
- SC(190, 52, 50, 3, yes, no, no) \
- SC(191, 52, 50, 4, yes, no, no) \
- \
- SC(192, 53, 51, 1, yes, no, no) \
- SC(193, 53, 51, 2, yes, no, no) \
- SC(194, 53, 51, 3, yes, no, no) \
- SC(195, 53, 51, 4, yes, no, no) \
- \
- SC(196, 54, 52, 1, yes, no, no) \
- SC(197, 54, 52, 2, yes, no, no) \
- SC(198, 54, 52, 3, yes, no, no) \
- SC(199, 54, 52, 4, yes, no, no) \
- \
- SC(200, 55, 53, 1, yes, no, no) \
- SC(201, 55, 53, 2, yes, no, no) \
- SC(202, 55, 53, 3, yes, no, no) \
- SC(203, 55, 53, 4, yes, no, no) \
- \
- SC(204, 56, 54, 1, yes, no, no) \
- SC(205, 56, 54, 2, yes, no, no) \
- SC(206, 56, 54, 3, yes, no, no) \
- SC(207, 56, 54, 4, yes, no, no) \
- \
- SC(208, 57, 55, 1, yes, no, no) \
- SC(209, 57, 55, 2, yes, no, no) \
- SC(210, 57, 55, 3, yes, no, no) \
- SC(211, 57, 55, 4, yes, no, no) \
- \
- SC(212, 58, 56, 1, yes, no, no) \
- SC(213, 58, 56, 2, yes, no, no) \
- SC(214, 58, 56, 3, yes, no, no) \
- SC(215, 58, 56, 4, yes, no, no) \
- \
- SC(216, 59, 57, 1, yes, no, no) \
- SC(217, 59, 57, 2, yes, no, no) \
- SC(218, 59, 57, 3, yes, no, no) \
- SC(219, 59, 57, 4, yes, no, no) \
- \
- SC(220, 60, 58, 1, yes, no, no) \
- SC(221, 60, 58, 2, yes, no, no) \
- SC(222, 60, 58, 3, yes, no, no) \
- SC(223, 60, 58, 4, yes, no, no) \
- \
- SC(224, 61, 59, 1, yes, no, no) \
- SC(225, 61, 59, 2, yes, no, no) \
- SC(226, 61, 59, 3, yes, no, no) \
- SC(227, 61, 59, 4, yes, no, no) \
- \
- SC(228, 62, 60, 1, yes, no, no) \
- SC(229, 62, 60, 2, yes, no, no) \
- SC(230, 62, 60, 3, yes, no, no) \
+#define SIZE_CLASSES \
+ /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \
+ SC( 0, 4, 4, 0, no, yes, 1, 4) \
+ SC( 1, 4, 4, 1, no, yes, 1, 4) \
+ SC( 2, 4, 4, 2, no, yes, 3, 4) \
+ SC( 3, 4, 4, 3, no, yes, 1, 4) \
+ \
+ SC( 4, 6, 4, 1, no, yes, 5, 4) \
+ SC( 5, 6, 4, 2, no, yes, 3, 4) \
+ SC( 6, 6, 4, 3, no, yes, 7, 4) \
+ SC( 7, 6, 4, 4, no, yes, 1, 4) \
+ \
+ SC( 8, 7, 5, 1, no, yes, 5, 5) \
+ SC( 9, 7, 5, 2, no, yes, 3, 5) \
+ SC( 10, 7, 5, 3, no, yes, 7, 5) \
+ SC( 11, 7, 5, 4, no, yes, 1, 5) \
+ \
+ SC( 12, 8, 6, 1, no, yes, 5, 6) \
+ SC( 13, 8, 6, 2, no, yes, 3, 6) \
+ SC( 14, 8, 6, 3, no, yes, 7, 6) \
+ SC( 15, 8, 6, 4, no, yes, 1, 6) \
+ \
+ SC( 16, 9, 7, 1, no, yes, 5, 7) \
+ SC( 17, 9, 7, 2, no, yes, 3, 7) \
+ SC( 18, 9, 7, 3, no, yes, 7, 7) \
+ SC( 19, 9, 7, 4, no, yes, 1, 7) \
+ \
+ SC( 20, 10, 8, 1, no, yes, 5, 8) \
+ SC( 21, 10, 8, 2, no, yes, 3, 8) \
+ SC( 22, 10, 8, 3, no, yes, 7, 8) \
+ SC( 23, 10, 8, 4, no, yes, 1, 8) \
+ \
+ SC( 24, 11, 9, 1, no, yes, 5, 9) \
+ SC( 25, 11, 9, 2, no, yes, 3, 9) \
+ SC( 26, 11, 9, 3, no, yes, 7, 9) \
+ SC( 27, 11, 9, 4, no, yes, 1, 9) \
+ \
+ SC( 28, 12, 10, 1, no, yes, 5, no) \
+ SC( 29, 12, 10, 2, no, yes, 3, no) \
+ SC( 30, 12, 10, 3, no, yes, 7, no) \
+ SC( 31, 12, 10, 4, yes, yes, 1, no) \
+ \
+ SC( 32, 13, 11, 1, no, yes, 5, no) \
+ SC( 33, 13, 11, 2, no, yes, 3, no) \
+ SC( 34, 13, 11, 3, no, yes, 7, no) \
+ SC( 35, 13, 11, 4, yes, yes, 2, no) \
+ \
+ SC( 36, 14, 12, 1, no, yes, 5, no) \
+ SC( 37, 14, 12, 2, yes, yes, 3, no) \
+ SC( 38, 14, 12, 3, no, yes, 7, no) \
+ SC( 39, 14, 12, 4, yes, no, 0, no) \
+ \
+ SC( 40, 15, 13, 1, yes, no, 0, no) \
+ SC( 41, 15, 13, 2, yes, no, 0, no) \
+ SC( 42, 15, 13, 3, yes, no, 0, no) \
+ SC( 43, 15, 13, 4, yes, no, 0, no) \
+ \
+ SC( 44, 16, 14, 1, yes, no, 0, no) \
+ SC( 45, 16, 14, 2, yes, no, 0, no) \
+ SC( 46, 16, 14, 3, yes, no, 0, no) \
+ SC( 47, 16, 14, 4, yes, no, 0, no) \
+ \
+ SC( 48, 17, 15, 1, yes, no, 0, no) \
+ SC( 49, 17, 15, 2, yes, no, 0, no) \
+ SC( 50, 17, 15, 3, yes, no, 0, no) \
+ SC( 51, 17, 15, 4, yes, no, 0, no) \
+ \
+ SC( 52, 18, 16, 1, yes, no, 0, no) \
+ SC( 53, 18, 16, 2, yes, no, 0, no) \
+ SC( 54, 18, 16, 3, yes, no, 0, no) \
+ SC( 55, 18, 16, 4, yes, no, 0, no) \
+ \
+ SC( 56, 19, 17, 1, yes, no, 0, no) \
+ SC( 57, 19, 17, 2, yes, no, 0, no) \
+ SC( 58, 19, 17, 3, yes, no, 0, no) \
+ SC( 59, 19, 17, 4, yes, no, 0, no) \
+ \
+ SC( 60, 20, 18, 1, yes, no, 0, no) \
+ SC( 61, 20, 18, 2, yes, no, 0, no) \
+ SC( 62, 20, 18, 3, yes, no, 0, no) \
+ SC( 63, 20, 18, 4, yes, no, 0, no) \
+ \
+ SC( 64, 21, 19, 1, yes, no, 0, no) \
+ SC( 65, 21, 19, 2, yes, no, 0, no) \
+ SC( 66, 21, 19, 3, yes, no, 0, no) \
+ SC( 67, 21, 19, 4, yes, no, 0, no) \
+ \
+ SC( 68, 22, 20, 1, yes, no, 0, no) \
+ SC( 69, 22, 20, 2, yes, no, 0, no) \
+ SC( 70, 22, 20, 3, yes, no, 0, no) \
+ SC( 71, 22, 20, 4, yes, no, 0, no) \
+ \
+ SC( 72, 23, 21, 1, yes, no, 0, no) \
+ SC( 73, 23, 21, 2, yes, no, 0, no) \
+ SC( 74, 23, 21, 3, yes, no, 0, no) \
+ SC( 75, 23, 21, 4, yes, no, 0, no) \
+ \
+ SC( 76, 24, 22, 1, yes, no, 0, no) \
+ SC( 77, 24, 22, 2, yes, no, 0, no) \
+ SC( 78, 24, 22, 3, yes, no, 0, no) \
+ SC( 79, 24, 22, 4, yes, no, 0, no) \
+ \
+ SC( 80, 25, 23, 1, yes, no, 0, no) \
+ SC( 81, 25, 23, 2, yes, no, 0, no) \
+ SC( 82, 25, 23, 3, yes, no, 0, no) \
+ SC( 83, 25, 23, 4, yes, no, 0, no) \
+ \
+ SC( 84, 26, 24, 1, yes, no, 0, no) \
+ SC( 85, 26, 24, 2, yes, no, 0, no) \
+ SC( 86, 26, 24, 3, yes, no, 0, no) \
+ SC( 87, 26, 24, 4, yes, no, 0, no) \
+ \
+ SC( 88, 27, 25, 1, yes, no, 0, no) \
+ SC( 89, 27, 25, 2, yes, no, 0, no) \
+ SC( 90, 27, 25, 3, yes, no, 0, no) \
+ SC( 91, 27, 25, 4, yes, no, 0, no) \
+ \
+ SC( 92, 28, 26, 1, yes, no, 0, no) \
+ SC( 93, 28, 26, 2, yes, no, 0, no) \
+ SC( 94, 28, 26, 3, yes, no, 0, no) \
+ SC( 95, 28, 26, 4, yes, no, 0, no) \
+ \
+ SC( 96, 29, 27, 1, yes, no, 0, no) \
+ SC( 97, 29, 27, 2, yes, no, 0, no) \
+ SC( 98, 29, 27, 3, yes, no, 0, no) \
+ SC( 99, 29, 27, 4, yes, no, 0, no) \
+ \
+ SC(100, 30, 28, 1, yes, no, 0, no) \
+ SC(101, 30, 28, 2, yes, no, 0, no) \
+ SC(102, 30, 28, 3, yes, no, 0, no) \
+ SC(103, 30, 28, 4, yes, no, 0, no) \
+ \
+ SC(104, 31, 29, 1, yes, no, 0, no) \
+ SC(105, 31, 29, 2, yes, no, 0, no) \
+ SC(106, 31, 29, 3, yes, no, 0, no) \
+ SC(107, 31, 29, 4, yes, no, 0, no) \
+ \
+ SC(108, 32, 30, 1, yes, no, 0, no) \
+ SC(109, 32, 30, 2, yes, no, 0, no) \
+ SC(110, 32, 30, 3, yes, no, 0, no) \
+ SC(111, 32, 30, 4, yes, no, 0, no) \
+ \
+ SC(112, 33, 31, 1, yes, no, 0, no) \
+ SC(113, 33, 31, 2, yes, no, 0, no) \
+ SC(114, 33, 31, 3, yes, no, 0, no) \
+ SC(115, 33, 31, 4, yes, no, 0, no) \
+ \
+ SC(116, 34, 32, 1, yes, no, 0, no) \
+ SC(117, 34, 32, 2, yes, no, 0, no) \
+ SC(118, 34, 32, 3, yes, no, 0, no) \
+ SC(119, 34, 32, 4, yes, no, 0, no) \
+ \
+ SC(120, 35, 33, 1, yes, no, 0, no) \
+ SC(121, 35, 33, 2, yes, no, 0, no) \
+ SC(122, 35, 33, 3, yes, no, 0, no) \
+ SC(123, 35, 33, 4, yes, no, 0, no) \
+ \
+ SC(124, 36, 34, 1, yes, no, 0, no) \
+ SC(125, 36, 34, 2, yes, no, 0, no) \
+ SC(126, 36, 34, 3, yes, no, 0, no) \
+ SC(127, 36, 34, 4, yes, no, 0, no) \
+ \
+ SC(128, 37, 35, 1, yes, no, 0, no) \
+ SC(129, 37, 35, 2, yes, no, 0, no) \
+ SC(130, 37, 35, 3, yes, no, 0, no) \
+ SC(131, 37, 35, 4, yes, no, 0, no) \
+ \
+ SC(132, 38, 36, 1, yes, no, 0, no) \
+ SC(133, 38, 36, 2, yes, no, 0, no) \
+ SC(134, 38, 36, 3, yes, no, 0, no) \
+ SC(135, 38, 36, 4, yes, no, 0, no) \
+ \
+ SC(136, 39, 37, 1, yes, no, 0, no) \
+ SC(137, 39, 37, 2, yes, no, 0, no) \
+ SC(138, 39, 37, 3, yes, no, 0, no) \
+ SC(139, 39, 37, 4, yes, no, 0, no) \
+ \
+ SC(140, 40, 38, 1, yes, no, 0, no) \
+ SC(141, 40, 38, 2, yes, no, 0, no) \
+ SC(142, 40, 38, 3, yes, no, 0, no) \
+ SC(143, 40, 38, 4, yes, no, 0, no) \
+ \
+ SC(144, 41, 39, 1, yes, no, 0, no) \
+ SC(145, 41, 39, 2, yes, no, 0, no) \
+ SC(146, 41, 39, 3, yes, no, 0, no) \
+ SC(147, 41, 39, 4, yes, no, 0, no) \
+ \
+ SC(148, 42, 40, 1, yes, no, 0, no) \
+ SC(149, 42, 40, 2, yes, no, 0, no) \
+ SC(150, 42, 40, 3, yes, no, 0, no) \
+ SC(151, 42, 40, 4, yes, no, 0, no) \
+ \
+ SC(152, 43, 41, 1, yes, no, 0, no) \
+ SC(153, 43, 41, 2, yes, no, 0, no) \
+ SC(154, 43, 41, 3, yes, no, 0, no) \
+ SC(155, 43, 41, 4, yes, no, 0, no) \
+ \
+ SC(156, 44, 42, 1, yes, no, 0, no) \
+ SC(157, 44, 42, 2, yes, no, 0, no) \
+ SC(158, 44, 42, 3, yes, no, 0, no) \
+ SC(159, 44, 42, 4, yes, no, 0, no) \
+ \
+ SC(160, 45, 43, 1, yes, no, 0, no) \
+ SC(161, 45, 43, 2, yes, no, 0, no) \
+ SC(162, 45, 43, 3, yes, no, 0, no) \
+ SC(163, 45, 43, 4, yes, no, 0, no) \
+ \
+ SC(164, 46, 44, 1, yes, no, 0, no) \
+ SC(165, 46, 44, 2, yes, no, 0, no) \
+ SC(166, 46, 44, 3, yes, no, 0, no) \
+ SC(167, 46, 44, 4, yes, no, 0, no) \
+ \
+ SC(168, 47, 45, 1, yes, no, 0, no) \
+ SC(169, 47, 45, 2, yes, no, 0, no) \
+ SC(170, 47, 45, 3, yes, no, 0, no) \
+ SC(171, 47, 45, 4, yes, no, 0, no) \
+ \
+ SC(172, 48, 46, 1, yes, no, 0, no) \
+ SC(173, 48, 46, 2, yes, no, 0, no) \
+ SC(174, 48, 46, 3, yes, no, 0, no) \
+ SC(175, 48, 46, 4, yes, no, 0, no) \
+ \
+ SC(176, 49, 47, 1, yes, no, 0, no) \
+ SC(177, 49, 47, 2, yes, no, 0, no) \
+ SC(178, 49, 47, 3, yes, no, 0, no) \
+ SC(179, 49, 47, 4, yes, no, 0, no) \
+ \
+ SC(180, 50, 48, 1, yes, no, 0, no) \
+ SC(181, 50, 48, 2, yes, no, 0, no) \
+ SC(182, 50, 48, 3, yes, no, 0, no) \
+ SC(183, 50, 48, 4, yes, no, 0, no) \
+ \
+ SC(184, 51, 49, 1, yes, no, 0, no) \
+ SC(185, 51, 49, 2, yes, no, 0, no) \
+ SC(186, 51, 49, 3, yes, no, 0, no) \
+ SC(187, 51, 49, 4, yes, no, 0, no) \
+ \
+ SC(188, 52, 50, 1, yes, no, 0, no) \
+ SC(189, 52, 50, 2, yes, no, 0, no) \
+ SC(190, 52, 50, 3, yes, no, 0, no) \
+ SC(191, 52, 50, 4, yes, no, 0, no) \
+ \
+ SC(192, 53, 51, 1, yes, no, 0, no) \
+ SC(193, 53, 51, 2, yes, no, 0, no) \
+ SC(194, 53, 51, 3, yes, no, 0, no) \
+ SC(195, 53, 51, 4, yes, no, 0, no) \
+ \
+ SC(196, 54, 52, 1, yes, no, 0, no) \
+ SC(197, 54, 52, 2, yes, no, 0, no) \
+ SC(198, 54, 52, 3, yes, no, 0, no) \
+ SC(199, 54, 52, 4, yes, no, 0, no) \
+ \
+ SC(200, 55, 53, 1, yes, no, 0, no) \
+ SC(201, 55, 53, 2, yes, no, 0, no) \
+ SC(202, 55, 53, 3, yes, no, 0, no) \
+ SC(203, 55, 53, 4, yes, no, 0, no) \
+ \
+ SC(204, 56, 54, 1, yes, no, 0, no) \
+ SC(205, 56, 54, 2, yes, no, 0, no) \
+ SC(206, 56, 54, 3, yes, no, 0, no) \
+ SC(207, 56, 54, 4, yes, no, 0, no) \
+ \
+ SC(208, 57, 55, 1, yes, no, 0, no) \
+ SC(209, 57, 55, 2, yes, no, 0, no) \
+ SC(210, 57, 55, 3, yes, no, 0, no) \
+ SC(211, 57, 55, 4, yes, no, 0, no) \
+ \
+ SC(212, 58, 56, 1, yes, no, 0, no) \
+ SC(213, 58, 56, 2, yes, no, 0, no) \
+ SC(214, 58, 56, 3, yes, no, 0, no) \
+ SC(215, 58, 56, 4, yes, no, 0, no) \
+ \
+ SC(216, 59, 57, 1, yes, no, 0, no) \
+ SC(217, 59, 57, 2, yes, no, 0, no) \
+ SC(218, 59, 57, 3, yes, no, 0, no) \
+ SC(219, 59, 57, 4, yes, no, 0, no) \
+ \
+ SC(220, 60, 58, 1, yes, no, 0, no) \
+ SC(221, 60, 58, 2, yes, no, 0, no) \
+ SC(222, 60, 58, 3, yes, no, 0, no) \
+ SC(223, 60, 58, 4, yes, no, 0, no) \
+ \
+ SC(224, 61, 59, 1, yes, no, 0, no) \
+ SC(225, 61, 59, 2, yes, no, 0, no) \
+ SC(226, 61, 59, 3, yes, no, 0, no) \
+ SC(227, 61, 59, 4, yes, no, 0, no) \
+ \
+ SC(228, 62, 60, 1, yes, no, 0, no) \
+ SC(229, 62, 60, 2, yes, no, 0, no) \
+ SC(230, 62, 60, 3, yes, no, 0, no) \
-#define SIZE_CLASSES_DEFINED
-#define NTBINS 0
-#define NLBINS 28
-#define NBINS 39
-#define NSIZES 231
-#define NPSIZES 195
-#define LG_TINY_MAXCLASS "NA"
-#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
-#define SMALL_MAXCLASS ((((size_t)1) << 14) + (((size_t)3) << 12))
-#define LG_LARGE_MINCLASS 15
-#define HUGE_MAXCLASS ((((size_t)1) << 62) + (((size_t)3) << 60))
+#define SIZE_CLASSES_DEFINED
+#define NTBINS 0
+#define NLBINS 28
+#define NBINS 39
+#define NSIZES 231
+#define LG_CEIL_NSIZES 8
+#define NPSIZES 195
+#define LG_TINY_MAXCLASS "NA"
+#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
+#define SMALL_MAXCLASS ((((size_t)1) << 14) + (((size_t)3) << 12))
+#define LG_LARGE_MINCLASS 15
+#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)
+#define LARGE_MAXCLASS ((((size_t)1) << 62) + (((size_t)3) << 60))
#endif
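
The old HUGE_MAXCLASS define is replaced by LARGE_MAXCLASS, and the new LARGE_MINCLASS is simply ZU(1) << LG_LARGE_MINCLASS; for the LG_PAGE == 13 block above that is 32768, which coincides with the first non-bin row (index 39). A short sanity check of those numbers, as a sketch with ZU() re-declared locally:

#include <assert.h>
#include <stddef.h>

#define ZU(z)	((size_t)(z))	/* local stand-in for jemalloc's ZU() */

int
main(void) {
	/* Derived constants from the LG_PAGE == 13 block above. */
	size_t small_maxclass = (((size_t)1) << 14) + (((size_t)3) << 12);
	size_t large_minclass = ZU(1) << 15;	/* LG_LARGE_MINCLASS == 15 */
	/* Index 39 above (lg_grp 14, lg_delta 12, ndelta 4): first non-bin row. */
	size_t first_nonbin = ((size_t)1 << 14) + ((size_t)4 << 12);

	assert(small_maxclass == 28672);
	assert(large_minclass == 32768);
	assert(large_minclass == first_nonbin);
	return 0;
}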
#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 14)
-#define SIZE_CLASSES \
- /* index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup */ \
- SC( 0, 4, 4, 0, no, yes, 4) \
- SC( 1, 4, 4, 1, no, yes, 4) \
- SC( 2, 4, 4, 2, no, yes, 4) \
- SC( 3, 4, 4, 3, no, yes, 4) \
- \
- SC( 4, 6, 4, 1, no, yes, 4) \
- SC( 5, 6, 4, 2, no, yes, 4) \
- SC( 6, 6, 4, 3, no, yes, 4) \
- SC( 7, 6, 4, 4, no, yes, 4) \
- \
- SC( 8, 7, 5, 1, no, yes, 5) \
- SC( 9, 7, 5, 2, no, yes, 5) \
- SC( 10, 7, 5, 3, no, yes, 5) \
- SC( 11, 7, 5, 4, no, yes, 5) \
- \
- SC( 12, 8, 6, 1, no, yes, 6) \
- SC( 13, 8, 6, 2, no, yes, 6) \
- SC( 14, 8, 6, 3, no, yes, 6) \
- SC( 15, 8, 6, 4, no, yes, 6) \
- \
- SC( 16, 9, 7, 1, no, yes, 7) \
- SC( 17, 9, 7, 2, no, yes, 7) \
- SC( 18, 9, 7, 3, no, yes, 7) \
- SC( 19, 9, 7, 4, no, yes, 7) \
- \
- SC( 20, 10, 8, 1, no, yes, 8) \
- SC( 21, 10, 8, 2, no, yes, 8) \
- SC( 22, 10, 8, 3, no, yes, 8) \
- SC( 23, 10, 8, 4, no, yes, 8) \
- \
- SC( 24, 11, 9, 1, no, yes, 9) \
- SC( 25, 11, 9, 2, no, yes, 9) \
- SC( 26, 11, 9, 3, no, yes, 9) \
- SC( 27, 11, 9, 4, no, yes, 9) \
- \
- SC( 28, 12, 10, 1, no, yes, no) \
- SC( 29, 12, 10, 2, no, yes, no) \
- SC( 30, 12, 10, 3, no, yes, no) \
- SC( 31, 12, 10, 4, no, yes, no) \
- \
- SC( 32, 13, 11, 1, no, yes, no) \
- SC( 33, 13, 11, 2, no, yes, no) \
- SC( 34, 13, 11, 3, no, yes, no) \
- SC( 35, 13, 11, 4, yes, yes, no) \
- \
- SC( 36, 14, 12, 1, no, yes, no) \
- SC( 37, 14, 12, 2, no, yes, no) \
- SC( 38, 14, 12, 3, no, yes, no) \
- SC( 39, 14, 12, 4, yes, yes, no) \
- \
- SC( 40, 15, 13, 1, no, yes, no) \
- SC( 41, 15, 13, 2, yes, yes, no) \
- SC( 42, 15, 13, 3, no, yes, no) \
- SC( 43, 15, 13, 4, yes, no, no) \
- \
- SC( 44, 16, 14, 1, yes, no, no) \
- SC( 45, 16, 14, 2, yes, no, no) \
- SC( 46, 16, 14, 3, yes, no, no) \
- SC( 47, 16, 14, 4, yes, no, no) \
- \
- SC( 48, 17, 15, 1, yes, no, no) \
- SC( 49, 17, 15, 2, yes, no, no) \
- SC( 50, 17, 15, 3, yes, no, no) \
- SC( 51, 17, 15, 4, yes, no, no) \
- \
- SC( 52, 18, 16, 1, yes, no, no) \
- SC( 53, 18, 16, 2, yes, no, no) \
- SC( 54, 18, 16, 3, yes, no, no) \
- SC( 55, 18, 16, 4, yes, no, no) \
- \
- SC( 56, 19, 17, 1, yes, no, no) \
- SC( 57, 19, 17, 2, yes, no, no) \
- SC( 58, 19, 17, 3, yes, no, no) \
- SC( 59, 19, 17, 4, yes, no, no) \
- \
- SC( 60, 20, 18, 1, yes, no, no) \
- SC( 61, 20, 18, 2, yes, no, no) \
- SC( 62, 20, 18, 3, yes, no, no) \
- SC( 63, 20, 18, 4, yes, no, no) \
- \
- SC( 64, 21, 19, 1, yes, no, no) \
- SC( 65, 21, 19, 2, yes, no, no) \
- SC( 66, 21, 19, 3, yes, no, no) \
- SC( 67, 21, 19, 4, yes, no, no) \
- \
- SC( 68, 22, 20, 1, yes, no, no) \
- SC( 69, 22, 20, 2, yes, no, no) \
- SC( 70, 22, 20, 3, yes, no, no) \
- SC( 71, 22, 20, 4, yes, no, no) \
- \
- SC( 72, 23, 21, 1, yes, no, no) \
- SC( 73, 23, 21, 2, yes, no, no) \
- SC( 74, 23, 21, 3, yes, no, no) \
- SC( 75, 23, 21, 4, yes, no, no) \
- \
- SC( 76, 24, 22, 1, yes, no, no) \
- SC( 77, 24, 22, 2, yes, no, no) \
- SC( 78, 24, 22, 3, yes, no, no) \
- SC( 79, 24, 22, 4, yes, no, no) \
- \
- SC( 80, 25, 23, 1, yes, no, no) \
- SC( 81, 25, 23, 2, yes, no, no) \
- SC( 82, 25, 23, 3, yes, no, no) \
- SC( 83, 25, 23, 4, yes, no, no) \
- \
- SC( 84, 26, 24, 1, yes, no, no) \
- SC( 85, 26, 24, 2, yes, no, no) \
- SC( 86, 26, 24, 3, yes, no, no) \
- SC( 87, 26, 24, 4, yes, no, no) \
- \
- SC( 88, 27, 25, 1, yes, no, no) \
- SC( 89, 27, 25, 2, yes, no, no) \
- SC( 90, 27, 25, 3, yes, no, no) \
- SC( 91, 27, 25, 4, yes, no, no) \
- \
- SC( 92, 28, 26, 1, yes, no, no) \
- SC( 93, 28, 26, 2, yes, no, no) \
- SC( 94, 28, 26, 3, yes, no, no) \
- SC( 95, 28, 26, 4, yes, no, no) \
- \
- SC( 96, 29, 27, 1, yes, no, no) \
- SC( 97, 29, 27, 2, yes, no, no) \
- SC( 98, 29, 27, 3, yes, no, no) \
- SC( 99, 29, 27, 4, yes, no, no) \
- \
- SC(100, 30, 28, 1, yes, no, no) \
- SC(101, 30, 28, 2, yes, no, no) \
- SC(102, 30, 28, 3, yes, no, no) \
- SC(103, 30, 28, 4, yes, no, no) \
- \
- SC(104, 31, 29, 1, yes, no, no) \
- SC(105, 31, 29, 2, yes, no, no) \
- SC(106, 31, 29, 3, yes, no, no) \
- SC(107, 31, 29, 4, yes, no, no) \
- \
- SC(108, 32, 30, 1, yes, no, no) \
- SC(109, 32, 30, 2, yes, no, no) \
- SC(110, 32, 30, 3, yes, no, no) \
- SC(111, 32, 30, 4, yes, no, no) \
- \
- SC(112, 33, 31, 1, yes, no, no) \
- SC(113, 33, 31, 2, yes, no, no) \
- SC(114, 33, 31, 3, yes, no, no) \
- SC(115, 33, 31, 4, yes, no, no) \
- \
- SC(116, 34, 32, 1, yes, no, no) \
- SC(117, 34, 32, 2, yes, no, no) \
- SC(118, 34, 32, 3, yes, no, no) \
- SC(119, 34, 32, 4, yes, no, no) \
- \
- SC(120, 35, 33, 1, yes, no, no) \
- SC(121, 35, 33, 2, yes, no, no) \
- SC(122, 35, 33, 3, yes, no, no) \
- SC(123, 35, 33, 4, yes, no, no) \
- \
- SC(124, 36, 34, 1, yes, no, no) \
- SC(125, 36, 34, 2, yes, no, no) \
- SC(126, 36, 34, 3, yes, no, no) \
- SC(127, 36, 34, 4, yes, no, no) \
- \
- SC(128, 37, 35, 1, yes, no, no) \
- SC(129, 37, 35, 2, yes, no, no) \
- SC(130, 37, 35, 3, yes, no, no) \
- SC(131, 37, 35, 4, yes, no, no) \
- \
- SC(132, 38, 36, 1, yes, no, no) \
- SC(133, 38, 36, 2, yes, no, no) \
- SC(134, 38, 36, 3, yes, no, no) \
- SC(135, 38, 36, 4, yes, no, no) \
- \
- SC(136, 39, 37, 1, yes, no, no) \
- SC(137, 39, 37, 2, yes, no, no) \
- SC(138, 39, 37, 3, yes, no, no) \
- SC(139, 39, 37, 4, yes, no, no) \
- \
- SC(140, 40, 38, 1, yes, no, no) \
- SC(141, 40, 38, 2, yes, no, no) \
- SC(142, 40, 38, 3, yes, no, no) \
- SC(143, 40, 38, 4, yes, no, no) \
- \
- SC(144, 41, 39, 1, yes, no, no) \
- SC(145, 41, 39, 2, yes, no, no) \
- SC(146, 41, 39, 3, yes, no, no) \
- SC(147, 41, 39, 4, yes, no, no) \
- \
- SC(148, 42, 40, 1, yes, no, no) \
- SC(149, 42, 40, 2, yes, no, no) \
- SC(150, 42, 40, 3, yes, no, no) \
- SC(151, 42, 40, 4, yes, no, no) \
- \
- SC(152, 43, 41, 1, yes, no, no) \
- SC(153, 43, 41, 2, yes, no, no) \
- SC(154, 43, 41, 3, yes, no, no) \
- SC(155, 43, 41, 4, yes, no, no) \
- \
- SC(156, 44, 42, 1, yes, no, no) \
- SC(157, 44, 42, 2, yes, no, no) \
- SC(158, 44, 42, 3, yes, no, no) \
- SC(159, 44, 42, 4, yes, no, no) \
- \
- SC(160, 45, 43, 1, yes, no, no) \
- SC(161, 45, 43, 2, yes, no, no) \
- SC(162, 45, 43, 3, yes, no, no) \
- SC(163, 45, 43, 4, yes, no, no) \
- \
- SC(164, 46, 44, 1, yes, no, no) \
- SC(165, 46, 44, 2, yes, no, no) \
- SC(166, 46, 44, 3, yes, no, no) \
- SC(167, 46, 44, 4, yes, no, no) \
- \
- SC(168, 47, 45, 1, yes, no, no) \
- SC(169, 47, 45, 2, yes, no, no) \
- SC(170, 47, 45, 3, yes, no, no) \
- SC(171, 47, 45, 4, yes, no, no) \
- \
- SC(172, 48, 46, 1, yes, no, no) \
- SC(173, 48, 46, 2, yes, no, no) \
- SC(174, 48, 46, 3, yes, no, no) \
- SC(175, 48, 46, 4, yes, no, no) \
- \
- SC(176, 49, 47, 1, yes, no, no) \
- SC(177, 49, 47, 2, yes, no, no) \
- SC(178, 49, 47, 3, yes, no, no) \
- SC(179, 49, 47, 4, yes, no, no) \
- \
- SC(180, 50, 48, 1, yes, no, no) \
- SC(181, 50, 48, 2, yes, no, no) \
- SC(182, 50, 48, 3, yes, no, no) \
- SC(183, 50, 48, 4, yes, no, no) \
- \
- SC(184, 51, 49, 1, yes, no, no) \
- SC(185, 51, 49, 2, yes, no, no) \
- SC(186, 51, 49, 3, yes, no, no) \
- SC(187, 51, 49, 4, yes, no, no) \
- \
- SC(188, 52, 50, 1, yes, no, no) \
- SC(189, 52, 50, 2, yes, no, no) \
- SC(190, 52, 50, 3, yes, no, no) \
- SC(191, 52, 50, 4, yes, no, no) \
- \
- SC(192, 53, 51, 1, yes, no, no) \
- SC(193, 53, 51, 2, yes, no, no) \
- SC(194, 53, 51, 3, yes, no, no) \
- SC(195, 53, 51, 4, yes, no, no) \
- \
- SC(196, 54, 52, 1, yes, no, no) \
- SC(197, 54, 52, 2, yes, no, no) \
- SC(198, 54, 52, 3, yes, no, no) \
- SC(199, 54, 52, 4, yes, no, no) \
- \
- SC(200, 55, 53, 1, yes, no, no) \
- SC(201, 55, 53, 2, yes, no, no) \
- SC(202, 55, 53, 3, yes, no, no) \
- SC(203, 55, 53, 4, yes, no, no) \
- \
- SC(204, 56, 54, 1, yes, no, no) \
- SC(205, 56, 54, 2, yes, no, no) \
- SC(206, 56, 54, 3, yes, no, no) \
- SC(207, 56, 54, 4, yes, no, no) \
- \
- SC(208, 57, 55, 1, yes, no, no) \
- SC(209, 57, 55, 2, yes, no, no) \
- SC(210, 57, 55, 3, yes, no, no) \
- SC(211, 57, 55, 4, yes, no, no) \
- \
- SC(212, 58, 56, 1, yes, no, no) \
- SC(213, 58, 56, 2, yes, no, no) \
- SC(214, 58, 56, 3, yes, no, no) \
- SC(215, 58, 56, 4, yes, no, no) \
- \
- SC(216, 59, 57, 1, yes, no, no) \
- SC(217, 59, 57, 2, yes, no, no) \
- SC(218, 59, 57, 3, yes, no, no) \
- SC(219, 59, 57, 4, yes, no, no) \
- \
- SC(220, 60, 58, 1, yes, no, no) \
- SC(221, 60, 58, 2, yes, no, no) \
- SC(222, 60, 58, 3, yes, no, no) \
- SC(223, 60, 58, 4, yes, no, no) \
- \
- SC(224, 61, 59, 1, yes, no, no) \
- SC(225, 61, 59, 2, yes, no, no) \
- SC(226, 61, 59, 3, yes, no, no) \
- SC(227, 61, 59, 4, yes, no, no) \
- \
- SC(228, 62, 60, 1, yes, no, no) \
- SC(229, 62, 60, 2, yes, no, no) \
- SC(230, 62, 60, 3, yes, no, no) \
+#define SIZE_CLASSES \
+ /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \
+ SC( 0, 4, 4, 0, no, yes, 1, 4) \
+ SC( 1, 4, 4, 1, no, yes, 1, 4) \
+ SC( 2, 4, 4, 2, no, yes, 3, 4) \
+ SC( 3, 4, 4, 3, no, yes, 1, 4) \
+ \
+ SC( 4, 6, 4, 1, no, yes, 5, 4) \
+ SC( 5, 6, 4, 2, no, yes, 3, 4) \
+ SC( 6, 6, 4, 3, no, yes, 7, 4) \
+ SC( 7, 6, 4, 4, no, yes, 1, 4) \
+ \
+ SC( 8, 7, 5, 1, no, yes, 5, 5) \
+ SC( 9, 7, 5, 2, no, yes, 3, 5) \
+ SC( 10, 7, 5, 3, no, yes, 7, 5) \
+ SC( 11, 7, 5, 4, no, yes, 1, 5) \
+ \
+ SC( 12, 8, 6, 1, no, yes, 5, 6) \
+ SC( 13, 8, 6, 2, no, yes, 3, 6) \
+ SC( 14, 8, 6, 3, no, yes, 7, 6) \
+ SC( 15, 8, 6, 4, no, yes, 1, 6) \
+ \
+ SC( 16, 9, 7, 1, no, yes, 5, 7) \
+ SC( 17, 9, 7, 2, no, yes, 3, 7) \
+ SC( 18, 9, 7, 3, no, yes, 7, 7) \
+ SC( 19, 9, 7, 4, no, yes, 1, 7) \
+ \
+ SC( 20, 10, 8, 1, no, yes, 5, 8) \
+ SC( 21, 10, 8, 2, no, yes, 3, 8) \
+ SC( 22, 10, 8, 3, no, yes, 7, 8) \
+ SC( 23, 10, 8, 4, no, yes, 1, 8) \
+ \
+ SC( 24, 11, 9, 1, no, yes, 5, 9) \
+ SC( 25, 11, 9, 2, no, yes, 3, 9) \
+ SC( 26, 11, 9, 3, no, yes, 7, 9) \
+ SC( 27, 11, 9, 4, no, yes, 1, 9) \
+ \
+ SC( 28, 12, 10, 1, no, yes, 5, no) \
+ SC( 29, 12, 10, 2, no, yes, 3, no) \
+ SC( 30, 12, 10, 3, no, yes, 7, no) \
+ SC( 31, 12, 10, 4, no, yes, 1, no) \
+ \
+ SC( 32, 13, 11, 1, no, yes, 5, no) \
+ SC( 33, 13, 11, 2, no, yes, 3, no) \
+ SC( 34, 13, 11, 3, no, yes, 7, no) \
+ SC( 35, 13, 11, 4, yes, yes, 1, no) \
+ \
+ SC( 36, 14, 12, 1, no, yes, 5, no) \
+ SC( 37, 14, 12, 2, no, yes, 3, no) \
+ SC( 38, 14, 12, 3, no, yes, 7, no) \
+ SC( 39, 14, 12, 4, yes, yes, 2, no) \
+ \
+ SC( 40, 15, 13, 1, no, yes, 5, no) \
+ SC( 41, 15, 13, 2, yes, yes, 3, no) \
+ SC( 42, 15, 13, 3, no, yes, 7, no) \
+ SC( 43, 15, 13, 4, yes, no, 0, no) \
+ \
+ SC( 44, 16, 14, 1, yes, no, 0, no) \
+ SC( 45, 16, 14, 2, yes, no, 0, no) \
+ SC( 46, 16, 14, 3, yes, no, 0, no) \
+ SC( 47, 16, 14, 4, yes, no, 0, no) \
+ \
+ SC( 48, 17, 15, 1, yes, no, 0, no) \
+ SC( 49, 17, 15, 2, yes, no, 0, no) \
+ SC( 50, 17, 15, 3, yes, no, 0, no) \
+ SC( 51, 17, 15, 4, yes, no, 0, no) \
+ \
+ SC( 52, 18, 16, 1, yes, no, 0, no) \
+ SC( 53, 18, 16, 2, yes, no, 0, no) \
+ SC( 54, 18, 16, 3, yes, no, 0, no) \
+ SC( 55, 18, 16, 4, yes, no, 0, no) \
+ \
+ SC( 56, 19, 17, 1, yes, no, 0, no) \
+ SC( 57, 19, 17, 2, yes, no, 0, no) \
+ SC( 58, 19, 17, 3, yes, no, 0, no) \
+ SC( 59, 19, 17, 4, yes, no, 0, no) \
+ \
+ SC( 60, 20, 18, 1, yes, no, 0, no) \
+ SC( 61, 20, 18, 2, yes, no, 0, no) \
+ SC( 62, 20, 18, 3, yes, no, 0, no) \
+ SC( 63, 20, 18, 4, yes, no, 0, no) \
+ \
+ SC( 64, 21, 19, 1, yes, no, 0, no) \
+ SC( 65, 21, 19, 2, yes, no, 0, no) \
+ SC( 66, 21, 19, 3, yes, no, 0, no) \
+ SC( 67, 21, 19, 4, yes, no, 0, no) \
+ \
+ SC( 68, 22, 20, 1, yes, no, 0, no) \
+ SC( 69, 22, 20, 2, yes, no, 0, no) \
+ SC( 70, 22, 20, 3, yes, no, 0, no) \
+ SC( 71, 22, 20, 4, yes, no, 0, no) \
+ \
+ SC( 72, 23, 21, 1, yes, no, 0, no) \
+ SC( 73, 23, 21, 2, yes, no, 0, no) \
+ SC( 74, 23, 21, 3, yes, no, 0, no) \
+ SC( 75, 23, 21, 4, yes, no, 0, no) \
+ \
+ SC( 76, 24, 22, 1, yes, no, 0, no) \
+ SC( 77, 24, 22, 2, yes, no, 0, no) \
+ SC( 78, 24, 22, 3, yes, no, 0, no) \
+ SC( 79, 24, 22, 4, yes, no, 0, no) \
+ \
+ SC( 80, 25, 23, 1, yes, no, 0, no) \
+ SC( 81, 25, 23, 2, yes, no, 0, no) \
+ SC( 82, 25, 23, 3, yes, no, 0, no) \
+ SC( 83, 25, 23, 4, yes, no, 0, no) \
+ \
+ SC( 84, 26, 24, 1, yes, no, 0, no) \
+ SC( 85, 26, 24, 2, yes, no, 0, no) \
+ SC( 86, 26, 24, 3, yes, no, 0, no) \
+ SC( 87, 26, 24, 4, yes, no, 0, no) \
+ \
+ SC( 88, 27, 25, 1, yes, no, 0, no) \
+ SC( 89, 27, 25, 2, yes, no, 0, no) \
+ SC( 90, 27, 25, 3, yes, no, 0, no) \
+ SC( 91, 27, 25, 4, yes, no, 0, no) \
+ \
+ SC( 92, 28, 26, 1, yes, no, 0, no) \
+ SC( 93, 28, 26, 2, yes, no, 0, no) \
+ SC( 94, 28, 26, 3, yes, no, 0, no) \
+ SC( 95, 28, 26, 4, yes, no, 0, no) \
+ \
+ SC( 96, 29, 27, 1, yes, no, 0, no) \
+ SC( 97, 29, 27, 2, yes, no, 0, no) \
+ SC( 98, 29, 27, 3, yes, no, 0, no) \
+ SC( 99, 29, 27, 4, yes, no, 0, no) \
+ \
+ SC(100, 30, 28, 1, yes, no, 0, no) \
+ SC(101, 30, 28, 2, yes, no, 0, no) \
+ SC(102, 30, 28, 3, yes, no, 0, no) \
+ SC(103, 30, 28, 4, yes, no, 0, no) \
+ \
+ SC(104, 31, 29, 1, yes, no, 0, no) \
+ SC(105, 31, 29, 2, yes, no, 0, no) \
+ SC(106, 31, 29, 3, yes, no, 0, no) \
+ SC(107, 31, 29, 4, yes, no, 0, no) \
+ \
+ SC(108, 32, 30, 1, yes, no, 0, no) \
+ SC(109, 32, 30, 2, yes, no, 0, no) \
+ SC(110, 32, 30, 3, yes, no, 0, no) \
+ SC(111, 32, 30, 4, yes, no, 0, no) \
+ \
+ SC(112, 33, 31, 1, yes, no, 0, no) \
+ SC(113, 33, 31, 2, yes, no, 0, no) \
+ SC(114, 33, 31, 3, yes, no, 0, no) \
+ SC(115, 33, 31, 4, yes, no, 0, no) \
+ \
+ SC(116, 34, 32, 1, yes, no, 0, no) \
+ SC(117, 34, 32, 2, yes, no, 0, no) \
+ SC(118, 34, 32, 3, yes, no, 0, no) \
+ SC(119, 34, 32, 4, yes, no, 0, no) \
+ \
+ SC(120, 35, 33, 1, yes, no, 0, no) \
+ SC(121, 35, 33, 2, yes, no, 0, no) \
+ SC(122, 35, 33, 3, yes, no, 0, no) \
+ SC(123, 35, 33, 4, yes, no, 0, no) \
+ \
+ SC(124, 36, 34, 1, yes, no, 0, no) \
+ SC(125, 36, 34, 2, yes, no, 0, no) \
+ SC(126, 36, 34, 3, yes, no, 0, no) \
+ SC(127, 36, 34, 4, yes, no, 0, no) \
+ \
+ SC(128, 37, 35, 1, yes, no, 0, no) \
+ SC(129, 37, 35, 2, yes, no, 0, no) \
+ SC(130, 37, 35, 3, yes, no, 0, no) \
+ SC(131, 37, 35, 4, yes, no, 0, no) \
+ \
+ SC(132, 38, 36, 1, yes, no, 0, no) \
+ SC(133, 38, 36, 2, yes, no, 0, no) \
+ SC(134, 38, 36, 3, yes, no, 0, no) \
+ SC(135, 38, 36, 4, yes, no, 0, no) \
+ \
+ SC(136, 39, 37, 1, yes, no, 0, no) \
+ SC(137, 39, 37, 2, yes, no, 0, no) \
+ SC(138, 39, 37, 3, yes, no, 0, no) \
+ SC(139, 39, 37, 4, yes, no, 0, no) \
+ \
+ SC(140, 40, 38, 1, yes, no, 0, no) \
+ SC(141, 40, 38, 2, yes, no, 0, no) \
+ SC(142, 40, 38, 3, yes, no, 0, no) \
+ SC(143, 40, 38, 4, yes, no, 0, no) \
+ \
+ SC(144, 41, 39, 1, yes, no, 0, no) \
+ SC(145, 41, 39, 2, yes, no, 0, no) \
+ SC(146, 41, 39, 3, yes, no, 0, no) \
+ SC(147, 41, 39, 4, yes, no, 0, no) \
+ \
+ SC(148, 42, 40, 1, yes, no, 0, no) \
+ SC(149, 42, 40, 2, yes, no, 0, no) \
+ SC(150, 42, 40, 3, yes, no, 0, no) \
+ SC(151, 42, 40, 4, yes, no, 0, no) \
+ \
+ SC(152, 43, 41, 1, yes, no, 0, no) \
+ SC(153, 43, 41, 2, yes, no, 0, no) \
+ SC(154, 43, 41, 3, yes, no, 0, no) \
+ SC(155, 43, 41, 4, yes, no, 0, no) \
+ \
+ SC(156, 44, 42, 1, yes, no, 0, no) \
+ SC(157, 44, 42, 2, yes, no, 0, no) \
+ SC(158, 44, 42, 3, yes, no, 0, no) \
+ SC(159, 44, 42, 4, yes, no, 0, no) \
+ \
+ SC(160, 45, 43, 1, yes, no, 0, no) \
+ SC(161, 45, 43, 2, yes, no, 0, no) \
+ SC(162, 45, 43, 3, yes, no, 0, no) \
+ SC(163, 45, 43, 4, yes, no, 0, no) \
+ \
+ SC(164, 46, 44, 1, yes, no, 0, no) \
+ SC(165, 46, 44, 2, yes, no, 0, no) \
+ SC(166, 46, 44, 3, yes, no, 0, no) \
+ SC(167, 46, 44, 4, yes, no, 0, no) \
+ \
+ SC(168, 47, 45, 1, yes, no, 0, no) \
+ SC(169, 47, 45, 2, yes, no, 0, no) \
+ SC(170, 47, 45, 3, yes, no, 0, no) \
+ SC(171, 47, 45, 4, yes, no, 0, no) \
+ \
+ SC(172, 48, 46, 1, yes, no, 0, no) \
+ SC(173, 48, 46, 2, yes, no, 0, no) \
+ SC(174, 48, 46, 3, yes, no, 0, no) \
+ SC(175, 48, 46, 4, yes, no, 0, no) \
+ \
+ SC(176, 49, 47, 1, yes, no, 0, no) \
+ SC(177, 49, 47, 2, yes, no, 0, no) \
+ SC(178, 49, 47, 3, yes, no, 0, no) \
+ SC(179, 49, 47, 4, yes, no, 0, no) \
+ \
+ SC(180, 50, 48, 1, yes, no, 0, no) \
+ SC(181, 50, 48, 2, yes, no, 0, no) \
+ SC(182, 50, 48, 3, yes, no, 0, no) \
+ SC(183, 50, 48, 4, yes, no, 0, no) \
+ \
+ SC(184, 51, 49, 1, yes, no, 0, no) \
+ SC(185, 51, 49, 2, yes, no, 0, no) \
+ SC(186, 51, 49, 3, yes, no, 0, no) \
+ SC(187, 51, 49, 4, yes, no, 0, no) \
+ \
+ SC(188, 52, 50, 1, yes, no, 0, no) \
+ SC(189, 52, 50, 2, yes, no, 0, no) \
+ SC(190, 52, 50, 3, yes, no, 0, no) \
+ SC(191, 52, 50, 4, yes, no, 0, no) \
+ \
+ SC(192, 53, 51, 1, yes, no, 0, no) \
+ SC(193, 53, 51, 2, yes, no, 0, no) \
+ SC(194, 53, 51, 3, yes, no, 0, no) \
+ SC(195, 53, 51, 4, yes, no, 0, no) \
+ \
+ SC(196, 54, 52, 1, yes, no, 0, no) \
+ SC(197, 54, 52, 2, yes, no, 0, no) \
+ SC(198, 54, 52, 3, yes, no, 0, no) \
+ SC(199, 54, 52, 4, yes, no, 0, no) \
+ \
+ SC(200, 55, 53, 1, yes, no, 0, no) \
+ SC(201, 55, 53, 2, yes, no, 0, no) \
+ SC(202, 55, 53, 3, yes, no, 0, no) \
+ SC(203, 55, 53, 4, yes, no, 0, no) \
+ \
+ SC(204, 56, 54, 1, yes, no, 0, no) \
+ SC(205, 56, 54, 2, yes, no, 0, no) \
+ SC(206, 56, 54, 3, yes, no, 0, no) \
+ SC(207, 56, 54, 4, yes, no, 0, no) \
+ \
+ SC(208, 57, 55, 1, yes, no, 0, no) \
+ SC(209, 57, 55, 2, yes, no, 0, no) \
+ SC(210, 57, 55, 3, yes, no, 0, no) \
+ SC(211, 57, 55, 4, yes, no, 0, no) \
+ \
+ SC(212, 58, 56, 1, yes, no, 0, no) \
+ SC(213, 58, 56, 2, yes, no, 0, no) \
+ SC(214, 58, 56, 3, yes, no, 0, no) \
+ SC(215, 58, 56, 4, yes, no, 0, no) \
+ \
+ SC(216, 59, 57, 1, yes, no, 0, no) \
+ SC(217, 59, 57, 2, yes, no, 0, no) \
+ SC(218, 59, 57, 3, yes, no, 0, no) \
+ SC(219, 59, 57, 4, yes, no, 0, no) \
+ \
+ SC(220, 60, 58, 1, yes, no, 0, no) \
+ SC(221, 60, 58, 2, yes, no, 0, no) \
+ SC(222, 60, 58, 3, yes, no, 0, no) \
+ SC(223, 60, 58, 4, yes, no, 0, no) \
+ \
+ SC(224, 61, 59, 1, yes, no, 0, no) \
+ SC(225, 61, 59, 2, yes, no, 0, no) \
+ SC(226, 61, 59, 3, yes, no, 0, no) \
+ SC(227, 61, 59, 4, yes, no, 0, no) \
+ \
+ SC(228, 62, 60, 1, yes, no, 0, no) \
+ SC(229, 62, 60, 2, yes, no, 0, no) \
+ SC(230, 62, 60, 3, yes, no, 0, no) \
-#define SIZE_CLASSES_DEFINED
-#define NTBINS 0
-#define NLBINS 28
-#define NBINS 43
-#define NSIZES 231
-#define NPSIZES 191
-#define LG_TINY_MAXCLASS "NA"
-#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
-#define SMALL_MAXCLASS ((((size_t)1) << 15) + (((size_t)3) << 13))
-#define LG_LARGE_MINCLASS 16
-#define HUGE_MAXCLASS ((((size_t)1) << 62) + (((size_t)3) << 60))
+#define SIZE_CLASSES_DEFINED
+#define NTBINS 0
+#define NLBINS 28
+#define NBINS 43
+#define NSIZES 231
+#define LG_CEIL_NSIZES 8
+#define NPSIZES 191
+#define LG_TINY_MAXCLASS "NA"
+#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
+#define SMALL_MAXCLASS ((((size_t)1) << 15) + (((size_t)3) << 13))
+#define LG_LARGE_MINCLASS 16
+#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)
+#define LARGE_MAXCLASS ((((size_t)1) << 62) + (((size_t)3) << 60))
#endif
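
The other addition in each block is LG_CEIL_NSIZES, which by its name should be the smallest n with (1 << n) >= NSIZES; with NSIZES fixed at 231 in every configuration, that gives 8 each time. A tiny check, under that assumption:

#include <assert.h>

/* Smallest n such that (1u << n) >= x, for x >= 1. */
static unsigned
lg_ceil(unsigned x) {
	unsigned n = 0;
	while ((1u << n) < x) {
		n++;
	}
	return n;
}

int
main(void) {
	assert(lg_ceil(231) == 8);	/* NSIZES == 231 -> LG_CEIL_NSIZES == 8 */
	return 0;
}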
#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 16)
-#define SIZE_CLASSES \
- /* index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup */ \
- SC( 0, 4, 4, 0, no, yes, 4) \
- SC( 1, 4, 4, 1, no, yes, 4) \
- SC( 2, 4, 4, 2, no, yes, 4) \
- SC( 3, 4, 4, 3, no, yes, 4) \
- \
- SC( 4, 6, 4, 1, no, yes, 4) \
- SC( 5, 6, 4, 2, no, yes, 4) \
- SC( 6, 6, 4, 3, no, yes, 4) \
- SC( 7, 6, 4, 4, no, yes, 4) \
- \
- SC( 8, 7, 5, 1, no, yes, 5) \
- SC( 9, 7, 5, 2, no, yes, 5) \
- SC( 10, 7, 5, 3, no, yes, 5) \
- SC( 11, 7, 5, 4, no, yes, 5) \
- \
- SC( 12, 8, 6, 1, no, yes, 6) \
- SC( 13, 8, 6, 2, no, yes, 6) \
- SC( 14, 8, 6, 3, no, yes, 6) \
- SC( 15, 8, 6, 4, no, yes, 6) \
- \
- SC( 16, 9, 7, 1, no, yes, 7) \
- SC( 17, 9, 7, 2, no, yes, 7) \
- SC( 18, 9, 7, 3, no, yes, 7) \
- SC( 19, 9, 7, 4, no, yes, 7) \
- \
- SC( 20, 10, 8, 1, no, yes, 8) \
- SC( 21, 10, 8, 2, no, yes, 8) \
- SC( 22, 10, 8, 3, no, yes, 8) \
- SC( 23, 10, 8, 4, no, yes, 8) \
- \
- SC( 24, 11, 9, 1, no, yes, 9) \
- SC( 25, 11, 9, 2, no, yes, 9) \
- SC( 26, 11, 9, 3, no, yes, 9) \
- SC( 27, 11, 9, 4, no, yes, 9) \
- \
- SC( 28, 12, 10, 1, no, yes, no) \
- SC( 29, 12, 10, 2, no, yes, no) \
- SC( 30, 12, 10, 3, no, yes, no) \
- SC( 31, 12, 10, 4, no, yes, no) \
- \
- SC( 32, 13, 11, 1, no, yes, no) \
- SC( 33, 13, 11, 2, no, yes, no) \
- SC( 34, 13, 11, 3, no, yes, no) \
- SC( 35, 13, 11, 4, no, yes, no) \
- \
- SC( 36, 14, 12, 1, no, yes, no) \
- SC( 37, 14, 12, 2, no, yes, no) \
- SC( 38, 14, 12, 3, no, yes, no) \
- SC( 39, 14, 12, 4, no, yes, no) \
- \
- SC( 40, 15, 13, 1, no, yes, no) \
- SC( 41, 15, 13, 2, no, yes, no) \
- SC( 42, 15, 13, 3, no, yes, no) \
- SC( 43, 15, 13, 4, yes, yes, no) \
- \
- SC( 44, 16, 14, 1, no, yes, no) \
- SC( 45, 16, 14, 2, no, yes, no) \
- SC( 46, 16, 14, 3, no, yes, no) \
- SC( 47, 16, 14, 4, yes, yes, no) \
- \
- SC( 48, 17, 15, 1, no, yes, no) \
- SC( 49, 17, 15, 2, yes, yes, no) \
- SC( 50, 17, 15, 3, no, yes, no) \
- SC( 51, 17, 15, 4, yes, no, no) \
- \
- SC( 52, 18, 16, 1, yes, no, no) \
- SC( 53, 18, 16, 2, yes, no, no) \
- SC( 54, 18, 16, 3, yes, no, no) \
- SC( 55, 18, 16, 4, yes, no, no) \
- \
- SC( 56, 19, 17, 1, yes, no, no) \
- SC( 57, 19, 17, 2, yes, no, no) \
- SC( 58, 19, 17, 3, yes, no, no) \
- SC( 59, 19, 17, 4, yes, no, no) \
- \
- SC( 60, 20, 18, 1, yes, no, no) \
- SC( 61, 20, 18, 2, yes, no, no) \
- SC( 62, 20, 18, 3, yes, no, no) \
- SC( 63, 20, 18, 4, yes, no, no) \
- \
- SC( 64, 21, 19, 1, yes, no, no) \
- SC( 65, 21, 19, 2, yes, no, no) \
- SC( 66, 21, 19, 3, yes, no, no) \
- SC( 67, 21, 19, 4, yes, no, no) \
- \
- SC( 68, 22, 20, 1, yes, no, no) \
- SC( 69, 22, 20, 2, yes, no, no) \
- SC( 70, 22, 20, 3, yes, no, no) \
- SC( 71, 22, 20, 4, yes, no, no) \
- \
- SC( 72, 23, 21, 1, yes, no, no) \
- SC( 73, 23, 21, 2, yes, no, no) \
- SC( 74, 23, 21, 3, yes, no, no) \
- SC( 75, 23, 21, 4, yes, no, no) \
- \
- SC( 76, 24, 22, 1, yes, no, no) \
- SC( 77, 24, 22, 2, yes, no, no) \
- SC( 78, 24, 22, 3, yes, no, no) \
- SC( 79, 24, 22, 4, yes, no, no) \
- \
- SC( 80, 25, 23, 1, yes, no, no) \
- SC( 81, 25, 23, 2, yes, no, no) \
- SC( 82, 25, 23, 3, yes, no, no) \
- SC( 83, 25, 23, 4, yes, no, no) \
- \
- SC( 84, 26, 24, 1, yes, no, no) \
- SC( 85, 26, 24, 2, yes, no, no) \
- SC( 86, 26, 24, 3, yes, no, no) \
- SC( 87, 26, 24, 4, yes, no, no) \
- \
- SC( 88, 27, 25, 1, yes, no, no) \
- SC( 89, 27, 25, 2, yes, no, no) \
- SC( 90, 27, 25, 3, yes, no, no) \
- SC( 91, 27, 25, 4, yes, no, no) \
- \
- SC( 92, 28, 26, 1, yes, no, no) \
- SC( 93, 28, 26, 2, yes, no, no) \
- SC( 94, 28, 26, 3, yes, no, no) \
- SC( 95, 28, 26, 4, yes, no, no) \
- \
- SC( 96, 29, 27, 1, yes, no, no) \
- SC( 97, 29, 27, 2, yes, no, no) \
- SC( 98, 29, 27, 3, yes, no, no) \
- SC( 99, 29, 27, 4, yes, no, no) \
- \
- SC(100, 30, 28, 1, yes, no, no) \
- SC(101, 30, 28, 2, yes, no, no) \
- SC(102, 30, 28, 3, yes, no, no) \
- SC(103, 30, 28, 4, yes, no, no) \
- \
- SC(104, 31, 29, 1, yes, no, no) \
- SC(105, 31, 29, 2, yes, no, no) \
- SC(106, 31, 29, 3, yes, no, no) \
- SC(107, 31, 29, 4, yes, no, no) \
- \
- SC(108, 32, 30, 1, yes, no, no) \
- SC(109, 32, 30, 2, yes, no, no) \
- SC(110, 32, 30, 3, yes, no, no) \
- SC(111, 32, 30, 4, yes, no, no) \
- \
- SC(112, 33, 31, 1, yes, no, no) \
- SC(113, 33, 31, 2, yes, no, no) \
- SC(114, 33, 31, 3, yes, no, no) \
- SC(115, 33, 31, 4, yes, no, no) \
- \
- SC(116, 34, 32, 1, yes, no, no) \
- SC(117, 34, 32, 2, yes, no, no) \
- SC(118, 34, 32, 3, yes, no, no) \
- SC(119, 34, 32, 4, yes, no, no) \
- \
- SC(120, 35, 33, 1, yes, no, no) \
- SC(121, 35, 33, 2, yes, no, no) \
- SC(122, 35, 33, 3, yes, no, no) \
- SC(123, 35, 33, 4, yes, no, no) \
- \
- SC(124, 36, 34, 1, yes, no, no) \
- SC(125, 36, 34, 2, yes, no, no) \
- SC(126, 36, 34, 3, yes, no, no) \
- SC(127, 36, 34, 4, yes, no, no) \
- \
- SC(128, 37, 35, 1, yes, no, no) \
- SC(129, 37, 35, 2, yes, no, no) \
- SC(130, 37, 35, 3, yes, no, no) \
- SC(131, 37, 35, 4, yes, no, no) \
- \
- SC(132, 38, 36, 1, yes, no, no) \
- SC(133, 38, 36, 2, yes, no, no) \
- SC(134, 38, 36, 3, yes, no, no) \
- SC(135, 38, 36, 4, yes, no, no) \
- \
- SC(136, 39, 37, 1, yes, no, no) \
- SC(137, 39, 37, 2, yes, no, no) \
- SC(138, 39, 37, 3, yes, no, no) \
- SC(139, 39, 37, 4, yes, no, no) \
- \
- SC(140, 40, 38, 1, yes, no, no) \
- SC(141, 40, 38, 2, yes, no, no) \
- SC(142, 40, 38, 3, yes, no, no) \
- SC(143, 40, 38, 4, yes, no, no) \
- \
- SC(144, 41, 39, 1, yes, no, no) \
- SC(145, 41, 39, 2, yes, no, no) \
- SC(146, 41, 39, 3, yes, no, no) \
- SC(147, 41, 39, 4, yes, no, no) \
- \
- SC(148, 42, 40, 1, yes, no, no) \
- SC(149, 42, 40, 2, yes, no, no) \
- SC(150, 42, 40, 3, yes, no, no) \
- SC(151, 42, 40, 4, yes, no, no) \
- \
- SC(152, 43, 41, 1, yes, no, no) \
- SC(153, 43, 41, 2, yes, no, no) \
- SC(154, 43, 41, 3, yes, no, no) \
- SC(155, 43, 41, 4, yes, no, no) \
- \
- SC(156, 44, 42, 1, yes, no, no) \
- SC(157, 44, 42, 2, yes, no, no) \
- SC(158, 44, 42, 3, yes, no, no) \
- SC(159, 44, 42, 4, yes, no, no) \
- \
- SC(160, 45, 43, 1, yes, no, no) \
- SC(161, 45, 43, 2, yes, no, no) \
- SC(162, 45, 43, 3, yes, no, no) \
- SC(163, 45, 43, 4, yes, no, no) \
- \
- SC(164, 46, 44, 1, yes, no, no) \
- SC(165, 46, 44, 2, yes, no, no) \
- SC(166, 46, 44, 3, yes, no, no) \
- SC(167, 46, 44, 4, yes, no, no) \
- \
- SC(168, 47, 45, 1, yes, no, no) \
- SC(169, 47, 45, 2, yes, no, no) \
- SC(170, 47, 45, 3, yes, no, no) \
- SC(171, 47, 45, 4, yes, no, no) \
- \
- SC(172, 48, 46, 1, yes, no, no) \
- SC(173, 48, 46, 2, yes, no, no) \
- SC(174, 48, 46, 3, yes, no, no) \
- SC(175, 48, 46, 4, yes, no, no) \
- \
- SC(176, 49, 47, 1, yes, no, no) \
- SC(177, 49, 47, 2, yes, no, no) \
- SC(178, 49, 47, 3, yes, no, no) \
- SC(179, 49, 47, 4, yes, no, no) \
- \
- SC(180, 50, 48, 1, yes, no, no) \
- SC(181, 50, 48, 2, yes, no, no) \
- SC(182, 50, 48, 3, yes, no, no) \
- SC(183, 50, 48, 4, yes, no, no) \
- \
- SC(184, 51, 49, 1, yes, no, no) \
- SC(185, 51, 49, 2, yes, no, no) \
- SC(186, 51, 49, 3, yes, no, no) \
- SC(187, 51, 49, 4, yes, no, no) \
- \
- SC(188, 52, 50, 1, yes, no, no) \
- SC(189, 52, 50, 2, yes, no, no) \
- SC(190, 52, 50, 3, yes, no, no) \
- SC(191, 52, 50, 4, yes, no, no) \
- \
- SC(192, 53, 51, 1, yes, no, no) \
- SC(193, 53, 51, 2, yes, no, no) \
- SC(194, 53, 51, 3, yes, no, no) \
- SC(195, 53, 51, 4, yes, no, no) \
- \
- SC(196, 54, 52, 1, yes, no, no) \
- SC(197, 54, 52, 2, yes, no, no) \
- SC(198, 54, 52, 3, yes, no, no) \
- SC(199, 54, 52, 4, yes, no, no) \
- \
- SC(200, 55, 53, 1, yes, no, no) \
- SC(201, 55, 53, 2, yes, no, no) \
- SC(202, 55, 53, 3, yes, no, no) \
- SC(203, 55, 53, 4, yes, no, no) \
- \
- SC(204, 56, 54, 1, yes, no, no) \
- SC(205, 56, 54, 2, yes, no, no) \
- SC(206, 56, 54, 3, yes, no, no) \
- SC(207, 56, 54, 4, yes, no, no) \
- \
- SC(208, 57, 55, 1, yes, no, no) \
- SC(209, 57, 55, 2, yes, no, no) \
- SC(210, 57, 55, 3, yes, no, no) \
- SC(211, 57, 55, 4, yes, no, no) \
- \
- SC(212, 58, 56, 1, yes, no, no) \
- SC(213, 58, 56, 2, yes, no, no) \
- SC(214, 58, 56, 3, yes, no, no) \
- SC(215, 58, 56, 4, yes, no, no) \
- \
- SC(216, 59, 57, 1, yes, no, no) \
- SC(217, 59, 57, 2, yes, no, no) \
- SC(218, 59, 57, 3, yes, no, no) \
- SC(219, 59, 57, 4, yes, no, no) \
- \
- SC(220, 60, 58, 1, yes, no, no) \
- SC(221, 60, 58, 2, yes, no, no) \
- SC(222, 60, 58, 3, yes, no, no) \
- SC(223, 60, 58, 4, yes, no, no) \
- \
- SC(224, 61, 59, 1, yes, no, no) \
- SC(225, 61, 59, 2, yes, no, no) \
- SC(226, 61, 59, 3, yes, no, no) \
- SC(227, 61, 59, 4, yes, no, no) \
- \
- SC(228, 62, 60, 1, yes, no, no) \
- SC(229, 62, 60, 2, yes, no, no) \
- SC(230, 62, 60, 3, yes, no, no) \
+#define SIZE_CLASSES \
+ /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \
+ SC( 0, 4, 4, 0, no, yes, 1, 4) \
+ SC( 1, 4, 4, 1, no, yes, 1, 4) \
+ SC( 2, 4, 4, 2, no, yes, 3, 4) \
+ SC( 3, 4, 4, 3, no, yes, 1, 4) \
+ \
+ SC( 4, 6, 4, 1, no, yes, 5, 4) \
+ SC( 5, 6, 4, 2, no, yes, 3, 4) \
+ SC( 6, 6, 4, 3, no, yes, 7, 4) \
+ SC( 7, 6, 4, 4, no, yes, 1, 4) \
+ \
+ SC( 8, 7, 5, 1, no, yes, 5, 5) \
+ SC( 9, 7, 5, 2, no, yes, 3, 5) \
+ SC( 10, 7, 5, 3, no, yes, 7, 5) \
+ SC( 11, 7, 5, 4, no, yes, 1, 5) \
+ \
+ SC( 12, 8, 6, 1, no, yes, 5, 6) \
+ SC( 13, 8, 6, 2, no, yes, 3, 6) \
+ SC( 14, 8, 6, 3, no, yes, 7, 6) \
+ SC( 15, 8, 6, 4, no, yes, 1, 6) \
+ \
+ SC( 16, 9, 7, 1, no, yes, 5, 7) \
+ SC( 17, 9, 7, 2, no, yes, 3, 7) \
+ SC( 18, 9, 7, 3, no, yes, 7, 7) \
+ SC( 19, 9, 7, 4, no, yes, 1, 7) \
+ \
+ SC( 20, 10, 8, 1, no, yes, 5, 8) \
+ SC( 21, 10, 8, 2, no, yes, 3, 8) \
+ SC( 22, 10, 8, 3, no, yes, 7, 8) \
+ SC( 23, 10, 8, 4, no, yes, 1, 8) \
+ \
+ SC( 24, 11, 9, 1, no, yes, 5, 9) \
+ SC( 25, 11, 9, 2, no, yes, 3, 9) \
+ SC( 26, 11, 9, 3, no, yes, 7, 9) \
+ SC( 27, 11, 9, 4, no, yes, 1, 9) \
+ \
+ SC( 28, 12, 10, 1, no, yes, 5, no) \
+ SC( 29, 12, 10, 2, no, yes, 3, no) \
+ SC( 30, 12, 10, 3, no, yes, 7, no) \
+ SC( 31, 12, 10, 4, no, yes, 1, no) \
+ \
+ SC( 32, 13, 11, 1, no, yes, 5, no) \
+ SC( 33, 13, 11, 2, no, yes, 3, no) \
+ SC( 34, 13, 11, 3, no, yes, 7, no) \
+ SC( 35, 13, 11, 4, no, yes, 1, no) \
+ \
+ SC( 36, 14, 12, 1, no, yes, 5, no) \
+ SC( 37, 14, 12, 2, no, yes, 3, no) \
+ SC( 38, 14, 12, 3, no, yes, 7, no) \
+ SC( 39, 14, 12, 4, no, yes, 1, no) \
+ \
+ SC( 40, 15, 13, 1, no, yes, 5, no) \
+ SC( 41, 15, 13, 2, no, yes, 3, no) \
+ SC( 42, 15, 13, 3, no, yes, 7, no) \
+ SC( 43, 15, 13, 4, yes, yes, 1, no) \
+ \
+ SC( 44, 16, 14, 1, no, yes, 5, no) \
+ SC( 45, 16, 14, 2, no, yes, 3, no) \
+ SC( 46, 16, 14, 3, no, yes, 7, no) \
+ SC( 47, 16, 14, 4, yes, yes, 2, no) \
+ \
+ SC( 48, 17, 15, 1, no, yes, 5, no) \
+ SC( 49, 17, 15, 2, yes, yes, 3, no) \
+ SC( 50, 17, 15, 3, no, yes, 7, no) \
+ SC( 51, 17, 15, 4, yes, no, 0, no) \
+ \
+ SC( 52, 18, 16, 1, yes, no, 0, no) \
+ SC( 53, 18, 16, 2, yes, no, 0, no) \
+ SC( 54, 18, 16, 3, yes, no, 0, no) \
+ SC( 55, 18, 16, 4, yes, no, 0, no) \
+ \
+ SC( 56, 19, 17, 1, yes, no, 0, no) \
+ SC( 57, 19, 17, 2, yes, no, 0, no) \
+ SC( 58, 19, 17, 3, yes, no, 0, no) \
+ SC( 59, 19, 17, 4, yes, no, 0, no) \
+ \
+ SC( 60, 20, 18, 1, yes, no, 0, no) \
+ SC( 61, 20, 18, 2, yes, no, 0, no) \
+ SC( 62, 20, 18, 3, yes, no, 0, no) \
+ SC( 63, 20, 18, 4, yes, no, 0, no) \
+ \
+ SC( 64, 21, 19, 1, yes, no, 0, no) \
+ SC( 65, 21, 19, 2, yes, no, 0, no) \
+ SC( 66, 21, 19, 3, yes, no, 0, no) \
+ SC( 67, 21, 19, 4, yes, no, 0, no) \
+ \
+ SC( 68, 22, 20, 1, yes, no, 0, no) \
+ SC( 69, 22, 20, 2, yes, no, 0, no) \
+ SC( 70, 22, 20, 3, yes, no, 0, no) \
+ SC( 71, 22, 20, 4, yes, no, 0, no) \
+ \
+ SC( 72, 23, 21, 1, yes, no, 0, no) \
+ SC( 73, 23, 21, 2, yes, no, 0, no) \
+ SC( 74, 23, 21, 3, yes, no, 0, no) \
+ SC( 75, 23, 21, 4, yes, no, 0, no) \
+ \
+ SC( 76, 24, 22, 1, yes, no, 0, no) \
+ SC( 77, 24, 22, 2, yes, no, 0, no) \
+ SC( 78, 24, 22, 3, yes, no, 0, no) \
+ SC( 79, 24, 22, 4, yes, no, 0, no) \
+ \
+ SC( 80, 25, 23, 1, yes, no, 0, no) \
+ SC( 81, 25, 23, 2, yes, no, 0, no) \
+ SC( 82, 25, 23, 3, yes, no, 0, no) \
+ SC( 83, 25, 23, 4, yes, no, 0, no) \
+ \
+ SC( 84, 26, 24, 1, yes, no, 0, no) \
+ SC( 85, 26, 24, 2, yes, no, 0, no) \
+ SC( 86, 26, 24, 3, yes, no, 0, no) \
+ SC( 87, 26, 24, 4, yes, no, 0, no) \
+ \
+ SC( 88, 27, 25, 1, yes, no, 0, no) \
+ SC( 89, 27, 25, 2, yes, no, 0, no) \
+ SC( 90, 27, 25, 3, yes, no, 0, no) \
+ SC( 91, 27, 25, 4, yes, no, 0, no) \
+ \
+ SC( 92, 28, 26, 1, yes, no, 0, no) \
+ SC( 93, 28, 26, 2, yes, no, 0, no) \
+ SC( 94, 28, 26, 3, yes, no, 0, no) \
+ SC( 95, 28, 26, 4, yes, no, 0, no) \
+ \
+ SC( 96, 29, 27, 1, yes, no, 0, no) \
+ SC( 97, 29, 27, 2, yes, no, 0, no) \
+ SC( 98, 29, 27, 3, yes, no, 0, no) \
+ SC( 99, 29, 27, 4, yes, no, 0, no) \
+ \
+ SC(100, 30, 28, 1, yes, no, 0, no) \
+ SC(101, 30, 28, 2, yes, no, 0, no) \
+ SC(102, 30, 28, 3, yes, no, 0, no) \
+ SC(103, 30, 28, 4, yes, no, 0, no) \
+ \
+ SC(104, 31, 29, 1, yes, no, 0, no) \
+ SC(105, 31, 29, 2, yes, no, 0, no) \
+ SC(106, 31, 29, 3, yes, no, 0, no) \
+ SC(107, 31, 29, 4, yes, no, 0, no) \
+ \
+ SC(108, 32, 30, 1, yes, no, 0, no) \
+ SC(109, 32, 30, 2, yes, no, 0, no) \
+ SC(110, 32, 30, 3, yes, no, 0, no) \
+ SC(111, 32, 30, 4, yes, no, 0, no) \
+ \
+ SC(112, 33, 31, 1, yes, no, 0, no) \
+ SC(113, 33, 31, 2, yes, no, 0, no) \
+ SC(114, 33, 31, 3, yes, no, 0, no) \
+ SC(115, 33, 31, 4, yes, no, 0, no) \
+ \
+ SC(116, 34, 32, 1, yes, no, 0, no) \
+ SC(117, 34, 32, 2, yes, no, 0, no) \
+ SC(118, 34, 32, 3, yes, no, 0, no) \
+ SC(119, 34, 32, 4, yes, no, 0, no) \
+ \
+ SC(120, 35, 33, 1, yes, no, 0, no) \
+ SC(121, 35, 33, 2, yes, no, 0, no) \
+ SC(122, 35, 33, 3, yes, no, 0, no) \
+ SC(123, 35, 33, 4, yes, no, 0, no) \
+ \
+ SC(124, 36, 34, 1, yes, no, 0, no) \
+ SC(125, 36, 34, 2, yes, no, 0, no) \
+ SC(126, 36, 34, 3, yes, no, 0, no) \
+ SC(127, 36, 34, 4, yes, no, 0, no) \
+ \
+ SC(128, 37, 35, 1, yes, no, 0, no) \
+ SC(129, 37, 35, 2, yes, no, 0, no) \
+ SC(130, 37, 35, 3, yes, no, 0, no) \
+ SC(131, 37, 35, 4, yes, no, 0, no) \
+ \
+ SC(132, 38, 36, 1, yes, no, 0, no) \
+ SC(133, 38, 36, 2, yes, no, 0, no) \
+ SC(134, 38, 36, 3, yes, no, 0, no) \
+ SC(135, 38, 36, 4, yes, no, 0, no) \
+ \
+ SC(136, 39, 37, 1, yes, no, 0, no) \
+ SC(137, 39, 37, 2, yes, no, 0, no) \
+ SC(138, 39, 37, 3, yes, no, 0, no) \
+ SC(139, 39, 37, 4, yes, no, 0, no) \
+ \
+ SC(140, 40, 38, 1, yes, no, 0, no) \
+ SC(141, 40, 38, 2, yes, no, 0, no) \
+ SC(142, 40, 38, 3, yes, no, 0, no) \
+ SC(143, 40, 38, 4, yes, no, 0, no) \
+ \
+ SC(144, 41, 39, 1, yes, no, 0, no) \
+ SC(145, 41, 39, 2, yes, no, 0, no) \
+ SC(146, 41, 39, 3, yes, no, 0, no) \
+ SC(147, 41, 39, 4, yes, no, 0, no) \
+ \
+ SC(148, 42, 40, 1, yes, no, 0, no) \
+ SC(149, 42, 40, 2, yes, no, 0, no) \
+ SC(150, 42, 40, 3, yes, no, 0, no) \
+ SC(151, 42, 40, 4, yes, no, 0, no) \
+ \
+ SC(152, 43, 41, 1, yes, no, 0, no) \
+ SC(153, 43, 41, 2, yes, no, 0, no) \
+ SC(154, 43, 41, 3, yes, no, 0, no) \
+ SC(155, 43, 41, 4, yes, no, 0, no) \
+ \
+ SC(156, 44, 42, 1, yes, no, 0, no) \
+ SC(157, 44, 42, 2, yes, no, 0, no) \
+ SC(158, 44, 42, 3, yes, no, 0, no) \
+ SC(159, 44, 42, 4, yes, no, 0, no) \
+ \
+ SC(160, 45, 43, 1, yes, no, 0, no) \
+ SC(161, 45, 43, 2, yes, no, 0, no) \
+ SC(162, 45, 43, 3, yes, no, 0, no) \
+ SC(163, 45, 43, 4, yes, no, 0, no) \
+ \
+ SC(164, 46, 44, 1, yes, no, 0, no) \
+ SC(165, 46, 44, 2, yes, no, 0, no) \
+ SC(166, 46, 44, 3, yes, no, 0, no) \
+ SC(167, 46, 44, 4, yes, no, 0, no) \
+ \
+ SC(168, 47, 45, 1, yes, no, 0, no) \
+ SC(169, 47, 45, 2, yes, no, 0, no) \
+ SC(170, 47, 45, 3, yes, no, 0, no) \
+ SC(171, 47, 45, 4, yes, no, 0, no) \
+ \
+ SC(172, 48, 46, 1, yes, no, 0, no) \
+ SC(173, 48, 46, 2, yes, no, 0, no) \
+ SC(174, 48, 46, 3, yes, no, 0, no) \
+ SC(175, 48, 46, 4, yes, no, 0, no) \
+ \
+ SC(176, 49, 47, 1, yes, no, 0, no) \
+ SC(177, 49, 47, 2, yes, no, 0, no) \
+ SC(178, 49, 47, 3, yes, no, 0, no) \
+ SC(179, 49, 47, 4, yes, no, 0, no) \
+ \
+ SC(180, 50, 48, 1, yes, no, 0, no) \
+ SC(181, 50, 48, 2, yes, no, 0, no) \
+ SC(182, 50, 48, 3, yes, no, 0, no) \
+ SC(183, 50, 48, 4, yes, no, 0, no) \
+ \
+ SC(184, 51, 49, 1, yes, no, 0, no) \
+ SC(185, 51, 49, 2, yes, no, 0, no) \
+ SC(186, 51, 49, 3, yes, no, 0, no) \
+ SC(187, 51, 49, 4, yes, no, 0, no) \
+ \
+ SC(188, 52, 50, 1, yes, no, 0, no) \
+ SC(189, 52, 50, 2, yes, no, 0, no) \
+ SC(190, 52, 50, 3, yes, no, 0, no) \
+ SC(191, 52, 50, 4, yes, no, 0, no) \
+ \
+ SC(192, 53, 51, 1, yes, no, 0, no) \
+ SC(193, 53, 51, 2, yes, no, 0, no) \
+ SC(194, 53, 51, 3, yes, no, 0, no) \
+ SC(195, 53, 51, 4, yes, no, 0, no) \
+ \
+ SC(196, 54, 52, 1, yes, no, 0, no) \
+ SC(197, 54, 52, 2, yes, no, 0, no) \
+ SC(198, 54, 52, 3, yes, no, 0, no) \
+ SC(199, 54, 52, 4, yes, no, 0, no) \
+ \
+ SC(200, 55, 53, 1, yes, no, 0, no) \
+ SC(201, 55, 53, 2, yes, no, 0, no) \
+ SC(202, 55, 53, 3, yes, no, 0, no) \
+ SC(203, 55, 53, 4, yes, no, 0, no) \
+ \
+ SC(204, 56, 54, 1, yes, no, 0, no) \
+ SC(205, 56, 54, 2, yes, no, 0, no) \
+ SC(206, 56, 54, 3, yes, no, 0, no) \
+ SC(207, 56, 54, 4, yes, no, 0, no) \
+ \
+ SC(208, 57, 55, 1, yes, no, 0, no) \
+ SC(209, 57, 55, 2, yes, no, 0, no) \
+ SC(210, 57, 55, 3, yes, no, 0, no) \
+ SC(211, 57, 55, 4, yes, no, 0, no) \
+ \
+ SC(212, 58, 56, 1, yes, no, 0, no) \
+ SC(213, 58, 56, 2, yes, no, 0, no) \
+ SC(214, 58, 56, 3, yes, no, 0, no) \
+ SC(215, 58, 56, 4, yes, no, 0, no) \
+ \
+ SC(216, 59, 57, 1, yes, no, 0, no) \
+ SC(217, 59, 57, 2, yes, no, 0, no) \
+ SC(218, 59, 57, 3, yes, no, 0, no) \
+ SC(219, 59, 57, 4, yes, no, 0, no) \
+ \
+ SC(220, 60, 58, 1, yes, no, 0, no) \
+ SC(221, 60, 58, 2, yes, no, 0, no) \
+ SC(222, 60, 58, 3, yes, no, 0, no) \
+ SC(223, 60, 58, 4, yes, no, 0, no) \
+ \
+ SC(224, 61, 59, 1, yes, no, 0, no) \
+ SC(225, 61, 59, 2, yes, no, 0, no) \
+ SC(226, 61, 59, 3, yes, no, 0, no) \
+ SC(227, 61, 59, 4, yes, no, 0, no) \
+ \
+ SC(228, 62, 60, 1, yes, no, 0, no) \
+ SC(229, 62, 60, 2, yes, no, 0, no) \
+ SC(230, 62, 60, 3, yes, no, 0, no) \
-#define SIZE_CLASSES_DEFINED
-#define NTBINS 0
-#define NLBINS 28
-#define NBINS 51
-#define NSIZES 231
-#define NPSIZES 183
-#define LG_TINY_MAXCLASS "NA"
-#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
-#define SMALL_MAXCLASS ((((size_t)1) << 17) + (((size_t)3) << 15))
-#define LG_LARGE_MINCLASS 18
-#define HUGE_MAXCLASS ((((size_t)1) << 62) + (((size_t)3) << 60))
+#define SIZE_CLASSES_DEFINED
+#define NTBINS 0
+#define NLBINS 28
+#define NBINS 51
+#define NSIZES 231
+#define LG_CEIL_NSIZES 8
+#define NPSIZES 183
+#define LG_TINY_MAXCLASS "NA"
+#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
+#define SMALL_MAXCLASS ((((size_t)1) << 17) + (((size_t)3) << 15))
+#define LG_LARGE_MINCLASS 18
+#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)
+#define LARGE_MAXCLASS ((((size_t)1) << 62) + (((size_t)3) << 60))
#endif
#ifndef SIZE_CLASSES_DEFINED
@@ -5493,28 +5547,10 @@
#undef SIZE_CLASSES_DEFINED
/*
* The size2index_tab lookup table uses uint8_t to encode each bin index, so we
- * cannot support more than 256 small size classes. Further constrain NBINS to
- * 255 since all small size classes, plus a "not small" size class must be
- * stored in 8 bits of arena_chunk_map_bits_t's bits field.
+ * cannot support more than 256 small size classes.
*/
-#if (NBINS > 255)
+#if (NBINS > 256)
# error "Too many small size classes"
#endif
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
+#endif /* JEMALLOC_INTERNAL_SIZE_CLASSES_H */
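
A small worked example may help when reading the regenerated SC() table above. Each row SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) encodes the class size (1 << lg_grp) + ndelta * (1 << lg_delta), which is also where derived constants such as LOOKUP_MAXCLASS and SMALL_MAXCLASS come from. The sketch below is illustrative only and is not part of the patch; sc_size() is a hypothetical helper.

#include <assert.h>
#include <stddef.h>

/* Hypothetical helper: the byte size encoded by one SC(index, lg_grp,
 * lg_delta, ndelta, ...) row of the table above. */
static size_t
sc_size(unsigned lg_grp, unsigned lg_delta, unsigned ndelta) {
	return ((size_t)1 << lg_grp) + ((size_t)ndelta << lg_delta);
}

int
main(void) {
	assert(sc_size(4, 4, 0) == 16);       /* index 0: smallest class */
	assert(sc_size(11, 9, 4) == 4096);    /* index 27: LOOKUP_MAXCLASS */
	assert(sc_size(17, 15, 3) == 229376); /* index 50: SMALL_MAXCLASS */
	return 0;
}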
diff --git a/contrib/jemalloc/include/jemalloc/internal/smoothstep.h b/contrib/jemalloc/include/jemalloc/internal/smoothstep.h
index c5333ccad381..2e14430f5f1b 100644
--- a/contrib/jemalloc/include/jemalloc/internal/smoothstep.h
+++ b/contrib/jemalloc/include/jemalloc/internal/smoothstep.h
@@ -1,9 +1,11 @@
+#ifndef JEMALLOC_INTERNAL_SMOOTHSTEP_H
+#define JEMALLOC_INTERNAL_SMOOTHSTEP_H
+
/*
* This file was generated by the following command:
* sh smoothstep.sh smoother 200 24 3 15
*/
/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
/*
* This header defines a precomputed table based on the smoothstep family of
@@ -21,10 +23,10 @@
* smootheststep(x) = -20x^7 + 70x^6 - 84x^5 + 35x^4
*/
-#define SMOOTHSTEP_VARIANT "smoother"
-#define SMOOTHSTEP_NSTEPS 200
-#define SMOOTHSTEP_BFP 24
-#define SMOOTHSTEP \
+#define SMOOTHSTEP_VARIANT "smoother"
+#define SMOOTHSTEP_NSTEPS 200
+#define SMOOTHSTEP_BFP 24
+#define SMOOTHSTEP \
/* STEP(step, h, x, y) */ \
STEP( 1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \
STEP( 2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \
@@ -227,20 +229,4 @@
STEP( 199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \
STEP( 200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000) \
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
+#endif /* JEMALLOC_INTERNAL_SMOOTHSTEP_H */
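
The STEP() table above stores y values of the "smoother" variant (y = 6x^5 - 15x^4 + 10x^3, per the generation command) as SMOOTHSTEP_BFP = 24-bit fixed point, i.e. roughly the truncation of y * 2^24. The snippet below is a hedged re-derivation of one entry, not part of the patch; smootherstep_fixed() is a hypothetical name and the truncation behavior is inferred from the table values.

#include <assert.h>
#include <stdint.h>

/* Hypothetical re-derivation of one STEP() entry, assuming the "smoother"
 * polynomial and 24 fixed-point bits (SMOOTHSTEP_BFP). */
static uint64_t
smootherstep_fixed(double x) {
	double y = ((6.0 * x - 15.0) * x + 10.0) * x * x * x;
	return (uint64_t)(y * (double)(1 << 24)); /* truncate to fixed point */
}

int
main(void) {
	/* STEP(2, ..., 0.010, 0.000009850600000) stores 0xa5 == 165. */
	assert(smootherstep_fixed(2.0 / 200.0) == UINT64_C(0xa5));
	return 0;
}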
diff --git a/contrib/jemalloc/include/jemalloc/internal/spin.h b/contrib/jemalloc/include/jemalloc/internal/spin.h
index 9ef5ceb92407..e2afc98cfda2 100644
--- a/contrib/jemalloc/include/jemalloc/internal/spin.h
+++ b/contrib/jemalloc/include/jemalloc/internal/spin.h
@@ -1,51 +1,36 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
+#ifndef JEMALLOC_INTERNAL_SPIN_H
+#define JEMALLOC_INTERNAL_SPIN_H
-typedef struct spin_s spin_t;
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-struct spin_s {
- unsigned iteration;
-};
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-void spin_init(spin_t *spin);
-void spin_adaptive(spin_t *spin);
+#ifdef JEMALLOC_SPIN_C_
+# define SPIN_INLINE extern inline
+#else
+# define SPIN_INLINE inline
#endif
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_SPIN_C_))
-JEMALLOC_INLINE void
-spin_init(spin_t *spin)
-{
+#define SPIN_INITIALIZER {0U}
- spin->iteration = 0;
-}
-
-JEMALLOC_INLINE void
-spin_adaptive(spin_t *spin)
-{
- volatile uint64_t i;
+typedef struct {
+ unsigned iteration;
+} spin_t;
- for (i = 0; i < (KQU(1) << spin->iteration); i++)
- CPU_SPINWAIT;
+SPIN_INLINE void
+spin_adaptive(spin_t *spin) {
+ volatile uint32_t i;
- if (spin->iteration < 63)
+ if (spin->iteration < 5) {
+ for (i = 0; i < (1U << spin->iteration); i++) {
+ CPU_SPINWAIT;
+ }
spin->iteration++;
-}
-
+ } else {
+#ifdef _WIN32
+ SwitchToThread();
+#else
+ sched_yield();
#endif
+ }
+}
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
+#undef SPIN_INLINE
+#endif /* JEMALLOC_INTERNAL_SPIN_H */
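
The rewritten spin_adaptive() above busy-waits for 1, 2, 4, 8, then 16 CPU_SPINWAIT hints and afterwards yields the thread instead of spinning longer. A minimal, self-contained usage sketch follows; it is not part of the patch, the _sketch names are hypothetical, and CPU_SPINWAIT (a platform pause hint) is replaced by an empty body.

#include <sched.h>
#include <stdatomic.h>

/* Self-contained sketch of the adaptive backoff above. */
typedef struct { unsigned iteration; } spin_sketch_t;
#define SPIN_SKETCH_INITIALIZER {0U}

static void
spin_adaptive_sketch(spin_sketch_t *spin) {
	if (spin->iteration < 5) {
		for (volatile unsigned i = 0; i < (1U << spin->iteration); i++) {
			/* CPU_SPINWAIT would go here. */
		}
		spin->iteration++;
	} else {
		sched_yield(); /* Stop burning cycles; let another thread run. */
	}
}

static atomic_flag busy = ATOMIC_FLAG_INIT;

int
main(void) {
	spin_sketch_t spin = SPIN_SKETCH_INITIALIZER;
	/* Hypothetical caller: back off while the flag is held elsewhere. */
	while (atomic_flag_test_and_set_explicit(&busy, memory_order_acquire)) {
		spin_adaptive_sketch(&spin);
	}
	atomic_flag_clear_explicit(&busy, memory_order_release);
	return 0;
}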
diff --git a/contrib/jemalloc/include/jemalloc/internal/stats.h b/contrib/jemalloc/include/jemalloc/internal/stats.h
index 04e7dae14c7e..1198779ab9cf 100644
--- a/contrib/jemalloc/include/jemalloc/internal/stats.h
+++ b/contrib/jemalloc/include/jemalloc/internal/stats.h
@@ -1,26 +1,51 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
+#ifndef JEMALLOC_INTERNAL_STATS_H
+#define JEMALLOC_INTERNAL_STATS_H
+
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/mutex_prof.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/stats_tsd.h"
+
+/* OPTION(opt, var_name, default, set_value_to) */
+#define STATS_PRINT_OPTIONS \
+ OPTION('J', json, false, true) \
+ OPTION('g', general, true, false) \
+ OPTION('m', merged, config_stats, false) \
+ OPTION('d', destroyed, config_stats, false) \
+ OPTION('a', unmerged, config_stats, false) \
+ OPTION('b', bins, true, false) \
+ OPTION('l', large, true, false) \
+ OPTION('x', mutex, true, false)
+
+enum {
+#define OPTION(o, v, d, s) stats_print_option_num_##v,
+ STATS_PRINT_OPTIONS
+#undef OPTION
+ stats_print_tot_num_options
+};
-typedef struct tcache_bin_stats_s tcache_bin_stats_t;
-typedef struct malloc_bin_stats_s malloc_bin_stats_t;
-typedef struct malloc_large_stats_s malloc_large_stats_t;
-typedef struct malloc_huge_stats_s malloc_huge_stats_t;
-typedef struct arena_stats_s arena_stats_t;
-typedef struct chunk_stats_s chunk_stats_t;
+/* Options for stats_print. */
+extern bool opt_stats_print;
+extern char opt_stats_print_opts[stats_print_tot_num_options+1];
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
+/* Implements je_malloc_stats_print. */
+void stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
+ const char *opts);
-struct tcache_bin_stats_s {
- /*
- * Number of allocation requests that corresponded to the size of this
- * bin.
- */
- uint64_t nrequests;
-};
+/*
+ * In those architectures that support 64-bit atomics, we use atomic updates for
+ * our 64-bit values. Otherwise, we use a plain uint64_t and synchronize
+ * externally.
+ */
+#ifdef JEMALLOC_ATOMIC_U64
+typedef atomic_u64_t arena_stats_u64_t;
+#else
+/* Must hold the arena stats mutex while reading atomically. */
+typedef uint64_t arena_stats_u64_t;
+#endif
-struct malloc_bin_stats_s {
+typedef struct malloc_bin_stats_s {
/*
* Total number of allocation/deallocation requests served directly by
* the bin. Note that tcache may allocate an object, then recycle it
@@ -49,149 +74,91 @@ struct malloc_bin_stats_s {
/* Number of tcache flushes to this bin. */
uint64_t nflushes;
- /* Total number of runs created for this bin's size class. */
- uint64_t nruns;
+ /* Total number of slabs created for this bin's size class. */
+ uint64_t nslabs;
/*
- * Total number of runs reused by extracting them from the runs tree for
- * this bin's size class.
+ * Total number of slabs reused by extracting them from the slabs heap
+ * for this bin's size class.
*/
- uint64_t reruns;
+ uint64_t reslabs;
- /* Current number of runs in this bin. */
- size_t curruns;
-};
+ /* Current number of slabs in this bin. */
+ size_t curslabs;
+
+ mutex_prof_data_t mutex_data;
+} malloc_bin_stats_t;
-struct malloc_large_stats_s {
+typedef struct malloc_large_stats_s {
/*
* Total number of allocation/deallocation requests served directly by
- * the arena. Note that tcache may allocate an object, then recycle it
- * many times, resulting many increments to nrequests, but only one
- * each to nmalloc and ndalloc.
+ * the arena.
*/
- uint64_t nmalloc;
- uint64_t ndalloc;
+ arena_stats_u64_t nmalloc;
+ arena_stats_u64_t ndalloc;
/*
* Number of allocation requests that correspond to this size class.
* This includes requests served by tcache, though tcache only
* periodically merges into this counter.
*/
- uint64_t nrequests;
+ arena_stats_u64_t nrequests; /* Partially derived. */
+
+ /* Current number of allocations of this size class. */
+ size_t curlextents; /* Derived. */
+} malloc_large_stats_t;
+
+typedef struct decay_stats_s {
+ /* Total number of purge sweeps. */
+ arena_stats_u64_t npurge;
+ /* Total number of madvise calls made. */
+ arena_stats_u64_t nmadvise;
+ /* Total number of pages purged. */
+ arena_stats_u64_t purged;
+} decay_stats_t;
+
+/*
+ * Arena stats. Note that fields marked "derived" are not directly maintained
+ * within the arena code; rather their values are derived during stats merge
+ * requests.
+ */
+typedef struct arena_stats_s {
+#ifndef JEMALLOC_ATOMIC_U64
+ malloc_mutex_t mtx;
+#endif
- /*
- * Current number of runs of this size class, including runs currently
- * cached by tcache.
- */
- size_t curruns;
-};
+ /* Number of bytes currently mapped, excluding retained memory. */
+ atomic_zu_t mapped; /* Partially derived. */
-struct malloc_huge_stats_s {
/*
- * Total number of allocation/deallocation requests served directly by
- * the arena.
+ * Number of unused virtual memory bytes currently retained. Retained
+ * bytes are technically mapped (though always decommitted or purged),
+ * but they are excluded from the mapped statistic (above).
*/
- uint64_t nmalloc;
- uint64_t ndalloc;
-
- /* Current number of (multi-)chunk allocations of this size class. */
- size_t curhchunks;
-};
+ atomic_zu_t retained; /* Derived. */
-struct arena_stats_s {
- /* Number of bytes currently mapped. */
- size_t mapped;
+ decay_stats_t decay_dirty;
+ decay_stats_t decay_muzzy;
- /*
- * Number of bytes currently retained as a side effect of munmap() being
- * disabled/bypassed. Retained bytes are technically mapped (though
- * always decommitted or purged), but they are excluded from the mapped
- * statistic (above).
- */
- size_t retained;
+ atomic_zu_t base; /* Derived. */
+ atomic_zu_t internal;
+ atomic_zu_t resident; /* Derived. */
- /*
- * Total number of purge sweeps, total number of madvise calls made,
- * and total pages purged in order to keep dirty unused memory under
- * control.
- */
- uint64_t npurge;
- uint64_t nmadvise;
- uint64_t purged;
-
- /*
- * Number of bytes currently mapped purely for metadata purposes, and
- * number of bytes currently allocated for internal metadata.
- */
- size_t metadata_mapped;
- size_t metadata_allocated; /* Protected via atomic_*_z(). */
+ atomic_zu_t allocated_large; /* Derived. */
+ arena_stats_u64_t nmalloc_large; /* Derived. */
+ arena_stats_u64_t ndalloc_large; /* Derived. */
+ arena_stats_u64_t nrequests_large; /* Derived. */
- /* Per-size-category statistics. */
- size_t allocated_large;
- uint64_t nmalloc_large;
- uint64_t ndalloc_large;
- uint64_t nrequests_large;
+ /* Number of bytes cached in tcache associated with this arena. */
+ atomic_zu_t tcache_bytes; /* Derived. */
- size_t allocated_huge;
- uint64_t nmalloc_huge;
- uint64_t ndalloc_huge;
+ mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];
/* One element for each large size class. */
- malloc_large_stats_t *lstats;
+ malloc_large_stats_t lstats[NSIZES - NBINS];
- /* One element for each huge size class. */
- malloc_huge_stats_t *hstats;
-};
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-extern bool opt_stats_print;
-
-extern size_t stats_cactive;
-
-void stats_print(void (*write)(void *, const char *), void *cbopaque,
- const char *opts);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-size_t stats_cactive_get(void);
-void stats_cactive_add(size_t size);
-void stats_cactive_sub(size_t size);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_))
-JEMALLOC_INLINE size_t
-stats_cactive_get(void)
-{
-
- return (atomic_read_z(&stats_cactive));
-}
-
-JEMALLOC_INLINE void
-stats_cactive_add(size_t size)
-{
-
- assert(size > 0);
- assert((size & chunksize_mask) == 0);
-
- atomic_add_z(&stats_cactive, size);
-}
-
-JEMALLOC_INLINE void
-stats_cactive_sub(size_t size)
-{
-
- assert(size > 0);
- assert((size & chunksize_mask) == 0);
-
- atomic_sub_z(&stats_cactive, size);
-}
-#endif
+ /* Arena uptime. */
+ nstime_t uptime;
+} arena_stats_t;
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
+#endif /* JEMALLOC_INTERNAL_STATS_H */
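
STATS_PRINT_OPTIONS above is an X-macro: each OPTION(char, name, default, set_value_to) row is expanded once per use site. For reference, the enum definition in the header expands to the following (a sketch of the preprocessor output, not additional code in the patch); this is also why opt_stats_print_opts is sized stats_print_tot_num_options + 1.

enum {
	stats_print_option_num_json,      /* 'J' */
	stats_print_option_num_general,   /* 'g' */
	stats_print_option_num_merged,    /* 'm' */
	stats_print_option_num_destroyed, /* 'd' */
	stats_print_option_num_unmerged,  /* 'a' */
	stats_print_option_num_bins,      /* 'b' */
	stats_print_option_num_large,     /* 'l' */
	stats_print_option_num_mutex,     /* 'x' */
	stats_print_tot_num_options       /* == 8 */
};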
diff --git a/contrib/jemalloc/include/jemalloc/internal/stats_tsd.h b/contrib/jemalloc/include/jemalloc/internal/stats_tsd.h
new file mode 100644
index 000000000000..d0c3bbe49455
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/stats_tsd.h
@@ -0,0 +1,12 @@
+#ifndef JEMALLOC_INTERNAL_STATS_TSD_H
+#define JEMALLOC_INTERNAL_STATS_TSD_H
+
+typedef struct tcache_bin_stats_s {
+ /*
+ * Number of allocation requests that corresponded to the size of this
+ * bin.
+ */
+ uint64_t nrequests;
+} tcache_bin_stats_t;
+
+#endif /* JEMALLOC_INTERNAL_STATS_TSD_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/sz.h b/contrib/jemalloc/include/jemalloc/internal/sz.h
new file mode 100644
index 000000000000..7f640d55ad78
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/sz.h
@@ -0,0 +1,317 @@
+#ifndef JEMALLOC_INTERNAL_SIZE_H
+#define JEMALLOC_INTERNAL_SIZE_H
+
+#include "jemalloc/internal/bit_util.h"
+#include "jemalloc/internal/pages.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/util.h"
+
+/*
+ * sz module: Size computations.
+ *
+ * Some abbreviations used here:
+ * p: Page
+ * ind: Index
+ * s, sz: Size
+ * u: Usable size
+ * a: Aligned
+ *
+ * These are not always used completely consistently, but should be enough to
+ * interpret function names. E.g. sz_psz2ind converts page size to page size
+ * index; sz_sa2u converts a (size, alignment) allocation request to the usable
+ * size that would result from such an allocation.
+ */
+
+/*
+ * sz_pind2sz_tab encodes the same information as could be computed by
+ * sz_pind2sz_compute().
+ */
+extern size_t const sz_pind2sz_tab[NPSIZES+1];
+/*
+ * sz_index2size_tab encodes the same information as could be computed (at
+ * unacceptable cost in some code paths) by sz_index2size_compute().
+ */
+extern size_t const sz_index2size_tab[NSIZES];
+/*
+ * sz_size2index_tab is a compact lookup table that rounds request sizes up to
+ * size classes. In order to reduce cache footprint, the table is compressed,
+ * and all accesses are via sz_size2index().
+ */
+extern uint8_t const sz_size2index_tab[];
+
+static const size_t sz_large_pad =
+#ifdef JEMALLOC_CACHE_OBLIVIOUS
+ PAGE
+#else
+ 0
+#endif
+ ;
+
+JEMALLOC_ALWAYS_INLINE pszind_t
+sz_psz2ind(size_t psz) {
+ if (unlikely(psz > LARGE_MAXCLASS)) {
+ return NPSIZES;
+ }
+ {
+ pszind_t x = lg_floor((psz<<1)-1);
+ pszind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_PAGE) ? 0 : x -
+ (LG_SIZE_CLASS_GROUP + LG_PAGE);
+ pszind_t grp = shift << LG_SIZE_CLASS_GROUP;
+
+ pszind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
+ LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;
+
+ size_t delta_inverse_mask = ZD(-1) << lg_delta;
+ pszind_t mod = ((((psz-1) & delta_inverse_mask) >> lg_delta)) &
+ ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
+
+ pszind_t ind = grp + mod;
+ return ind;
+ }
+}
+
+static inline size_t
+sz_pind2sz_compute(pszind_t pind) {
+ if (unlikely(pind == NPSIZES)) {
+ return LARGE_MAXCLASS + PAGE;
+ }
+ {
+ size_t grp = pind >> LG_SIZE_CLASS_GROUP;
+ size_t mod = pind & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
+
+ size_t grp_size_mask = ~((!!grp)-1);
+ size_t grp_size = ((ZU(1) << (LG_PAGE +
+ (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;
+
+ size_t shift = (grp == 0) ? 1 : grp;
+ size_t lg_delta = shift + (LG_PAGE-1);
+ size_t mod_size = (mod+1) << lg_delta;
+
+ size_t sz = grp_size + mod_size;
+ return sz;
+ }
+}
+
+static inline size_t
+sz_pind2sz_lookup(pszind_t pind) {
+ size_t ret = (size_t)sz_pind2sz_tab[pind];
+ assert(ret == sz_pind2sz_compute(pind));
+ return ret;
+}
+
+static inline size_t
+sz_pind2sz(pszind_t pind) {
+ assert(pind < NPSIZES+1);
+ return sz_pind2sz_lookup(pind);
+}
+
+static inline size_t
+sz_psz2u(size_t psz) {
+ if (unlikely(psz > LARGE_MAXCLASS)) {
+ return LARGE_MAXCLASS + PAGE;
+ }
+ {
+ size_t x = lg_floor((psz<<1)-1);
+ size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
+ LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;
+ size_t delta = ZU(1) << lg_delta;
+ size_t delta_mask = delta - 1;
+ size_t usize = (psz + delta_mask) & ~delta_mask;
+ return usize;
+ }
+}
+
+static inline szind_t
+sz_size2index_compute(size_t size) {
+ if (unlikely(size > LARGE_MAXCLASS)) {
+ return NSIZES;
+ }
+#if (NTBINS != 0)
+ if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
+ szind_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
+ szind_t lg_ceil = lg_floor(pow2_ceil_zu(size));
+ return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
+ }
+#endif
+ {
+ szind_t x = lg_floor((size<<1)-1);
+ szind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
+ x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM);
+ szind_t grp = shift << LG_SIZE_CLASS_GROUP;
+
+ szind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
+ ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
+
+ size_t delta_inverse_mask = ZD(-1) << lg_delta;
+ szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
+ ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
+
+ szind_t index = NTBINS + grp + mod;
+ return index;
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE szind_t
+sz_size2index_lookup(size_t size) {
+ assert(size <= LOOKUP_MAXCLASS);
+ {
+ szind_t ret = (sz_size2index_tab[(size-1) >> LG_TINY_MIN]);
+ assert(ret == sz_size2index_compute(size));
+ return ret;
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE szind_t
+sz_size2index(size_t size) {
+ assert(size > 0);
+ if (likely(size <= LOOKUP_MAXCLASS)) {
+ return sz_size2index_lookup(size);
+ }
+ return sz_size2index_compute(size);
+}
+
+static inline size_t
+sz_index2size_compute(szind_t index) {
+#if (NTBINS > 0)
+ if (index < NTBINS) {
+ return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index));
+ }
+#endif
+ {
+ size_t reduced_index = index - NTBINS;
+ size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP;
+ size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) -
+ 1);
+
+ size_t grp_size_mask = ~((!!grp)-1);
+ size_t grp_size = ((ZU(1) << (LG_QUANTUM +
+ (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;
+
+ size_t shift = (grp == 0) ? 1 : grp;
+ size_t lg_delta = shift + (LG_QUANTUM-1);
+ size_t mod_size = (mod+1) << lg_delta;
+
+ size_t usize = grp_size + mod_size;
+ return usize;
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+sz_index2size_lookup(szind_t index) {
+ size_t ret = (size_t)sz_index2size_tab[index];
+ assert(ret == sz_index2size_compute(index));
+ return ret;
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+sz_index2size(szind_t index) {
+ assert(index < NSIZES);
+ return sz_index2size_lookup(index);
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+sz_s2u_compute(size_t size) {
+ if (unlikely(size > LARGE_MAXCLASS)) {
+ return 0;
+ }
+#if (NTBINS > 0)
+ if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
+ size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
+ size_t lg_ceil = lg_floor(pow2_ceil_zu(size));
+ return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
+ (ZU(1) << lg_ceil));
+ }
+#endif
+ {
+ size_t x = lg_floor((size<<1)-1);
+ size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
+ ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
+ size_t delta = ZU(1) << lg_delta;
+ size_t delta_mask = delta - 1;
+ size_t usize = (size + delta_mask) & ~delta_mask;
+ return usize;
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+sz_s2u_lookup(size_t size) {
+ size_t ret = sz_index2size_lookup(sz_size2index_lookup(size));
+
+ assert(ret == sz_s2u_compute(size));
+ return ret;
+}
+
+/*
+ * Compute usable size that would result from allocating an object with the
+ * specified size.
+ */
+JEMALLOC_ALWAYS_INLINE size_t
+sz_s2u(size_t size) {
+ assert(size > 0);
+ if (likely(size <= LOOKUP_MAXCLASS)) {
+ return sz_s2u_lookup(size);
+ }
+ return sz_s2u_compute(size);
+}
+
+/*
+ * Compute usable size that would result from allocating an object with the
+ * specified size and alignment.
+ */
+JEMALLOC_ALWAYS_INLINE size_t
+sz_sa2u(size_t size, size_t alignment) {
+ size_t usize;
+
+ assert(alignment != 0 && ((alignment - 1) & alignment) == 0);
+
+ /* Try for a small size class. */
+ if (size <= SMALL_MAXCLASS && alignment < PAGE) {
+ /*
+ * Round size up to the nearest multiple of alignment.
+ *
+ * This done, we can take advantage of the fact that for each
+ * small size class, every object is aligned at the smallest
+ * power of two that is non-zero in the base two representation
+ * of the size. For example:
+ *
+ * Size | Base 2 | Minimum alignment
+ * -----+----------+------------------
+ * 96 | 1100000 | 32
+ * 144 | 10100000 | 32
+ * 192 | 11000000 | 64
+ */
+ usize = sz_s2u(ALIGNMENT_CEILING(size, alignment));
+ if (usize < LARGE_MINCLASS) {
+ return usize;
+ }
+ }
+
+ /* Large size class. Beware of overflow. */
+
+ if (unlikely(alignment > LARGE_MAXCLASS)) {
+ return 0;
+ }
+
+ /* Make sure result is a large size class. */
+ if (size <= LARGE_MINCLASS) {
+ usize = LARGE_MINCLASS;
+ } else {
+ usize = sz_s2u(size);
+ if (usize < size) {
+ /* size_t overflow. */
+ return 0;
+ }
+ }
+
+ /*
+ * Calculate the multi-page mapping that large_palloc() would need in
+ * order to guarantee the alignment.
+ */
+ if (usize + sz_large_pad + PAGE_CEILING(alignment) - PAGE < usize) {
+ /* size_t overflow. */
+ return 0;
+ }
+ return usize;
+}
+
+#endif /* JEMALLOC_INTERNAL_SIZE_H */
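
sz_s2u() rounds a request up to the next size class by picking a power-of-two delta that depends on where the size falls: four classes per doubling, as the table earlier shows. The standalone sketch below mimics only the generic branch of sz_s2u_compute(), assuming a 16-byte quantum (LG_QUANTUM == 4) and LG_SIZE_CLASS_GROUP == 2; the _sketch names are hypothetical and the real function also handles tiny and oversized requests. It is not part of the patch.

#include <assert.h>
#include <stddef.h>

/* Hypothetical helper: floor(log2(x)) for x > 0. */
static size_t
lg_floor_sketch(size_t x) {
	size_t lg = 0;
	while (x >>= 1) {
		lg++;
	}
	return lg;
}

/* Sketch of the generic rounding branch of sz_s2u_compute(). */
static size_t
s2u_sketch(size_t size) {
	size_t x = lg_floor_sketch((size << 1) - 1);
	size_t lg_delta = (x < 2 + 4 + 1) ? 4 : x - 2 - 1;
	size_t delta_mask = ((size_t)1 << lg_delta) - 1;
	return (size + delta_mask) & ~delta_mask;
}

int
main(void) {
	assert(s2u_sketch(100) == 112);   /* rounds up into the 112-byte bin */
	assert(s2u_sketch(4097) == 5120); /* 4 KiB + 1 rounds to the 5 KiB class */
	return 0;
}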
diff --git a/contrib/jemalloc/include/jemalloc/internal/tcache.h b/contrib/jemalloc/include/jemalloc/internal/tcache.h
deleted file mode 100644
index 5fe5ebfa3794..000000000000
--- a/contrib/jemalloc/include/jemalloc/internal/tcache.h
+++ /dev/null
@@ -1,472 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-typedef struct tcache_bin_info_s tcache_bin_info_t;
-typedef struct tcache_bin_s tcache_bin_t;
-typedef struct tcache_s tcache_t;
-typedef struct tcaches_s tcaches_t;
-
-/*
- * tcache pointers close to NULL are used to encode state information that is
- * used for two purposes: preventing thread caching on a per thread basis and
- * cleaning up during thread shutdown.
- */
-#define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1)
-#define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2)
-#define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3)
-#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY
-
-/*
- * Absolute minimum number of cache slots for each small bin.
- */
-#define TCACHE_NSLOTS_SMALL_MIN 20
-
-/*
- * Absolute maximum number of cache slots for each small bin in the thread
- * cache. This is an additional constraint beyond that imposed as: twice the
- * number of regions per run for this size class.
- *
- * This constant must be an even number.
- */
-#define TCACHE_NSLOTS_SMALL_MAX 200
-
-/* Number of cache slots for large size classes. */
-#define TCACHE_NSLOTS_LARGE 20
-
-/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
-#define LG_TCACHE_MAXCLASS_DEFAULT 15
-
-/*
- * TCACHE_GC_SWEEP is the approximate number of allocation events between
- * full GC sweeps. Integer rounding may cause the actual number to be
- * slightly higher, since GC is performed incrementally.
- */
-#define TCACHE_GC_SWEEP 8192
-
-/* Number of tcache allocation/deallocation events between incremental GCs. */
-#define TCACHE_GC_INCR \
- ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-typedef enum {
- tcache_enabled_false = 0, /* Enable cast to/from bool. */
- tcache_enabled_true = 1,
- tcache_enabled_default = 2
-} tcache_enabled_t;
-
-/*
- * Read-only information associated with each element of tcache_t's tbins array
- * is stored separately, mainly to reduce memory usage.
- */
-struct tcache_bin_info_s {
- unsigned ncached_max; /* Upper limit on ncached. */
-};
-
-struct tcache_bin_s {
- tcache_bin_stats_t tstats;
- int low_water; /* Min # cached since last GC. */
- unsigned lg_fill_div; /* Fill (ncached_max >> lg_fill_div). */
- unsigned ncached; /* # of cached objects. */
- /*
- * To make use of adjacent cacheline prefetch, the items in the avail
- * stack goes to higher address for newer allocations. avail points
- * just above the available space, which means that
- * avail[-ncached, ... -1] are available items and the lowest item will
- * be allocated first.
- */
- void **avail; /* Stack of available objects. */
-};
-
-struct tcache_s {
- ql_elm(tcache_t) link; /* Used for aggregating stats. */
- uint64_t prof_accumbytes;/* Cleared after arena_prof_accum(). */
- ticker_t gc_ticker; /* Drives incremental GC. */
- szind_t next_gc_bin; /* Next bin to GC. */
- tcache_bin_t tbins[1]; /* Dynamically sized. */
- /*
- * The pointer stacks associated with tbins follow as a contiguous
- * array. During tcache initialization, the avail pointer in each
- * element of tbins is initialized to point to the proper offset within
- * this array.
- */
-};
-
-/* Linkage for list of available (previously used) explicit tcache IDs. */
-struct tcaches_s {
- union {
- tcache_t *tcache;
- tcaches_t *next;
- };
-};
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-extern bool opt_tcache;
-extern ssize_t opt_lg_tcache_max;
-
-extern tcache_bin_info_t *tcache_bin_info;
-
-/*
- * Number of tcache bins. There are NBINS small-object bins, plus 0 or more
- * large-object bins.
- */
-extern unsigned nhbins;
-
-/* Maximum cached size class. */
-extern size_t tcache_maxclass;
-
-/*
- * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and
- * usable via the MALLOCX_TCACHE() flag. The automatic per thread tcaches are
- * completely disjoint from this data structure. tcaches starts off as a sparse
- * array, so it has no physical memory footprint until individual pages are
- * touched. This allows the entire array to be allocated the first time an
- * explicit tcache is created without a disproportionate impact on memory usage.
- */
-extern tcaches_t *tcaches;
-
-size_t tcache_salloc(tsdn_t *tsdn, const void *ptr);
-void tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
-void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
- tcache_bin_t *tbin, szind_t binind, bool *tcache_success);
-void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
- szind_t binind, unsigned rem);
-void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
- unsigned rem, tcache_t *tcache);
-void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache,
- arena_t *oldarena, arena_t *newarena);
-tcache_t *tcache_get_hard(tsd_t *tsd);
-tcache_t *tcache_create(tsdn_t *tsdn, arena_t *arena);
-void tcache_cleanup(tsd_t *tsd);
-void tcache_enabled_cleanup(tsd_t *tsd);
-void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
-bool tcaches_create(tsd_t *tsd, unsigned *r_ind);
-void tcaches_flush(tsd_t *tsd, unsigned ind);
-void tcaches_destroy(tsd_t *tsd, unsigned ind);
-bool tcache_boot(tsdn_t *tsdn);
-void tcache_prefork(tsdn_t *tsdn);
-void tcache_postfork_parent(tsdn_t *tsdn);
-void tcache_postfork_child(tsdn_t *tsdn);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-void tcache_event(tsd_t *tsd, tcache_t *tcache);
-void tcache_flush(void);
-bool tcache_enabled_get(void);
-tcache_t *tcache_get(tsd_t *tsd, bool create);
-void tcache_enabled_set(bool enabled);
-void *tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success);
-void *tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
- size_t size, szind_t ind, bool zero, bool slow_path);
-void *tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
- size_t size, szind_t ind, bool zero, bool slow_path);
-void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr,
- szind_t binind, bool slow_path);
-void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr,
- size_t size, bool slow_path);
-tcache_t *tcaches_get(tsd_t *tsd, unsigned ind);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
-JEMALLOC_INLINE void
-tcache_flush(void)
-{
- tsd_t *tsd;
-
- cassert(config_tcache);
-
- tsd = tsd_fetch();
- tcache_cleanup(tsd);
-}
-
-JEMALLOC_INLINE bool
-tcache_enabled_get(void)
-{
- tsd_t *tsd;
- tcache_enabled_t tcache_enabled;
-
- cassert(config_tcache);
-
- tsd = tsd_fetch();
- tcache_enabled = tsd_tcache_enabled_get(tsd);
- if (tcache_enabled == tcache_enabled_default) {
- tcache_enabled = (tcache_enabled_t)opt_tcache;
- tsd_tcache_enabled_set(tsd, tcache_enabled);
- }
-
- return ((bool)tcache_enabled);
-}
-
-JEMALLOC_INLINE void
-tcache_enabled_set(bool enabled)
-{
- tsd_t *tsd;
- tcache_enabled_t tcache_enabled;
-
- cassert(config_tcache);
-
- tsd = tsd_fetch();
-
- tcache_enabled = (tcache_enabled_t)enabled;
- tsd_tcache_enabled_set(tsd, tcache_enabled);
-
- if (!enabled)
- tcache_cleanup(tsd);
-}
-
-JEMALLOC_ALWAYS_INLINE tcache_t *
-tcache_get(tsd_t *tsd, bool create)
-{
- tcache_t *tcache;
-
- if (!config_tcache)
- return (NULL);
-
- tcache = tsd_tcache_get(tsd);
- if (!create)
- return (tcache);
- if (unlikely(tcache == NULL) && tsd_nominal(tsd)) {
- tcache = tcache_get_hard(tsd);
- tsd_tcache_set(tsd, tcache);
- }
-
- return (tcache);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-tcache_event(tsd_t *tsd, tcache_t *tcache)
-{
-
- if (TCACHE_GC_INCR == 0)
- return;
-
- if (unlikely(ticker_tick(&tcache->gc_ticker)))
- tcache_event_hard(tsd, tcache);
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success)
-{
- void *ret;
-
- if (unlikely(tbin->ncached == 0)) {
- tbin->low_water = -1;
- *tcache_success = false;
- return (NULL);
- }
- /*
- * tcache_success (instead of ret) should be checked upon the return of
- * this function. We avoid checking (ret == NULL) because there is
- * never a null stored on the avail stack (which is unknown to the
- * compiler), and eagerly checking ret would cause pipeline stall
- * (waiting for the cacheline).
- */
- *tcache_success = true;
- ret = *(tbin->avail - tbin->ncached);
- tbin->ncached--;
-
- if (unlikely((int)tbin->ncached < tbin->low_water))
- tbin->low_water = tbin->ncached;
-
- return (ret);
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
- szind_t binind, bool zero, bool slow_path)
-{
- void *ret;
- tcache_bin_t *tbin;
- bool tcache_success;
- size_t usize JEMALLOC_CC_SILENCE_INIT(0);
-
- assert(binind < NBINS);
- tbin = &tcache->tbins[binind];
- ret = tcache_alloc_easy(tbin, &tcache_success);
- assert(tcache_success == (ret != NULL));
- if (unlikely(!tcache_success)) {
- bool tcache_hard_success;
- arena = arena_choose(tsd, arena);
- if (unlikely(arena == NULL))
- return (NULL);
-
- ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
- tbin, binind, &tcache_hard_success);
- if (tcache_hard_success == false)
- return (NULL);
- }
-
- assert(ret);
- /*
- * Only compute usize if required. The checks in the following if
- * statement are all static.
- */
- if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
- usize = index2size(binind);
- assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
- }
-
- if (likely(!zero)) {
- if (slow_path && config_fill) {
- if (unlikely(opt_junk_alloc)) {
- arena_alloc_junk_small(ret,
- &arena_bin_info[binind], false);
- } else if (unlikely(opt_zero))
- memset(ret, 0, usize);
- }
- } else {
- if (slow_path && config_fill && unlikely(opt_junk_alloc)) {
- arena_alloc_junk_small(ret, &arena_bin_info[binind],
- true);
- }
- memset(ret, 0, usize);
- }
-
- if (config_stats)
- tbin->tstats.nrequests++;
- if (config_prof)
- tcache->prof_accumbytes += usize;
- tcache_event(tsd, tcache);
- return (ret);
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
- szind_t binind, bool zero, bool slow_path)
-{
- void *ret;
- tcache_bin_t *tbin;
- bool tcache_success;
-
- assert(binind < nhbins);
- tbin = &tcache->tbins[binind];
- ret = tcache_alloc_easy(tbin, &tcache_success);
- assert(tcache_success == (ret != NULL));
- if (unlikely(!tcache_success)) {
- /*
- * Only allocate one large object at a time, because it's quite
- * expensive to create one and not use it.
- */
- arena = arena_choose(tsd, arena);
- if (unlikely(arena == NULL))
- return (NULL);
-
- ret = arena_malloc_large(tsd_tsdn(tsd), arena, binind, zero);
- if (ret == NULL)
- return (NULL);
- } else {
- size_t usize JEMALLOC_CC_SILENCE_INIT(0);
-
- /* Only compute usize on demand */
- if (config_prof || (slow_path && config_fill) ||
- unlikely(zero)) {
- usize = index2size(binind);
- assert(usize <= tcache_maxclass);
- }
-
- if (config_prof && usize == LARGE_MINCLASS) {
- arena_chunk_t *chunk =
- (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
- size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
- LG_PAGE);
- arena_mapbits_large_binind_set(chunk, pageind,
- BININD_INVALID);
- }
- if (likely(!zero)) {
- if (slow_path && config_fill) {
- if (unlikely(opt_junk_alloc)) {
- memset(ret, JEMALLOC_ALLOC_JUNK,
- usize);
- } else if (unlikely(opt_zero))
- memset(ret, 0, usize);
- }
- } else
- memset(ret, 0, usize);
-
- if (config_stats)
- tbin->tstats.nrequests++;
- if (config_prof)
- tcache->prof_accumbytes += usize;
- }
-
- tcache_event(tsd, tcache);
- return (ret);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
- bool slow_path)
-{
- tcache_bin_t *tbin;
- tcache_bin_info_t *tbin_info;
-
- assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS);
-
- if (slow_path && config_fill && unlikely(opt_junk_free))
- arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);
-
- tbin = &tcache->tbins[binind];
- tbin_info = &tcache_bin_info[binind];
- if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
- tcache_bin_flush_small(tsd, tcache, tbin, binind,
- (tbin_info->ncached_max >> 1));
- }
- assert(tbin->ncached < tbin_info->ncached_max);
- tbin->ncached++;
- *(tbin->avail - tbin->ncached) = ptr;
-
- tcache_event(tsd, tcache);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size,
- bool slow_path)
-{
- szind_t binind;
- tcache_bin_t *tbin;
- tcache_bin_info_t *tbin_info;
-
- assert((size & PAGE_MASK) == 0);
- assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS);
- assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);
-
- binind = size2index(size);
-
- if (slow_path && config_fill && unlikely(opt_junk_free))
- arena_dalloc_junk_large(ptr, size);
-
- tbin = &tcache->tbins[binind];
- tbin_info = &tcache_bin_info[binind];
- if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
- tcache_bin_flush_large(tsd, tbin, binind,
- (tbin_info->ncached_max >> 1), tcache);
- }
- assert(tbin->ncached < tbin_info->ncached_max);
- tbin->ncached++;
- *(tbin->avail - tbin->ncached) = ptr;
-
- tcache_event(tsd, tcache);
-}
-
-JEMALLOC_ALWAYS_INLINE tcache_t *
-tcaches_get(tsd_t *tsd, unsigned ind)
-{
- tcaches_t *elm = &tcaches[ind];
- if (unlikely(elm->tcache == NULL)) {
- elm->tcache = tcache_create(tsd_tsdn(tsd), arena_choose(tsd,
- NULL));
- }
- return (elm->tcache);
-}
-#endif
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
diff --git a/contrib/jemalloc/include/jemalloc/internal/tcache_externs.h b/contrib/jemalloc/include/jemalloc/internal/tcache_externs.h
new file mode 100644
index 000000000000..abe133fabba9
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/tcache_externs.h
@@ -0,0 +1,55 @@
+#ifndef JEMALLOC_INTERNAL_TCACHE_EXTERNS_H
+#define JEMALLOC_INTERNAL_TCACHE_EXTERNS_H
+
+#include "jemalloc/internal/size_classes.h"
+
+extern bool opt_tcache;
+extern ssize_t opt_lg_tcache_max;
+
+extern tcache_bin_info_t *tcache_bin_info;
+
+/*
+ * Number of tcache bins. There are NBINS small-object bins, plus 0 or more
+ * large-object bins.
+ */
+extern unsigned nhbins;
+
+/* Maximum cached size class. */
+extern size_t tcache_maxclass;
+
+/*
+ * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and
+ * usable via the MALLOCX_TCACHE() flag. The automatic per thread tcaches are
+ * completely disjoint from this data structure. tcaches starts off as a sparse
+ * array, so it has no physical memory footprint until individual pages are
+ * touched. This allows the entire array to be allocated the first time an
+ * explicit tcache is created without a disproportionate impact on memory usage.
+ */
+extern tcaches_t *tcaches;
+
+size_t tcache_salloc(tsdn_t *tsdn, const void *ptr);
+void tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
+void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
+ tcache_bin_t *tbin, szind_t binind, bool *tcache_success);
+void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
+ szind_t binind, unsigned rem);
+void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
+ unsigned rem, tcache_t *tcache);
+void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache,
+ arena_t *arena);
+tcache_t *tcache_create_explicit(tsd_t *tsd);
+void tcache_cleanup(tsd_t *tsd);
+void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
+bool tcaches_create(tsd_t *tsd, unsigned *r_ind);
+void tcaches_flush(tsd_t *tsd, unsigned ind);
+void tcaches_destroy(tsd_t *tsd, unsigned ind);
+bool tcache_boot(tsdn_t *tsdn);
+void tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
+void tcache_prefork(tsdn_t *tsdn);
+void tcache_postfork_parent(tsdn_t *tsdn);
+void tcache_postfork_child(tsdn_t *tsdn);
+void tcache_flush(void);
+bool tsd_tcache_data_init(tsd_t *tsd);
+bool tsd_tcache_enabled_data_init(tsd_t *tsd);
+
+#endif /* JEMALLOC_INTERNAL_TCACHE_EXTERNS_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/tcache_inlines.h b/contrib/jemalloc/include/jemalloc/internal/tcache_inlines.h
new file mode 100644
index 000000000000..c55bcd2723de
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/tcache_inlines.h
@@ -0,0 +1,250 @@
+#ifndef JEMALLOC_INTERNAL_TCACHE_INLINES_H
+#define JEMALLOC_INTERNAL_TCACHE_INLINES_H
+
+#include "jemalloc/internal/jemalloc_internal_types.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/sz.h"
+#include "jemalloc/internal/ticker.h"
+#include "jemalloc/internal/util.h"
+
+static inline bool
+tcache_enabled_get(tsd_t *tsd) {
+ return tsd_tcache_enabled_get(tsd);
+}
+
+static inline void
+tcache_enabled_set(tsd_t *tsd, bool enabled) {
+ bool was_enabled = tsd_tcache_enabled_get(tsd);
+
+ if (!was_enabled && enabled) {
+ tsd_tcache_data_init(tsd);
+ } else if (was_enabled && !enabled) {
+ tcache_cleanup(tsd);
+ }
+ /* Commit the state last. Above calls check current state. */
+ tsd_tcache_enabled_set(tsd, enabled);
+ tsd_slow_update(tsd);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+tcache_event(tsd_t *tsd, tcache_t *tcache) {
+ if (TCACHE_GC_INCR == 0) {
+ return;
+ }
+
+ if (unlikely(ticker_tick(&tcache->gc_ticker))) {
+ tcache_event_hard(tsd, tcache);
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success) {
+ void *ret;
+
+ if (unlikely(tbin->ncached == 0)) {
+ tbin->low_water = -1;
+ *tcache_success = false;
+ return NULL;
+ }
+ /*
+ * tcache_success (instead of ret) should be checked upon the return of
+ * this function. We avoid checking (ret == NULL) because there is
+ * never a null stored on the avail stack (which is unknown to the
+ * compiler), and eagerly checking ret would cause pipeline stall
+ * (waiting for the cacheline).
+ */
+ *tcache_success = true;
+ ret = *(tbin->avail - tbin->ncached);
+ tbin->ncached--;
+
+ if (unlikely((low_water_t)tbin->ncached < tbin->low_water)) {
+ tbin->low_water = tbin->ncached;
+ }
+
+ return ret;
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
+ szind_t binind, bool zero, bool slow_path) {
+ void *ret;
+ tcache_bin_t *tbin;
+ bool tcache_success;
+ size_t usize JEMALLOC_CC_SILENCE_INIT(0);
+
+ assert(binind < NBINS);
+ tbin = tcache_small_bin_get(tcache, binind);
+ ret = tcache_alloc_easy(tbin, &tcache_success);
+ assert(tcache_success == (ret != NULL));
+ if (unlikely(!tcache_success)) {
+ bool tcache_hard_success;
+ arena = arena_choose(tsd, arena);
+ if (unlikely(arena == NULL)) {
+ return NULL;
+ }
+
+ ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
+ tbin, binind, &tcache_hard_success);
+ if (tcache_hard_success == false) {
+ return NULL;
+ }
+ }
+
+ assert(ret);
+ /*
+ * Only compute usize if required. The checks in the following if
+ * statement are all static.
+ */
+ if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
+ usize = sz_index2size(binind);
+ assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
+ }
+
+ if (likely(!zero)) {
+ if (slow_path && config_fill) {
+ if (unlikely(opt_junk_alloc)) {
+ arena_alloc_junk_small(ret,
+ &arena_bin_info[binind], false);
+ } else if (unlikely(opt_zero)) {
+ memset(ret, 0, usize);
+ }
+ }
+ } else {
+ if (slow_path && config_fill && unlikely(opt_junk_alloc)) {
+ arena_alloc_junk_small(ret, &arena_bin_info[binind],
+ true);
+ }
+ memset(ret, 0, usize);
+ }
+
+ if (config_stats) {
+ tbin->tstats.nrequests++;
+ }
+ if (config_prof) {
+ tcache->prof_accumbytes += usize;
+ }
+ tcache_event(tsd, tcache);
+ return ret;
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
+ szind_t binind, bool zero, bool slow_path) {
+ void *ret;
+ tcache_bin_t *tbin;
+ bool tcache_success;
+
+ assert(binind >= NBINS && binind < nhbins);

+ tbin = tcache_large_bin_get(tcache, binind);
+ ret = tcache_alloc_easy(tbin, &tcache_success);
+ assert(tcache_success == (ret != NULL));
+ if (unlikely(!tcache_success)) {
+ /*
+ * Only allocate one large object at a time, because it's quite
+ * expensive to create one and not use it.
+ */
+ arena = arena_choose(tsd, arena);
+ if (unlikely(arena == NULL)) {
+ return NULL;
+ }
+
+ ret = large_malloc(tsd_tsdn(tsd), arena, sz_s2u(size), zero);
+ if (ret == NULL) {
+ return NULL;
+ }
+ } else {
+ size_t usize JEMALLOC_CC_SILENCE_INIT(0);
+
+ /* Only compute usize on demand */
+ if (config_prof || (slow_path && config_fill) ||
+ unlikely(zero)) {
+ usize = sz_index2size(binind);
+ assert(usize <= tcache_maxclass);
+ }
+
+ if (likely(!zero)) {
+ if (slow_path && config_fill) {
+ if (unlikely(opt_junk_alloc)) {
+ memset(ret, JEMALLOC_ALLOC_JUNK,
+ usize);
+ } else if (unlikely(opt_zero)) {
+ memset(ret, 0, usize);
+ }
+ }
+ } else {
+ memset(ret, 0, usize);
+ }
+
+ if (config_stats) {
+ tbin->tstats.nrequests++;
+ }
+ if (config_prof) {
+ tcache->prof_accumbytes += usize;
+ }
+ }
+
+ tcache_event(tsd, tcache);
+ return ret;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
+ bool slow_path) {
+ tcache_bin_t *tbin;
+ tcache_bin_info_t *tbin_info;
+
+ assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS);
+
+ if (slow_path && config_fill && unlikely(opt_junk_free)) {
+ arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);
+ }
+
+ tbin = tcache_small_bin_get(tcache, binind);
+ tbin_info = &tcache_bin_info[binind];
+ if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
+ tcache_bin_flush_small(tsd, tcache, tbin, binind,
+ (tbin_info->ncached_max >> 1));
+ }
+ assert(tbin->ncached < tbin_info->ncached_max);
+ tbin->ncached++;
+ *(tbin->avail - tbin->ncached) = ptr;
+
+ tcache_event(tsd, tcache);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
+ bool slow_path) {
+ tcache_bin_t *tbin;
+ tcache_bin_info_t *tbin_info;
+
+ assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS);
+ assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);
+
+ if (slow_path && config_fill && unlikely(opt_junk_free)) {
+ large_dalloc_junk(ptr, sz_index2size(binind));
+ }
+
+ tbin = tcache_large_bin_get(tcache, binind);
+ tbin_info = &tcache_bin_info[binind];
+ if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
+ tcache_bin_flush_large(tsd, tbin, binind,
+ (tbin_info->ncached_max >> 1), tcache);
+ }
+ assert(tbin->ncached < tbin_info->ncached_max);
+ tbin->ncached++;
+ *(tbin->avail - tbin->ncached) = ptr;
+
+ tcache_event(tsd, tcache);
+}
+
+JEMALLOC_ALWAYS_INLINE tcache_t *
+tcaches_get(tsd_t *tsd, unsigned ind) {
+ tcaches_t *elm = &tcaches[ind];
+ if (unlikely(elm->tcache == NULL)) {
+ elm->tcache = tcache_create_explicit(tsd);
+ }
+ return elm->tcache;
+}
+
+#endif /* JEMALLOC_INTERNAL_TCACHE_INLINES_H */
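
tcache_alloc_easy() and tcache_dalloc_small() above treat tbin->avail as a downward-growing stack: avail points just past the backing array, cached pointers live at avail[-ncached .. -1], and the lowest occupied address (the most recently pushed item) is popped first. A minimal standalone sketch of that layout, not part of the patch:

#include <assert.h>
#include <stddef.h>

int
main(void) {
	void *backing[4];
	void **avail = backing + 4; /* "just above the available space" */
	unsigned ncached = 0;
	int a, b;

	/* Deallocation pushes at avail[-++ncached]. */
	ncached++; *(avail - ncached) = &a; /* lands in backing[3] */
	ncached++; *(avail - ncached) = &b; /* lands in backing[2] */

	/* Allocation pops the lowest occupied slot (LIFO). */
	void *ret = *(avail - ncached);
	ncached--;
	assert(ret == &b);
	return 0;
}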
diff --git a/contrib/jemalloc/include/jemalloc/internal/tcache_structs.h b/contrib/jemalloc/include/jemalloc/internal/tcache_structs.h
new file mode 100644
index 000000000000..7eb516fb6b15
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/tcache_structs.h
@@ -0,0 +1,64 @@
+#ifndef JEMALLOC_INTERNAL_TCACHE_STRUCTS_H
+#define JEMALLOC_INTERNAL_TCACHE_STRUCTS_H
+
+#include "jemalloc/internal/ql.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/stats_tsd.h"
+#include "jemalloc/internal/ticker.h"
+
+/*
+ * Read-only information associated with each element of tcache_t's tbins array
+ * is stored separately, mainly to reduce memory usage.
+ */
+struct tcache_bin_info_s {
+ unsigned ncached_max; /* Upper limit on ncached. */
+};
+
+struct tcache_bin_s {
+ low_water_t low_water; /* Min # cached since last GC. */
+ uint32_t ncached; /* # of cached objects. */
+ /*
+ * ncached and stats are both modified frequently. Let's keep them
+ * close so that they have a higher chance of being on the same
+ * cacheline, and thus fewer write-backs.
+ */
+ tcache_bin_stats_t tstats;
+ /*
+ * To make use of adjacent cacheline prefetch, the items in the avail
+ * stack goes to higher address for newer allocations. avail points
+ * just above the available space, which means that
+ * avail[-ncached, ... -1] are available items and the lowest item will
+ * be allocated first.
+ */
+ void **avail; /* Stack of available objects. */
+};
+
+struct tcache_s {
+ /* Data accessed frequently first: prof, ticker and small bins. */
+ uint64_t prof_accumbytes;/* Cleared after arena_prof_accum(). */
+ ticker_t gc_ticker; /* Drives incremental GC. */
+ /*
+ * The pointer stacks associated with tbins follow as a contiguous
+ * array. During tcache initialization, the avail pointer in each
+ * element of tbins is initialized to point to the proper offset within
+ * this array.
+ */
+ tcache_bin_t tbins_small[NBINS];
+ /* Data accessed less often below. */
+ ql_elm(tcache_t) link; /* Used for aggregating stats. */
+ arena_t *arena; /* Associated arena. */
+ szind_t next_gc_bin; /* Next bin to GC. */
+ /* For small bins, fill (ncached_max >> lg_fill_div). */
+ uint8_t lg_fill_div[NBINS];
+ tcache_bin_t tbins_large[NSIZES-NBINS];
+};
+
+/* Linkage for list of available (previously used) explicit tcache IDs. */
+struct tcaches_s {
+ union {
+ tcache_t *tcache;
+ tcaches_t *next;
+ };
+};
+
+#endif /* JEMALLOC_INTERNAL_TCACHE_STRUCTS_H */
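
The avail-stack comment above is easier to follow with a tiny standalone illustration: avail points one past the backing storage, avail[-ncached .. -1] hold the cached items, and the newest item sits at the lowest used address, which is also the first one handed back on allocation. The four-slot array is purely illustrative.

#include <stdio.h>

int
main(void) {
	/* Toy backing storage for one bin; in jemalloc this lives inside tsd. */
	void *slots[4];
	void **avail = &slots[4]; /* Points just above the available space. */
	unsigned ncached = 0;
	int a, b, c;

	/* Deallocations push downward: newer items land at lower addresses. */
	ncached++;
	*(avail - ncached) = &a; /* slots[3] */
	ncached++;
	*(avail - ncached) = &b; /* slots[2] */
	ncached++;
	*(avail - ncached) = &c; /* slots[1]: newest item, lowest used address. */

	/* Allocation takes the lowest used slot first, i.e. the newest item. */
	void *ret = *(avail - ncached);
	ncached--;
	printf("popped %p, &c is %p, %u item(s) left\n", ret, (void *)&c, ncached);
	return 0;
}
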
diff --git a/contrib/jemalloc/include/jemalloc/internal/tcache_types.h b/contrib/jemalloc/include/jemalloc/internal/tcache_types.h
new file mode 100644
index 000000000000..1155d62cb44b
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/tcache_types.h
@@ -0,0 +1,61 @@
+#ifndef JEMALLOC_INTERNAL_TCACHE_TYPES_H
+#define JEMALLOC_INTERNAL_TCACHE_TYPES_H
+
+#include "jemalloc/internal/size_classes.h"
+
+typedef struct tcache_bin_info_s tcache_bin_info_t;
+typedef struct tcache_bin_s tcache_bin_t;
+typedef struct tcache_s tcache_t;
+typedef struct tcaches_s tcaches_t;
+
+/* ncached is cast to this type for comparison. */
+typedef int32_t low_water_t;
+
+/*
+ * tcache pointers close to NULL are used to encode state information that
+ * serves two purposes: preventing thread caching on a per-thread basis and
+ * cleaning up during thread shutdown.
+ */
+#define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1)
+#define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2)
+#define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3)
+#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY
+
+/*
+ * Absolute minimum number of cache slots for each small bin.
+ */
+#define TCACHE_NSLOTS_SMALL_MIN 20
+
+/*
+ * Absolute maximum number of cache slots for each small bin in the thread
+ * cache.  This is an additional constraint beyond the limit of twice the
+ * number of regions per slab for this size class.
+ *
+ * This constant must be an even number.
+ */
+#define TCACHE_NSLOTS_SMALL_MAX 200
+
+/* Number of cache slots for large size classes. */
+#define TCACHE_NSLOTS_LARGE 20
+
+/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
+#define LG_TCACHE_MAXCLASS_DEFAULT 15
+
+/*
+ * TCACHE_GC_SWEEP is the approximate number of allocation events between
+ * full GC sweeps. Integer rounding may cause the actual number to be
+ * slightly higher, since GC is performed incrementally.
+ */
+#define TCACHE_GC_SWEEP 8192
+
+/* Number of tcache allocation/deallocation events between incremental GCs. */
+#define TCACHE_GC_INCR \
+ ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))
+
+/* Used in TSD static initializer only. Real init in tcache_data_init(). */
+#define TCACHE_ZERO_INITIALIZER {0}
+
+/* Used in TSD static initializer only. Will be initialized to opt_tcache. */
+#define TCACHE_ENABLED_ZERO_INITIALIZER false
+
+#endif /* JEMALLOC_INTERNAL_TCACHE_TYPES_H */
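
To make the TCACHE_GC_INCR arithmetic concrete, here is a tiny standalone program, assuming an illustrative NBINS of 36 (the real value comes from size_classes.h and varies by configuration).

#include <stdio.h>

/* NBINS is configuration-dependent; 36 is only an illustrative assumption. */
#define NBINS 36
#define TCACHE_GC_SWEEP 8192
#define TCACHE_GC_INCR \
    ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))

int
main(void) {
	/* One bin is GCed every TCACHE_GC_INCR events, so a full sweep of all
	 * NBINS bins takes NBINS * TCACHE_GC_INCR events, slightly more than
	 * TCACHE_GC_SWEEP because of the rounding term. */
	printf("incr=%d, full sweep=%d events (target %d)\n",
	    TCACHE_GC_INCR, NBINS * TCACHE_GC_INCR, TCACHE_GC_SWEEP);
	return 0;
}
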
diff --git a/contrib/jemalloc/include/jemalloc/internal/ticker.h b/contrib/jemalloc/include/jemalloc/internal/ticker.h
index 4696e56d2573..572b96459cc5 100644
--- a/contrib/jemalloc/include/jemalloc/internal/ticker.h
+++ b/contrib/jemalloc/include/jemalloc/internal/ticker.h
@@ -1,75 +1,50 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-typedef struct ticker_s ticker_t;
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-struct ticker_s {
- int32_t tick;
- int32_t nticks;
-};
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-void ticker_init(ticker_t *ticker, int32_t nticks);
-void ticker_copy(ticker_t *ticker, const ticker_t *other);
-int32_t ticker_read(const ticker_t *ticker);
-bool ticker_ticks(ticker_t *ticker, int32_t nticks);
-bool ticker_tick(ticker_t *ticker);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TICKER_C_))
-JEMALLOC_INLINE void
-ticker_init(ticker_t *ticker, int32_t nticks)
-{
-
+#ifndef JEMALLOC_INTERNAL_TICKER_H
+#define JEMALLOC_INTERNAL_TICKER_H
+
+#include "jemalloc/internal/util.h"
+
+/**
+ * A ticker makes it easy to count down events until some limit.  You
+ * ticker_init the ticker to trigger every nticks events.  You then notify it
+ * that an event has occurred with calls to ticker_tick (or that nticks events
+ * have occurred with a call to ticker_ticks), which return true (and reset the
+ * counter) when the countdown hits zero.
+ */
+
+typedef struct {
+ int32_t tick;
+ int32_t nticks;
+} ticker_t;
+
+static inline void
+ticker_init(ticker_t *ticker, int32_t nticks) {
ticker->tick = nticks;
ticker->nticks = nticks;
}
-JEMALLOC_INLINE void
-ticker_copy(ticker_t *ticker, const ticker_t *other)
-{
-
+static inline void
+ticker_copy(ticker_t *ticker, const ticker_t *other) {
*ticker = *other;
}
-JEMALLOC_INLINE int32_t
-ticker_read(const ticker_t *ticker)
-{
-
- return (ticker->tick);
+static inline int32_t
+ticker_read(const ticker_t *ticker) {
+ return ticker->tick;
}
-JEMALLOC_INLINE bool
-ticker_ticks(ticker_t *ticker, int32_t nticks)
-{
-
+static inline bool
+ticker_ticks(ticker_t *ticker, int32_t nticks) {
if (unlikely(ticker->tick < nticks)) {
ticker->tick = ticker->nticks;
- return (true);
+ return true;
}
ticker->tick -= nticks;
return(false);
}
-JEMALLOC_INLINE bool
-ticker_tick(ticker_t *ticker)
-{
-
- return (ticker_ticks(ticker, 1));
+static inline bool
+ticker_tick(ticker_t *ticker) {
+ return ticker_ticks(ticker, 1);
}
-#endif
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
+#endif /* JEMALLOC_INTERNAL_TICKER_H */
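
A usage sketch of the ticker shown above, re-declared locally (with plain int return values) so it compiles on its own; given the countdown logic, a ticker initialized with nticks == 3 fires on the 4th event and every 4th event thereafter.

#include <stdint.h>
#include <stdio.h>

/* Local re-statement of the ticker above, for illustration only. */
typedef struct {
	int32_t tick;
	int32_t nticks;
} ticker_t;

static void
ticker_init(ticker_t *t, int32_t nticks) {
	t->tick = nticks;
	t->nticks = nticks;
}

static int
ticker_tick(ticker_t *t) {
	if (t->tick < 1) {
		t->tick = t->nticks; /* Fire and reset. */
		return 1;
	}
	t->tick -= 1;
	return 0;
}

int
main(void) {
	ticker_t t;
	ticker_init(&t, 3);
	for (int i = 1; i <= 8; i++) {
		if (ticker_tick(&t)) {
			/* With nticks == 3 this prints for events 4 and 8. */
			printf("event %d fired the ticker\n", i);
		}
	}
	return 0;
}
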
diff --git a/contrib/jemalloc/include/jemalloc/internal/tsd.h b/contrib/jemalloc/include/jemalloc/internal/tsd.h
index 9f3743357b1b..631fbf1f7a47 100644
--- a/contrib/jemalloc/include/jemalloc/internal/tsd.h
+++ b/contrib/jemalloc/include/jemalloc/internal/tsd.h
@@ -1,631 +1,122 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-/* Maximum number of malloc_tsd users with cleanup functions. */
-#define MALLOC_TSD_CLEANUPS_MAX 2
-
-typedef bool (*malloc_tsd_cleanup_t)(void);
-
-#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
- !defined(_WIN32))
-typedef struct tsd_init_block_s tsd_init_block_t;
-typedef struct tsd_init_head_s tsd_init_head_t;
-#endif
-
-typedef struct tsd_s tsd_t;
-typedef struct tsdn_s tsdn_t;
-
-#define TSDN_NULL ((tsdn_t *)0)
-
-typedef enum {
- tsd_state_uninitialized,
- tsd_state_nominal,
- tsd_state_purgatory,
- tsd_state_reincarnated
-} tsd_state_t;
+#ifndef JEMALLOC_INTERNAL_TSD_H
+#define JEMALLOC_INTERNAL_TSD_H
+
+#include "jemalloc/internal/arena_types.h"
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/jemalloc_internal_externs.h"
+#include "jemalloc/internal/prof_types.h"
+#include "jemalloc/internal/ql.h"
+#include "jemalloc/internal/rtree_tsd.h"
+#include "jemalloc/internal/tcache_types.h"
+#include "jemalloc/internal/tcache_structs.h"
+#include "jemalloc/internal/util.h"
+#include "jemalloc/internal/witness.h"
/*
- * TLS/TSD-agnostic macro-based implementation of thread-specific data. There
- * are five macros that support (at least) three use cases: file-private,
- * library-private, and library-private inlined. Following is an example
- * library-private tsd variable:
- *
- * In example.h:
- * typedef struct {
- * int x;
- * int y;
- * } example_t;
- * #define EX_INITIALIZER JEMALLOC_CONCAT({0, 0})
- * malloc_tsd_types(example_, example_t)
- * malloc_tsd_protos(, example_, example_t)
- * malloc_tsd_externs(example_, example_t)
- * In example.c:
- * malloc_tsd_data(, example_, example_t, EX_INITIALIZER)
- * malloc_tsd_funcs(, example_, example_t, EX_INITIALIZER,
- * example_tsd_cleanup)
- *
- * The result is a set of generated functions, e.g.:
+ * Thread-Specific-Data layout
+ * --- data accessed on tcache fast path: state, rtree_ctx, stats, prof ---
+ * s: state
+ * e: tcache_enabled
+ * m: thread_allocated (config_stats)
+ * f: thread_deallocated (config_stats)
+ * p: prof_tdata (config_prof)
+ * c: rtree_ctx (rtree cache accessed on deallocation)
+ * t: tcache
+ * --- data not accessed on tcache fast path: arena-related fields ---
+ * d: arenas_tdata_bypass
+ * r: reentrancy_level
+ * x: narenas_tdata
+ * i: iarena
+ * a: arena
+ * o: arenas_tdata
+ * Loading TSD data is on the critical path of basically all malloc operations.
+ * In particular, tcache and rtree_ctx rely on hot CPU cache to be effective.
+ * Use a compact layout to reduce cache footprint.
+ * +--- 64-bit and 64B cacheline; 1B each letter; First byte on the left. ---+
+ * |---------------------------- 1st cacheline ----------------------------|
+ * | sedrxxxx mmmmmmmm ffffffff pppppppp [c * 32 ........ ........ .......] |
+ * |---------------------------- 2nd cacheline ----------------------------|
+ * | [c * 64 ........ ........ ........ ........ ........ ........ .......] |
+ * |---------------------------- 3rd cacheline ----------------------------|
+ * | [c * 32 ........ ........ .......] iiiiiiii aaaaaaaa oooooooo [t...... |
+ * +-------------------------------------------------------------------------+
+ * Note: the entire tcache is embedded into TSD and spans multiple cachelines.
*
- * bool example_tsd_boot(void) {...}
- * bool example_tsd_booted_get(void) {...}
- * example_t *example_tsd_get(bool init) {...}
- * void example_tsd_set(example_t *val) {...}
- *
- * Note that all of the functions deal in terms of (a_type *) rather than
- * (a_type) so that it is possible to support non-pointer types (unlike
- * pthreads TSD). example_tsd_cleanup() is passed an (a_type *) pointer that is
- * cast to (void *). This means that the cleanup function needs to cast the
- * function argument to (a_type *), then dereference the resulting pointer to
- * access fields, e.g.
- *
- * void
- * example_tsd_cleanup(void *arg)
- * {
- * example_t *example = (example_t *)arg;
- *
- * example->x = 42;
- * [...]
- * if ([want the cleanup function to be called again])
- * example_tsd_set(example);
- * }
- *
- * If example_tsd_set() is called within example_tsd_cleanup(), it will be
- * called again. This is similar to how pthreads TSD destruction works, except
- * that pthreads only calls the cleanup function again if the value was set to
- * non-NULL.
+ * The last 3 members (i, a and o) before tcache aren't really needed on the
+ * tcache fast path.  However, we have a number of unused tcache bins and
+ * witnesses (never touched unless config_debug) at the end of tcache, so we
+ * place them there to avoid breaking the cachelines and possibly paging in an
+ * extra page.
*/
-
-/* malloc_tsd_types(). */
-#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
-#define malloc_tsd_types(a_name, a_type)
-#elif (defined(JEMALLOC_TLS))
-#define malloc_tsd_types(a_name, a_type)
-#elif (defined(_WIN32))
-#define malloc_tsd_types(a_name, a_type) \
-typedef struct { \
- bool initialized; \
- a_type val; \
-} a_name##tsd_wrapper_t;
-#else
-#define malloc_tsd_types(a_name, a_type) \
-typedef struct { \
- bool initialized; \
- a_type val; \
-} a_name##tsd_wrapper_t;
-#endif
-
-/* malloc_tsd_protos(). */
-#define malloc_tsd_protos(a_attr, a_name, a_type) \
-a_attr bool \
-a_name##tsd_boot0(void); \
-a_attr void \
-a_name##tsd_boot1(void); \
-a_attr bool \
-a_name##tsd_boot(void); \
-a_attr bool \
-a_name##tsd_booted_get(void); \
-a_attr a_type * \
-a_name##tsd_get(bool init); \
-a_attr void \
-a_name##tsd_set(a_type *val);
-
-/* malloc_tsd_externs(). */
-#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
-#define malloc_tsd_externs(a_name, a_type) \
-extern __thread a_type a_name##tsd_tls; \
-extern __thread bool a_name##tsd_initialized; \
-extern bool a_name##tsd_booted;
-#elif (defined(JEMALLOC_TLS))
-#define malloc_tsd_externs(a_name, a_type) \
-extern __thread a_type a_name##tsd_tls; \
-extern pthread_key_t a_name##tsd_tsd; \
-extern bool a_name##tsd_booted;
-#elif (defined(_WIN32))
-#define malloc_tsd_externs(a_name, a_type) \
-extern DWORD a_name##tsd_tsd; \
-extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \
-extern bool a_name##tsd_booted;
-#else
-#define malloc_tsd_externs(a_name, a_type) \
-extern pthread_key_t a_name##tsd_tsd; \
-extern tsd_init_head_t a_name##tsd_init_head; \
-extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \
-extern bool a_name##tsd_booted;
-#endif
-
-/* malloc_tsd_data(). */
-#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
-#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
-a_attr __thread a_type JEMALLOC_TLS_MODEL \
- a_name##tsd_tls = a_initializer; \
-a_attr __thread bool JEMALLOC_TLS_MODEL \
- a_name##tsd_initialized = false; \
-a_attr bool a_name##tsd_booted = false;
-#elif (defined(JEMALLOC_TLS))
-#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
-a_attr __thread a_type JEMALLOC_TLS_MODEL \
- a_name##tsd_tls = a_initializer; \
-a_attr pthread_key_t a_name##tsd_tsd; \
-a_attr bool a_name##tsd_booted = false;
-#elif (defined(_WIN32))
-#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
-a_attr DWORD a_name##tsd_tsd; \
-a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \
- false, \
- a_initializer \
-}; \
-a_attr bool a_name##tsd_booted = false;
+#ifdef JEMALLOC_JET
+typedef void (*test_callback_t)(int *);
+# define MALLOC_TSD_TEST_DATA_INIT 0x72b65c10
+# define MALLOC_TEST_TSD \
+ O(test_data, int, int) \
+ O(test_callback, test_callback_t, int)
+# define MALLOC_TEST_TSD_INITIALIZER , MALLOC_TSD_TEST_DATA_INIT, NULL
#else
-#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
-a_attr pthread_key_t a_name##tsd_tsd; \
-a_attr tsd_init_head_t a_name##tsd_init_head = { \
- ql_head_initializer(blocks), \
- MALLOC_MUTEX_INITIALIZER \
-}; \
-a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \
- false, \
- a_initializer \
-}; \
-a_attr bool a_name##tsd_booted = false;
+# define MALLOC_TEST_TSD
+# define MALLOC_TEST_TSD_INITIALIZER
#endif
-/* malloc_tsd_funcs(). */
-#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
-#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
- a_cleanup) \
-/* Initialization/cleanup. */ \
-a_attr bool \
-a_name##tsd_cleanup_wrapper(void) \
-{ \
- \
- if (a_name##tsd_initialized) { \
- a_name##tsd_initialized = false; \
- a_cleanup(&a_name##tsd_tls); \
- } \
- return (a_name##tsd_initialized); \
-} \
-a_attr bool \
-a_name##tsd_boot0(void) \
-{ \
- \
- if (a_cleanup != malloc_tsd_no_cleanup) { \
- malloc_tsd_cleanup_register( \
- &a_name##tsd_cleanup_wrapper); \
- } \
- a_name##tsd_booted = true; \
- return (false); \
-} \
-a_attr void \
-a_name##tsd_boot1(void) \
-{ \
- \
- /* Do nothing. */ \
-} \
-a_attr bool \
-a_name##tsd_boot(void) \
-{ \
- \
- return (a_name##tsd_boot0()); \
-} \
-a_attr bool \
-a_name##tsd_booted_get(void) \
-{ \
- \
- return (a_name##tsd_booted); \
-} \
-a_attr bool \
-a_name##tsd_get_allocates(void) \
-{ \
- \
- return (false); \
-} \
-/* Get/set. */ \
-a_attr a_type * \
-a_name##tsd_get(bool init) \
-{ \
- \
- assert(a_name##tsd_booted); \
- return (&a_name##tsd_tls); \
-} \
-a_attr void \
-a_name##tsd_set(a_type *val) \
-{ \
- \
- assert(a_name##tsd_booted); \
- a_name##tsd_tls = (*val); \
- if (a_cleanup != malloc_tsd_no_cleanup) \
- a_name##tsd_initialized = true; \
-}
-#elif (defined(JEMALLOC_TLS))
-#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
- a_cleanup) \
-/* Initialization/cleanup. */ \
-a_attr bool \
-a_name##tsd_boot0(void) \
-{ \
- \
- if (a_cleanup != malloc_tsd_no_cleanup) { \
- if (pthread_key_create(&a_name##tsd_tsd, a_cleanup) != \
- 0) \
- return (true); \
- } \
- a_name##tsd_booted = true; \
- return (false); \
-} \
-a_attr void \
-a_name##tsd_boot1(void) \
-{ \
- \
- /* Do nothing. */ \
-} \
-a_attr bool \
-a_name##tsd_boot(void) \
-{ \
- \
- return (a_name##tsd_boot0()); \
-} \
-a_attr bool \
-a_name##tsd_booted_get(void) \
-{ \
- \
- return (a_name##tsd_booted); \
-} \
-a_attr bool \
-a_name##tsd_get_allocates(void) \
-{ \
- \
- return (false); \
-} \
-/* Get/set. */ \
-a_attr a_type * \
-a_name##tsd_get(bool init) \
-{ \
- \
- assert(a_name##tsd_booted); \
- return (&a_name##tsd_tls); \
-} \
-a_attr void \
-a_name##tsd_set(a_type *val) \
-{ \
- \
- assert(a_name##tsd_booted); \
- a_name##tsd_tls = (*val); \
- if (a_cleanup != malloc_tsd_no_cleanup) { \
- if (pthread_setspecific(a_name##tsd_tsd, \
- (void *)(&a_name##tsd_tls))) { \
- malloc_write("<jemalloc>: Error" \
- " setting TSD for "#a_name"\n"); \
- if (opt_abort) \
- abort(); \
- } \
- } \
-}
-#elif (defined(_WIN32))
-#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
- a_cleanup) \
-/* Initialization/cleanup. */ \
-a_attr bool \
-a_name##tsd_cleanup_wrapper(void) \
-{ \
- DWORD error = GetLastError(); \
- a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
- TlsGetValue(a_name##tsd_tsd); \
- SetLastError(error); \
- \
- if (wrapper == NULL) \
- return (false); \
- if (a_cleanup != malloc_tsd_no_cleanup && \
- wrapper->initialized) { \
- wrapper->initialized = false; \
- a_cleanup(&wrapper->val); \
- if (wrapper->initialized) { \
- /* Trigger another cleanup round. */ \
- return (true); \
- } \
- } \
- malloc_tsd_dalloc(wrapper); \
- return (false); \
-} \
-a_attr void \
-a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
-{ \
- \
- if (!TlsSetValue(a_name##tsd_tsd, (void *)wrapper)) { \
- malloc_write("<jemalloc>: Error setting" \
- " TSD for "#a_name"\n"); \
- abort(); \
- } \
-} \
-a_attr a_name##tsd_wrapper_t * \
-a_name##tsd_wrapper_get(bool init) \
-{ \
- DWORD error = GetLastError(); \
- a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
- TlsGetValue(a_name##tsd_tsd); \
- SetLastError(error); \
- \
- if (init && unlikely(wrapper == NULL)) { \
- wrapper = (a_name##tsd_wrapper_t *) \
- malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
- if (wrapper == NULL) { \
- malloc_write("<jemalloc>: Error allocating" \
- " TSD for "#a_name"\n"); \
- abort(); \
- } else { \
- wrapper->initialized = false; \
- wrapper->val = a_initializer; \
- } \
- a_name##tsd_wrapper_set(wrapper); \
- } \
- return (wrapper); \
-} \
-a_attr bool \
-a_name##tsd_boot0(void) \
-{ \
- \
- a_name##tsd_tsd = TlsAlloc(); \
- if (a_name##tsd_tsd == TLS_OUT_OF_INDEXES) \
- return (true); \
- if (a_cleanup != malloc_tsd_no_cleanup) { \
- malloc_tsd_cleanup_register( \
- &a_name##tsd_cleanup_wrapper); \
- } \
- a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \
- a_name##tsd_booted = true; \
- return (false); \
-} \
-a_attr void \
-a_name##tsd_boot1(void) \
-{ \
- a_name##tsd_wrapper_t *wrapper; \
- wrapper = (a_name##tsd_wrapper_t *) \
- malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
- if (wrapper == NULL) { \
- malloc_write("<jemalloc>: Error allocating" \
- " TSD for "#a_name"\n"); \
- abort(); \
- } \
- memcpy(wrapper, &a_name##tsd_boot_wrapper, \
- sizeof(a_name##tsd_wrapper_t)); \
- a_name##tsd_wrapper_set(wrapper); \
-} \
-a_attr bool \
-a_name##tsd_boot(void) \
-{ \
- \
- if (a_name##tsd_boot0()) \
- return (true); \
- a_name##tsd_boot1(); \
- return (false); \
-} \
-a_attr bool \
-a_name##tsd_booted_get(void) \
-{ \
- \
- return (a_name##tsd_booted); \
-} \
-a_attr bool \
-a_name##tsd_get_allocates(void) \
-{ \
- \
- return (true); \
-} \
-/* Get/set. */ \
-a_attr a_type * \
-a_name##tsd_get(bool init) \
-{ \
- a_name##tsd_wrapper_t *wrapper; \
- \
- assert(a_name##tsd_booted); \
- wrapper = a_name##tsd_wrapper_get(init); \
- if (a_name##tsd_get_allocates() && !init && wrapper == NULL) \
- return (NULL); \
- return (&wrapper->val); \
-} \
-a_attr void \
-a_name##tsd_set(a_type *val) \
-{ \
- a_name##tsd_wrapper_t *wrapper; \
- \
- assert(a_name##tsd_booted); \
- wrapper = a_name##tsd_wrapper_get(true); \
- wrapper->val = *(val); \
- if (a_cleanup != malloc_tsd_no_cleanup) \
- wrapper->initialized = true; \
-}
-#else
-#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
- a_cleanup) \
-/* Initialization/cleanup. */ \
-a_attr void \
-a_name##tsd_cleanup_wrapper(void *arg) \
-{ \
- a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *)arg; \
- \
- if (a_cleanup != malloc_tsd_no_cleanup && \
- wrapper->initialized) { \
- wrapper->initialized = false; \
- a_cleanup(&wrapper->val); \
- if (wrapper->initialized) { \
- /* Trigger another cleanup round. */ \
- if (pthread_setspecific(a_name##tsd_tsd, \
- (void *)wrapper)) { \
- malloc_write("<jemalloc>: Error" \
- " setting TSD for "#a_name"\n"); \
- if (opt_abort) \
- abort(); \
- } \
- return; \
- } \
- } \
- malloc_tsd_dalloc(wrapper); \
-} \
-a_attr void \
-a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
-{ \
- \
- if (pthread_setspecific(a_name##tsd_tsd, \
- (void *)wrapper)) { \
- malloc_write("<jemalloc>: Error setting" \
- " TSD for "#a_name"\n"); \
- abort(); \
- } \
-} \
-a_attr a_name##tsd_wrapper_t * \
-a_name##tsd_wrapper_get(bool init) \
-{ \
- a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
- pthread_getspecific(a_name##tsd_tsd); \
- \
- if (init && unlikely(wrapper == NULL)) { \
- tsd_init_block_t block; \
- wrapper = (a_name##tsd_wrapper_t *) \
- tsd_init_check_recursion(&a_name##tsd_init_head, \
- &block); \
- if (wrapper) \
- return (wrapper); \
- wrapper = (a_name##tsd_wrapper_t *) \
- malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
- block.data = (void *)wrapper; \
- if (wrapper == NULL) { \
- malloc_write("<jemalloc>: Error allocating" \
- " TSD for "#a_name"\n"); \
- abort(); \
- } else { \
- wrapper->initialized = false; \
- wrapper->val = a_initializer; \
- } \
- a_name##tsd_wrapper_set(wrapper); \
- tsd_init_finish(&a_name##tsd_init_head, &block); \
- } \
- return (wrapper); \
-} \
-a_attr bool \
-a_name##tsd_boot0(void) \
-{ \
- \
- if (pthread_key_create(&a_name##tsd_tsd, \
- a_name##tsd_cleanup_wrapper) != 0) \
- return (true); \
- a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \
- a_name##tsd_booted = true; \
- return (false); \
-} \
-a_attr void \
-a_name##tsd_boot1(void) \
-{ \
- a_name##tsd_wrapper_t *wrapper; \
- wrapper = (a_name##tsd_wrapper_t *) \
- malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
- if (wrapper == NULL) { \
- malloc_write("<jemalloc>: Error allocating" \
- " TSD for "#a_name"\n"); \
- abort(); \
- } \
- memcpy(wrapper, &a_name##tsd_boot_wrapper, \
- sizeof(a_name##tsd_wrapper_t)); \
- a_name##tsd_wrapper_set(wrapper); \
-} \
-a_attr bool \
-a_name##tsd_boot(void) \
-{ \
- \
- if (a_name##tsd_boot0()) \
- return (true); \
- a_name##tsd_boot1(); \
- return (false); \
-} \
-a_attr bool \
-a_name##tsd_booted_get(void) \
-{ \
- \
- return (a_name##tsd_booted); \
-} \
-a_attr bool \
-a_name##tsd_get_allocates(void) \
-{ \
- \
- return (true); \
-} \
-/* Get/set. */ \
-a_attr a_type * \
-a_name##tsd_get(bool init) \
-{ \
- a_name##tsd_wrapper_t *wrapper; \
- \
- assert(a_name##tsd_booted); \
- wrapper = a_name##tsd_wrapper_get(init); \
- if (a_name##tsd_get_allocates() && !init && wrapper == NULL) \
- return (NULL); \
- return (&wrapper->val); \
-} \
-a_attr void \
-a_name##tsd_set(a_type *val) \
-{ \
- a_name##tsd_wrapper_t *wrapper; \
- \
- assert(a_name##tsd_booted); \
- wrapper = a_name##tsd_wrapper_get(true); \
- wrapper->val = *(val); \
- if (a_cleanup != malloc_tsd_no_cleanup) \
- wrapper->initialized = true; \
-}
-#endif
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
- !defined(_WIN32))
-struct tsd_init_block_s {
- ql_elm(tsd_init_block_t) link;
- pthread_t thread;
- void *data;
-};
-struct tsd_init_head_s {
- ql_head(tsd_init_block_t) blocks;
- malloc_mutex_t lock;
-};
-#endif
-
-#define MALLOC_TSD \
-/* O(name, type) */ \
- O(tcache, tcache_t *) \
- O(thread_allocated, uint64_t) \
- O(thread_deallocated, uint64_t) \
- O(prof_tdata, prof_tdata_t *) \
- O(iarena, arena_t *) \
- O(arena, arena_t *) \
- O(arenas_tdata, arena_tdata_t *) \
- O(narenas_tdata, unsigned) \
- O(arenas_tdata_bypass, bool) \
- O(tcache_enabled, tcache_enabled_t) \
- O(quarantine, quarantine_t *) \
- O(witnesses, witness_list_t) \
- O(witness_fork, bool) \
-
-#define TSD_INITIALIZER { \
+/* O(name, type, nullable type) */
+#define MALLOC_TSD \
+ O(tcache_enabled, bool, bool) \
+ O(arenas_tdata_bypass, bool, bool) \
+ O(reentrancy_level, int8_t, int8_t) \
+ O(narenas_tdata, uint32_t, uint32_t) \
+ O(thread_allocated, uint64_t, uint64_t) \
+ O(thread_deallocated, uint64_t, uint64_t) \
+ O(prof_tdata, prof_tdata_t *, prof_tdata_t *) \
+ O(rtree_ctx, rtree_ctx_t, rtree_ctx_t) \
+ O(iarena, arena_t *, arena_t *) \
+ O(arena, arena_t *, arena_t *) \
+ O(arenas_tdata, arena_tdata_t *, arena_tdata_t *)\
+ O(tcache, tcache_t, tcache_t) \
+ O(witness_tsd, witness_tsd_t, witness_tsdn_t) \
+ MALLOC_TEST_TSD
+
+#define TSD_INITIALIZER { \
tsd_state_uninitialized, \
- NULL, \
+ TCACHE_ENABLED_ZERO_INITIALIZER, \
+ false, \
+ 0, \
+ 0, \
0, \
0, \
NULL, \
+ RTREE_CTX_ZERO_INITIALIZER, \
NULL, \
NULL, \
NULL, \
- 0, \
- false, \
- tcache_enabled_default, \
- NULL, \
- ql_head_initializer(witnesses), \
- false \
+ TCACHE_ZERO_INITIALIZER, \
+ WITNESS_TSD_INITIALIZER \
+ MALLOC_TEST_TSD_INITIALIZER \
}
+enum {
+ tsd_state_nominal = 0, /* Common case --> jnz. */
+ tsd_state_nominal_slow = 1, /* Initialized but on slow path. */
+	/* The above two nominal states should have the lowest values. */
+	tsd_state_nominal_max = 1, /* Used for comparison only. */
+ tsd_state_purgatory = 2,
+ tsd_state_reincarnated = 3,
+ tsd_state_uninitialized = 4
+};
+
+/* Manually limit tsd_state_t to a single byte. */
+typedef uint8_t tsd_state_t;
+
+/* The actual tsd. */
struct tsd_s {
+ /*
+ * The contents should be treated as totally opaque outside the tsd
+ * module. Access any thread-local state through the getters and
+ * setters below.
+ */
tsd_state_t state;
-#define O(n, t) \
- t n;
+#define O(n, t, nt) \
+ t use_a_getter_or_setter_instead_##n;
MALLOC_TSD
#undef O
};
@@ -636,153 +127,184 @@ MALLOC_TSD
* explicitly converted to tsd_t, which is non-nullable.
*/
struct tsdn_s {
- tsd_t tsd;
+ tsd_t tsd;
};
+#define TSDN_NULL ((tsdn_t *)0)
+JEMALLOC_ALWAYS_INLINE tsdn_t *
+tsd_tsdn(tsd_t *tsd) {
+ return (tsdn_t *)tsd;
+}
-static const tsd_t tsd_initializer = TSD_INITIALIZER;
-
-malloc_tsd_types(, tsd_t)
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-void *malloc_tsd_malloc(size_t size);
-void malloc_tsd_dalloc(void *wrapper);
-void malloc_tsd_no_cleanup(void *arg);
-void malloc_tsd_cleanup_register(bool (*f)(void));
-tsd_t *malloc_tsd_boot0(void);
-void malloc_tsd_boot1(void);
-#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
- !defined(_WIN32))
-void *tsd_init_check_recursion(tsd_init_head_t *head,
- tsd_init_block_t *block);
-void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block);
-#endif
-void tsd_cleanup(void *arg);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-malloc_tsd_protos(JEMALLOC_ATTR(unused), , tsd_t)
-
-tsd_t *tsd_fetch_impl(bool init);
-tsd_t *tsd_fetch(void);
-tsdn_t *tsd_tsdn(tsd_t *tsd);
-bool tsd_nominal(tsd_t *tsd);
-#define O(n, t) \
-t *tsd_##n##p_get(tsd_t *tsd); \
-t tsd_##n##_get(tsd_t *tsd); \
-void tsd_##n##_set(tsd_t *tsd, t n);
-MALLOC_TSD
-#undef O
-tsdn_t *tsdn_fetch(void);
-bool tsdn_null(const tsdn_t *tsdn);
-tsd_t *tsdn_tsd(tsdn_t *tsdn);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TSD_C_))
-malloc_tsd_externs(, tsd_t)
-malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, , tsd_t, tsd_initializer, tsd_cleanup)
-
-JEMALLOC_ALWAYS_INLINE tsd_t *
-tsd_fetch_impl(bool init)
-{
- tsd_t *tsd = tsd_get(init);
-
- if (!init && tsd_get_allocates() && tsd == NULL)
- return (NULL);
- assert(tsd != NULL);
-
- if (unlikely(tsd->state != tsd_state_nominal)) {
- if (tsd->state == tsd_state_uninitialized) {
- tsd->state = tsd_state_nominal;
- /* Trigger cleanup handler registration. */
- tsd_set(tsd);
- } else if (tsd->state == tsd_state_purgatory) {
- tsd->state = tsd_state_reincarnated;
- tsd_set(tsd);
- } else
- assert(tsd->state == tsd_state_reincarnated);
- }
-
- return (tsd);
+JEMALLOC_ALWAYS_INLINE bool
+tsdn_null(const tsdn_t *tsdn) {
+ return tsdn == NULL;
}
JEMALLOC_ALWAYS_INLINE tsd_t *
-tsd_fetch(void)
-{
+tsdn_tsd(tsdn_t *tsdn) {
+ assert(!tsdn_null(tsdn));
- return (tsd_fetch_impl(true));
+ return &tsdn->tsd;
}
-JEMALLOC_ALWAYS_INLINE tsdn_t *
-tsd_tsdn(tsd_t *tsd)
-{
+void *malloc_tsd_malloc(size_t size);
+void malloc_tsd_dalloc(void *wrapper);
+void malloc_tsd_cleanup_register(bool (*f)(void));
+tsd_t *malloc_tsd_boot0(void);
+void malloc_tsd_boot1(void);
+void tsd_cleanup(void *arg);
+tsd_t *tsd_fetch_slow(tsd_t *tsd, bool internal);
+void tsd_slow_update(tsd_t *tsd);
- return ((tsdn_t *)tsd);
+/*
+ * We put the platform-specific data declarations and inlines into their own
+ * header files to avoid cluttering this file. They define tsd_boot0,
+ * tsd_boot1, tsd_boot, tsd_booted_get, tsd_get_allocates, tsd_get, and tsd_set.
+ */
+#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
+#include "jemalloc/internal/tsd_malloc_thread_cleanup.h"
+#elif (defined(JEMALLOC_TLS))
+#include "jemalloc/internal/tsd_tls.h"
+#elif (defined(_WIN32))
+#include "jemalloc/internal/tsd_win.h"
+#else
+#include "jemalloc/internal/tsd_generic.h"
+#endif
+
+/*
+ * tsd_foop_get_unsafe(tsd) returns a pointer to the thread-local instance of
+ * foo. This omits some safety checks, and so can be used during tsd
+ * initialization and cleanup.
+ */
+#define O(n, t, nt) \
+JEMALLOC_ALWAYS_INLINE t * \
+tsd_##n##p_get_unsafe(tsd_t *tsd) { \
+ return &tsd->use_a_getter_or_setter_instead_##n; \
}
+MALLOC_TSD
+#undef O
-JEMALLOC_INLINE bool
-tsd_nominal(tsd_t *tsd)
-{
+/* tsd_foop_get(tsd) returns a pointer to the thread-local instance of foo. */
+#define O(n, t, nt) \
+JEMALLOC_ALWAYS_INLINE t * \
+tsd_##n##p_get(tsd_t *tsd) { \
+ assert(tsd->state == tsd_state_nominal || \
+ tsd->state == tsd_state_nominal_slow || \
+ tsd->state == tsd_state_reincarnated); \
+ return tsd_##n##p_get_unsafe(tsd); \
+}
+MALLOC_TSD
+#undef O
- return (tsd->state == tsd_state_nominal);
+/*
+ * tsdn_foop_get(tsdn) returns either the thread-local instance of foo (if tsdn
+ * isn't NULL), or NULL (if tsdn is NULL), cast to the nullable pointer type.
+ */
+#define O(n, t, nt) \
+JEMALLOC_ALWAYS_INLINE nt * \
+tsdn_##n##p_get(tsdn_t *tsdn) { \
+ if (tsdn_null(tsdn)) { \
+ return NULL; \
+ } \
+ tsd_t *tsd = tsdn_tsd(tsdn); \
+ return (nt *)tsd_##n##p_get(tsd); \
}
+MALLOC_TSD
+#undef O
-#define O(n, t) \
-JEMALLOC_ALWAYS_INLINE t * \
-tsd_##n##p_get(tsd_t *tsd) \
-{ \
- \
- return (&tsd->n); \
-} \
- \
+/* tsd_foo_get(tsd) returns the value of the thread-local instance of foo. */
+#define O(n, t, nt) \
JEMALLOC_ALWAYS_INLINE t \
-tsd_##n##_get(tsd_t *tsd) \
-{ \
- \
- return (*tsd_##n##p_get(tsd)); \
-} \
- \
+tsd_##n##_get(tsd_t *tsd) { \
+ return *tsd_##n##p_get(tsd); \
+}
+MALLOC_TSD
+#undef O
+
+/* tsd_foo_set(tsd, val) updates the thread-local instance of foo to be val. */
+#define O(n, t, nt) \
JEMALLOC_ALWAYS_INLINE void \
-tsd_##n##_set(tsd_t *tsd, t n) \
-{ \
- \
- assert(tsd->state == tsd_state_nominal); \
- tsd->n = n; \
+tsd_##n##_set(tsd_t *tsd, t val) { \
+ assert(tsd->state != tsd_state_reincarnated); \
+ *tsd_##n##p_get(tsd) = val; \
}
MALLOC_TSD
#undef O
-JEMALLOC_ALWAYS_INLINE tsdn_t *
-tsdn_fetch(void)
-{
+JEMALLOC_ALWAYS_INLINE void
+tsd_assert_fast(tsd_t *tsd) {
+ assert(!malloc_slow && tsd_tcache_enabled_get(tsd) &&
+ tsd_reentrancy_level_get(tsd) == 0);
+}
- if (!tsd_booted_get())
- return (NULL);
+JEMALLOC_ALWAYS_INLINE bool
+tsd_fast(tsd_t *tsd) {
+ bool fast = (tsd->state == tsd_state_nominal);
+ if (fast) {
+ tsd_assert_fast(tsd);
+ }
- return (tsd_tsdn(tsd_fetch_impl(false)));
+ return fast;
}
-JEMALLOC_ALWAYS_INLINE bool
-tsdn_null(const tsdn_t *tsdn)
-{
+JEMALLOC_ALWAYS_INLINE tsd_t *
+tsd_fetch_impl(bool init, bool internal) {
+ tsd_t *tsd = tsd_get(init);
- return (tsdn == NULL);
+ if (!init && tsd_get_allocates() && tsd == NULL) {
+ return NULL;
+ }
+ assert(tsd != NULL);
+
+ if (unlikely(tsd->state != tsd_state_nominal)) {
+ return tsd_fetch_slow(tsd, internal);
+ }
+ assert(tsd_fast(tsd));
+ tsd_assert_fast(tsd);
+
+ return tsd;
}
JEMALLOC_ALWAYS_INLINE tsd_t *
-tsdn_tsd(tsdn_t *tsdn)
-{
+tsd_internal_fetch(void) {
+ return tsd_fetch_impl(true, true);
+}
- assert(!tsdn_null(tsdn));
+JEMALLOC_ALWAYS_INLINE tsd_t *
+tsd_fetch(void) {
+ return tsd_fetch_impl(true, false);
+}
- return (&tsdn->tsd);
+static inline bool
+tsd_nominal(tsd_t *tsd) {
+ return (tsd->state <= tsd_state_nominal_max);
+}
+
+JEMALLOC_ALWAYS_INLINE tsdn_t *
+tsdn_fetch(void) {
+ if (!tsd_booted_get()) {
+ return NULL;
+ }
+
+ return tsd_tsdn(tsd_fetch_impl(false, false));
+}
+
+JEMALLOC_ALWAYS_INLINE rtree_ctx_t *
+tsd_rtree_ctx(tsd_t *tsd) {
+ return tsd_rtree_ctxp_get(tsd);
+}
+
+JEMALLOC_ALWAYS_INLINE rtree_ctx_t *
+tsdn_rtree_ctx(tsdn_t *tsdn, rtree_ctx_t *fallback) {
+ /*
+ * If tsd cannot be accessed, initialize the fallback rtree_ctx and
+ * return a pointer to it.
+ */
+ if (unlikely(tsdn_null(tsdn))) {
+ rtree_ctx_data_init(fallback);
+ return fallback;
+ }
+ return tsd_rtree_ctx(tsdn_tsd(tsdn));
}
-#endif
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
+#endif /* JEMALLOC_INTERNAL_TSD_H */
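
A simplified, hypothetical sketch of the X-macro technique that MALLOC_TSD and the O() blocks above rely on: the same field list is expanded once to lay out a struct and again to generate typed accessors. The toy_* names and the two fields are assumptions for illustration, not the real tsd_t layout.

#include <stdint.h>
#include <stdio.h>

/* Toy X-macro in the spirit of MALLOC_TSD: O(name, type, nullable type). */
#define TOY_TSD \
    O(thread_allocated,   uint64_t, uint64_t) \
    O(thread_deallocated, uint64_t, uint64_t)

/* Expand once to lay out the struct fields... */
typedef struct {
#define O(n, t, nt) t n;
	TOY_TSD
#undef O
} toy_tsd_t;

/* ...and again to generate a typed getter/setter pair per field, much as
 * tsd.h generates tsd_foo_get()/tsd_foo_set() above. */
#define O(n, t, nt) \
static t toy_##n##_get(toy_tsd_t *tsd) { return tsd->n; } \
static void toy_##n##_set(toy_tsd_t *tsd, t val) { tsd->n = val; }
TOY_TSD
#undef O

int
main(void) {
	toy_tsd_t tsd = {0, 0};
	toy_thread_allocated_set(&tsd, 4096);
	toy_thread_deallocated_set(&tsd, 1024);
	printf("allocated=%llu deallocated=%llu\n",
	    (unsigned long long)toy_thread_allocated_get(&tsd),
	    (unsigned long long)toy_thread_deallocated_get(&tsd));
	return 0;
}
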
diff --git a/contrib/jemalloc/include/jemalloc/internal/tsd_generic.h b/contrib/jemalloc/include/jemalloc/internal/tsd_generic.h
new file mode 100644
index 000000000000..1e52ef767f16
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/tsd_generic.h
@@ -0,0 +1,157 @@
+#ifdef JEMALLOC_INTERNAL_TSD_GENERIC_H
+#error This file should be included only once, by tsd.h.
+#endif
+#define JEMALLOC_INTERNAL_TSD_GENERIC_H
+
+typedef struct tsd_init_block_s tsd_init_block_t;
+struct tsd_init_block_s {
+ ql_elm(tsd_init_block_t) link;
+ pthread_t thread;
+ void *data;
+};
+
+/* Defined in tsd.c, to allow the mutex headers to have tsd dependencies. */
+typedef struct tsd_init_head_s tsd_init_head_t;
+
+typedef struct {
+ bool initialized;
+ tsd_t val;
+} tsd_wrapper_t;
+
+void *tsd_init_check_recursion(tsd_init_head_t *head,
+ tsd_init_block_t *block);
+void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block);
+
+extern pthread_key_t tsd_tsd;
+extern tsd_init_head_t tsd_init_head;
+extern tsd_wrapper_t tsd_boot_wrapper;
+extern bool tsd_booted;
+
+/* Initialization/cleanup. */
+JEMALLOC_ALWAYS_INLINE void
+tsd_cleanup_wrapper(void *arg) {
+ tsd_wrapper_t *wrapper = (tsd_wrapper_t *)arg;
+
+ if (wrapper->initialized) {
+ wrapper->initialized = false;
+ tsd_cleanup(&wrapper->val);
+ if (wrapper->initialized) {
+ /* Trigger another cleanup round. */
+ if (pthread_setspecific(tsd_tsd, (void *)wrapper) != 0)
+ {
+ malloc_write("<jemalloc>: Error setting TSD\n");
+ if (opt_abort) {
+ abort();
+ }
+ }
+ return;
+ }
+ }
+ malloc_tsd_dalloc(wrapper);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+tsd_wrapper_set(tsd_wrapper_t *wrapper) {
+ if (pthread_setspecific(tsd_tsd, (void *)wrapper) != 0) {
+ malloc_write("<jemalloc>: Error setting TSD\n");
+ abort();
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE tsd_wrapper_t *
+tsd_wrapper_get(bool init) {
+ tsd_wrapper_t *wrapper = (tsd_wrapper_t *)pthread_getspecific(tsd_tsd);
+
+ if (init && unlikely(wrapper == NULL)) {
+ tsd_init_block_t block;
+ wrapper = (tsd_wrapper_t *)
+ tsd_init_check_recursion(&tsd_init_head, &block);
+ if (wrapper) {
+ return wrapper;
+ }
+ wrapper = (tsd_wrapper_t *)
+ malloc_tsd_malloc(sizeof(tsd_wrapper_t));
+ block.data = (void *)wrapper;
+ if (wrapper == NULL) {
+ malloc_write("<jemalloc>: Error allocating TSD\n");
+ abort();
+ } else {
+ wrapper->initialized = false;
+ tsd_t initializer = TSD_INITIALIZER;
+ wrapper->val = initializer;
+ }
+ tsd_wrapper_set(wrapper);
+ tsd_init_finish(&tsd_init_head, &block);
+ }
+ return wrapper;
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+tsd_boot0(void) {
+ if (pthread_key_create(&tsd_tsd, tsd_cleanup_wrapper) != 0) {
+ return true;
+ }
+ tsd_wrapper_set(&tsd_boot_wrapper);
+ tsd_booted = true;
+ return false;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+tsd_boot1(void) {
+ tsd_wrapper_t *wrapper;
+ wrapper = (tsd_wrapper_t *)malloc_tsd_malloc(sizeof(tsd_wrapper_t));
+ if (wrapper == NULL) {
+ malloc_write("<jemalloc>: Error allocating TSD\n");
+ abort();
+ }
+ tsd_boot_wrapper.initialized = false;
+ tsd_cleanup(&tsd_boot_wrapper.val);
+ wrapper->initialized = false;
+ tsd_t initializer = TSD_INITIALIZER;
+ wrapper->val = initializer;
+ tsd_wrapper_set(wrapper);
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+tsd_boot(void) {
+ if (tsd_boot0()) {
+ return true;
+ }
+ tsd_boot1();
+ return false;
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+tsd_booted_get(void) {
+ return tsd_booted;
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+tsd_get_allocates(void) {
+ return true;
+}
+
+/* Get/set. */
+JEMALLOC_ALWAYS_INLINE tsd_t *
+tsd_get(bool init) {
+ tsd_wrapper_t *wrapper;
+
+ assert(tsd_booted);
+ wrapper = tsd_wrapper_get(init);
+ if (tsd_get_allocates() && !init && wrapper == NULL) {
+ return NULL;
+ }
+ return &wrapper->val;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+tsd_set(tsd_t *val) {
+ tsd_wrapper_t *wrapper;
+
+ assert(tsd_booted);
+ wrapper = tsd_wrapper_get(true);
+ if (likely(&wrapper->val != val)) {
+ wrapper->val = *(val);
+ }
+ wrapper->initialized = true;
+}
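
A hypothetical standalone demonstration of the "trigger another cleanup round" idiom used in tsd_cleanup_wrapper above: if a key's destructor re-associates a non-NULL value, pthreads invokes it again on thread exit, bounded by PTHREAD_DESTRUCTOR_ITERATIONS.

#include <pthread.h>
#include <stdio.h>

static pthread_key_t key;
static int rounds_left = 2;

/* Destructor: as long as it re-sets a non-NULL value, pthreads calls it
 * again on thread exit, up to PTHREAD_DESTRUCTOR_ITERATIONS times. */
static void
cleanup(void *arg) {
	printf("cleanup round; rounds_left=%d\n", rounds_left);
	if (--rounds_left > 0) {
		pthread_setspecific(key, arg); /* Trigger another cleanup round. */
	}
}

static void *
worker(void *arg) {
	(void)arg;
	pthread_setspecific(key, &rounds_left); /* Any non-NULL value works. */
	return NULL;
}

int
main(void) {
	pthread_t thr;
	pthread_key_create(&key, cleanup);
	pthread_create(&thr, NULL, worker, NULL);
	pthread_join(thr, NULL);
	return 0;
}
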
diff --git a/contrib/jemalloc/include/jemalloc/internal/tsd_malloc_thread_cleanup.h b/contrib/jemalloc/include/jemalloc/internal/tsd_malloc_thread_cleanup.h
new file mode 100644
index 000000000000..beb467a67e91
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/tsd_malloc_thread_cleanup.h
@@ -0,0 +1,60 @@
+#ifdef JEMALLOC_INTERNAL_TSD_MALLOC_THREAD_CLEANUP_H
+#error This file should be included only once, by tsd.h.
+#endif
+#define JEMALLOC_INTERNAL_TSD_MALLOC_THREAD_CLEANUP_H
+
+extern __thread tsd_t tsd_tls;
+extern __thread bool tsd_initialized;
+extern bool tsd_booted;
+
+/* Initialization/cleanup. */
+JEMALLOC_ALWAYS_INLINE bool
+tsd_cleanup_wrapper(void) {
+ if (tsd_initialized) {
+ tsd_initialized = false;
+ tsd_cleanup(&tsd_tls);
+ }
+ return tsd_initialized;
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+tsd_boot0(void) {
+ malloc_tsd_cleanup_register(&tsd_cleanup_wrapper);
+ tsd_booted = true;
+ return false;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+tsd_boot1(void) {
+ /* Do nothing. */
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+tsd_boot(void) {
+ return tsd_boot0();
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+tsd_booted_get(void) {
+ return tsd_booted;
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+tsd_get_allocates(void) {
+ return false;
+}
+
+/* Get/set. */
+JEMALLOC_ALWAYS_INLINE tsd_t *
+tsd_get(bool init) {
+ assert(tsd_booted);
+ return &tsd_tls;
+}
+JEMALLOC_ALWAYS_INLINE void
+tsd_set(tsd_t *val) {
+ assert(tsd_booted);
+ if (likely(&tsd_tls != val)) {
+ tsd_tls = (*val);
+ }
+ tsd_initialized = true;
+}
diff --git a/contrib/jemalloc/include/jemalloc/internal/tsd_tls.h b/contrib/jemalloc/include/jemalloc/internal/tsd_tls.h
new file mode 100644
index 000000000000..757aaa0eeff8
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/tsd_tls.h
@@ -0,0 +1,59 @@
+#ifdef JEMALLOC_INTERNAL_TSD_TLS_H
+#error This file should be included only once, by tsd.h.
+#endif
+#define JEMALLOC_INTERNAL_TSD_TLS_H
+
+extern __thread tsd_t tsd_tls;
+extern pthread_key_t tsd_tsd;
+extern bool tsd_booted;
+
+/* Initialization/cleanup. */
+JEMALLOC_ALWAYS_INLINE bool
+tsd_boot0(void) {
+ if (pthread_key_create(&tsd_tsd, &tsd_cleanup) != 0) {
+ return true;
+ }
+ tsd_booted = true;
+ return false;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+tsd_boot1(void) {
+ /* Do nothing. */
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+tsd_boot(void) {
+ return tsd_boot0();
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+tsd_booted_get(void) {
+ return tsd_booted;
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+tsd_get_allocates(void) {
+ return false;
+}
+
+/* Get/set. */
+JEMALLOC_ALWAYS_INLINE tsd_t *
+tsd_get(bool init) {
+ assert(tsd_booted);
+ return &tsd_tls;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+tsd_set(tsd_t *val) {
+ assert(tsd_booted);
+ if (likely(&tsd_tls != val)) {
+ tsd_tls = (*val);
+ }
+ if (pthread_setspecific(tsd_tsd, (void *)(&tsd_tls)) != 0) {
+ malloc_write("<jemalloc>: Error setting tsd.\n");
+ if (opt_abort) {
+ abort();
+ }
+ }
+}
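
A standalone sketch of the hybrid scheme tsd_tls.h uses: the data itself lives in fast __thread storage, and the pthread key exists only so its destructor runs at thread exit. All names below are illustrative, not jemalloc's.

#include <pthread.h>
#include <stdio.h>

/* The per-thread data itself lives in fast __thread storage. */
static __thread int tls_counter;
/* The pthread key exists only to get a destructor at thread exit. */
static pthread_key_t cleanup_key;

static void
cleanup(void *arg) {
	/* Runs in the exiting thread, so its __thread storage is still valid. */
	printf("thread exiting with counter=%d\n", *(int *)arg);
}

static void *
worker(void *unused) {
	(void)unused;
	/* Register for cleanup; the stored pointer just has to be non-NULL. */
	pthread_setspecific(cleanup_key, &tls_counter);
	tls_counter = 42; /* Fast path: a plain TLS store, no pthread calls. */
	return NULL;
}

int
main(void) {
	pthread_t thr;
	pthread_key_create(&cleanup_key, cleanup);
	pthread_create(&thr, NULL, worker, NULL);
	pthread_join(thr, NULL);
	return 0;
}
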
diff --git a/contrib/jemalloc/include/jemalloc/internal/tsd_types.h b/contrib/jemalloc/include/jemalloc/internal/tsd_types.h
new file mode 100644
index 000000000000..6200af61f3dc
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/tsd_types.h
@@ -0,0 +1,10 @@
+#ifndef JEMALLOC_INTERNAL_TSD_TYPES_H
+#define JEMALLOC_INTERNAL_TSD_TYPES_H
+
+#define MALLOC_TSD_CLEANUPS_MAX 2
+
+typedef struct tsd_s tsd_t;
+typedef struct tsdn_s tsdn_t;
+typedef bool (*malloc_tsd_cleanup_t)(void);
+
+#endif /* JEMALLOC_INTERNAL_TSD_TYPES_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/util.h b/contrib/jemalloc/include/jemalloc/internal/util.h
index 4b56d652ed31..304cb545afcb 100644
--- a/contrib/jemalloc/include/jemalloc/internal/util.h
+++ b/contrib/jemalloc/include/jemalloc/internal/util.h
@@ -1,44 +1,7 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
+#ifndef JEMALLOC_INTERNAL_UTIL_H
+#define JEMALLOC_INTERNAL_UTIL_H
-#ifdef _WIN32
-# ifdef _WIN64
-# define FMT64_PREFIX "ll"
-# define FMTPTR_PREFIX "ll"
-# else
-# define FMT64_PREFIX "ll"
-# define FMTPTR_PREFIX ""
-# endif
-# define FMTd32 "d"
-# define FMTu32 "u"
-# define FMTx32 "x"
-# define FMTd64 FMT64_PREFIX "d"
-# define FMTu64 FMT64_PREFIX "u"
-# define FMTx64 FMT64_PREFIX "x"
-# define FMTdPTR FMTPTR_PREFIX "d"
-# define FMTuPTR FMTPTR_PREFIX "u"
-# define FMTxPTR FMTPTR_PREFIX "x"
-#else
-# include <inttypes.h>
-# define FMTd32 PRId32
-# define FMTu32 PRIu32
-# define FMTx32 PRIx32
-# define FMTd64 PRId64
-# define FMTu64 PRIu64
-# define FMTx64 PRIx64
-# define FMTdPTR PRIdPTR
-# define FMTuPTR PRIuPTR
-# define FMTxPTR PRIxPTR
-#endif
-
-/* Size of stack-allocated buffer passed to buferror(). */
-#define BUFERROR_BUF 64
-
-/*
- * Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be
- * large enough for all possible uses within jemalloc.
- */
-#define MALLOC_PRINTF_BUFSIZE 4096
+#define UTIL_INLINE static inline
/* Junk fill patterns. */
#ifndef JEMALLOC_ALLOC_JUNK
@@ -52,25 +15,25 @@
* Wrap a cpp argument that contains commas such that it isn't broken up into
* multiple arguments.
*/
-#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__
+#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__
+
+/* cpp macro definition stringification. */
+#define STRINGIFY_HELPER(x) #x
+#define STRINGIFY(x) STRINGIFY_HELPER(x)
/*
* Silence compiler warnings due to uninitialized values. This is used
* wherever the compiler fails to recognize that the variable is never used
* uninitialized.
*/
-#ifdef JEMALLOC_CC_SILENCE
-# define JEMALLOC_CC_SILENCE_INIT(v) = v
-#else
-# define JEMALLOC_CC_SILENCE_INIT(v)
-#endif
+#define JEMALLOC_CC_SILENCE_INIT(v) = v
#ifdef __GNUC__
-# define likely(x) __builtin_expect(!!(x), 1)
-# define unlikely(x) __builtin_expect(!!(x), 0)
+# define likely(x) __builtin_expect(!!(x), 1)
+# define unlikely(x) __builtin_expect(!!(x), 0)
#else
-# define likely(x) !!(x)
-# define unlikely(x) !!(x)
+# define likely(x) !!(x)
+# define unlikely(x) !!(x)
#endif
#if !defined(JEMALLOC_INTERNAL_UNREACHABLE)
@@ -79,245 +42,9 @@
#define unreachable() JEMALLOC_INTERNAL_UNREACHABLE()
-#include "jemalloc/internal/assert.h"
-
-/* Use to assert a particular configuration, e.g., cassert(config_debug). */
-#define cassert(c) do { \
- if (unlikely(!(c))) \
- not_reached(); \
-} while (0)
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-int buferror(int err, char *buf, size_t buflen);
-uintmax_t malloc_strtoumax(const char *restrict nptr,
- char **restrict endptr, int base);
-void malloc_write(const char *s);
-
-/*
- * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating
- * point math.
- */
-size_t malloc_vsnprintf(char *str, size_t size, const char *format,
- va_list ap);
-size_t malloc_snprintf(char *str, size_t size, const char *format, ...)
- JEMALLOC_FORMAT_PRINTF(3, 4);
-void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *format, va_list ap);
-void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque,
- const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4);
-void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-unsigned ffs_llu(unsigned long long bitmap);
-unsigned ffs_lu(unsigned long bitmap);
-unsigned ffs_u(unsigned bitmap);
-unsigned ffs_zu(size_t bitmap);
-unsigned ffs_u64(uint64_t bitmap);
-unsigned ffs_u32(uint32_t bitmap);
-uint64_t pow2_ceil_u64(uint64_t x);
-uint32_t pow2_ceil_u32(uint32_t x);
-size_t pow2_ceil_zu(size_t x);
-unsigned lg_floor(size_t x);
-void set_errno(int errnum);
-int get_errno(void);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_))
-
-/* Sanity check. */
-#if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \
- || !defined(JEMALLOC_INTERNAL_FFS)
-# error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure
-#endif
-
-JEMALLOC_ALWAYS_INLINE unsigned
-ffs_llu(unsigned long long bitmap)
-{
-
- return (JEMALLOC_INTERNAL_FFSLL(bitmap));
-}
-
-JEMALLOC_ALWAYS_INLINE unsigned
-ffs_lu(unsigned long bitmap)
-{
-
- return (JEMALLOC_INTERNAL_FFSL(bitmap));
-}
-
-JEMALLOC_ALWAYS_INLINE unsigned
-ffs_u(unsigned bitmap)
-{
-
- return (JEMALLOC_INTERNAL_FFS(bitmap));
-}
-
-JEMALLOC_ALWAYS_INLINE unsigned
-ffs_zu(size_t bitmap)
-{
-
-#if LG_SIZEOF_PTR == LG_SIZEOF_INT
- return (ffs_u(bitmap));
-#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
- return (ffs_lu(bitmap));
-#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG
- return (ffs_llu(bitmap));
-#else
-#error No implementation for size_t ffs()
-#endif
-}
-
-JEMALLOC_ALWAYS_INLINE unsigned
-ffs_u64(uint64_t bitmap)
-{
-
-#if LG_SIZEOF_LONG == 3
- return (ffs_lu(bitmap));
-#elif LG_SIZEOF_LONG_LONG == 3
- return (ffs_llu(bitmap));
-#else
-#error No implementation for 64-bit ffs()
-#endif
-}
-
-JEMALLOC_ALWAYS_INLINE unsigned
-ffs_u32(uint32_t bitmap)
-{
-
-#if LG_SIZEOF_INT == 2
- return (ffs_u(bitmap));
-#else
-#error No implementation for 32-bit ffs()
-#endif
- return (ffs_u(bitmap));
-}
-
-JEMALLOC_INLINE uint64_t
-pow2_ceil_u64(uint64_t x)
-{
-
- x--;
- x |= x >> 1;
- x |= x >> 2;
- x |= x >> 4;
- x |= x >> 8;
- x |= x >> 16;
- x |= x >> 32;
- x++;
- return (x);
-}
-
-JEMALLOC_INLINE uint32_t
-pow2_ceil_u32(uint32_t x)
-{
-
- x--;
- x |= x >> 1;
- x |= x >> 2;
- x |= x >> 4;
- x |= x >> 8;
- x |= x >> 16;
- x++;
- return (x);
-}
-
-/* Compute the smallest power of 2 that is >= x. */
-JEMALLOC_INLINE size_t
-pow2_ceil_zu(size_t x)
-{
-
-#if (LG_SIZEOF_PTR == 3)
- return (pow2_ceil_u64(x));
-#else
- return (pow2_ceil_u32(x));
-#endif
-}
-
-#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
-JEMALLOC_INLINE unsigned
-lg_floor(size_t x)
-{
- size_t ret;
-
- assert(x != 0);
-
- asm ("bsr %1, %0"
- : "=r"(ret) // Outputs.
- : "r"(x) // Inputs.
- );
- assert(ret < UINT_MAX);
- return ((unsigned)ret);
-}
-#elif (defined(_MSC_VER))
-JEMALLOC_INLINE unsigned
-lg_floor(size_t x)
-{
- unsigned long ret;
-
- assert(x != 0);
-
-#if (LG_SIZEOF_PTR == 3)
- _BitScanReverse64(&ret, x);
-#elif (LG_SIZEOF_PTR == 2)
- _BitScanReverse(&ret, x);
-#else
-# error "Unsupported type size for lg_floor()"
-#endif
- assert(ret < UINT_MAX);
- return ((unsigned)ret);
-}
-#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
-JEMALLOC_INLINE unsigned
-lg_floor(size_t x)
-{
-
- assert(x != 0);
-
-#if (LG_SIZEOF_PTR == LG_SIZEOF_INT)
- return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x));
-#elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG)
- return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x));
-#else
-# error "Unsupported type size for lg_floor()"
-#endif
-}
-#else
-JEMALLOC_INLINE unsigned
-lg_floor(size_t x)
-{
-
- assert(x != 0);
-
- x |= (x >> 1);
- x |= (x >> 2);
- x |= (x >> 4);
- x |= (x >> 8);
- x |= (x >> 16);
-#if (LG_SIZEOF_PTR == 3)
- x |= (x >> 32);
-#endif
- if (x == SIZE_T_MAX)
- return ((8 << LG_SIZEOF_PTR) - 1);
- x++;
- return (ffs_zu(x) - 2);
-}
-#endif
-
/* Set error code. */
-JEMALLOC_INLINE void
-set_errno(int errnum)
-{
-
+UTIL_INLINE void
+set_errno(int errnum) {
#ifdef _WIN32
SetLastError(errnum);
#else
@@ -326,17 +53,15 @@ set_errno(int errnum)
}
/* Get last error code. */
-JEMALLOC_INLINE int
-get_errno(void)
-{
-
+UTIL_INLINE int
+get_errno(void) {
#ifdef _WIN32
- return (GetLastError());
+ return GetLastError();
#else
- return (errno);
+ return errno;
#endif
}
-#endif
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
+#undef UTIL_INLINE
+
+#endif /* JEMALLOC_INTERNAL_UTIL_H */
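
A short standalone example of the STRINGIFY and unlikely helpers kept in util.h, restated locally so it compiles by itself; BUF_SIZE is an arbitrary stand-in macro.

#include <stdio.h>

/* Local restatement of the util.h helpers exercised below. */
#define STRINGIFY_HELPER(x) #x
#define STRINGIFY(x) STRINGIFY_HELPER(x)
#ifdef __GNUC__
#  define unlikely(x) __builtin_expect(!!(x), 0)
#else
#  define unlikely(x) !!(x)
#endif

#define BUF_SIZE 64

int
main(void) {
	/* The helper indirection expands BUF_SIZE first, yielding "64". */
	printf("BUF_SIZE is " STRINGIFY(BUF_SIZE) "\n");

	/* unlikely() only hints the branch predictor; semantics are unchanged. */
	int err = 0;
	if (unlikely(err != 0)) {
		printf("error path\n");
	}
	return 0;
}
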
diff --git a/contrib/jemalloc/include/jemalloc/internal/valgrind.h b/contrib/jemalloc/include/jemalloc/internal/valgrind.h
deleted file mode 100644
index 877a142b62d9..000000000000
--- a/contrib/jemalloc/include/jemalloc/internal/valgrind.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-#ifdef JEMALLOC_VALGRIND
-#include <valgrind/valgrind.h>
-
-/*
- * The size that is reported to Valgrind must be consistent through a chain of
- * malloc..realloc..realloc calls. Request size isn't recorded anywhere in
- * jemalloc, so it is critical that all callers of these macros provide usize
- * rather than request size. As a result, buffer overflow detection is
- * technically weakened for the standard API, though it is generally accepted
- * practice to consider any extra bytes reported by malloc_usable_size() as
- * usable space.
- */
-#define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do { \
- if (unlikely(in_valgrind)) \
- valgrind_make_mem_noaccess(ptr, usize); \
-} while (0)
-#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do { \
- if (unlikely(in_valgrind)) \
- valgrind_make_mem_undefined(ptr, usize); \
-} while (0)
-#define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do { \
- if (unlikely(in_valgrind)) \
- valgrind_make_mem_defined(ptr, usize); \
-} while (0)
-/*
- * The VALGRIND_MALLOCLIKE_BLOCK() and VALGRIND_RESIZEINPLACE_BLOCK() macro
- * calls must be embedded in macros rather than in functions so that when
- * Valgrind reports errors, there are no extra stack frames in the backtraces.
- */
-#define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do { \
- if (unlikely(in_valgrind && cond)) { \
- VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(tsdn, ptr), \
- zero); \
- } \
-} while (0)
-#define JEMALLOC_VALGRIND_REALLOC_MOVED_no(ptr, old_ptr) \
- (false)
-#define JEMALLOC_VALGRIND_REALLOC_MOVED_maybe(ptr, old_ptr) \
- ((ptr) != (old_ptr))
-#define JEMALLOC_VALGRIND_REALLOC_PTR_NULL_no(ptr) \
- (false)
-#define JEMALLOC_VALGRIND_REALLOC_PTR_NULL_maybe(ptr) \
- (ptr == NULL)
-#define JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_no(old_ptr) \
- (false)
-#define JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_maybe(old_ptr) \
- (old_ptr == NULL)
-#define JEMALLOC_VALGRIND_REALLOC(moved, tsdn, ptr, usize, ptr_null, \
- old_ptr, old_usize, old_rzsize, old_ptr_null, zero) do { \
- if (unlikely(in_valgrind)) { \
- size_t rzsize = p2rz(tsdn, ptr); \
- \
- if (!JEMALLOC_VALGRIND_REALLOC_MOVED_##moved(ptr, \
- old_ptr)) { \
- VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \
- usize, rzsize); \
- if (zero && old_usize < usize) { \
- valgrind_make_mem_defined( \
- (void *)((uintptr_t)ptr + \
- old_usize), usize - old_usize); \
- } \
- } else { \
- if (!JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_## \
- old_ptr_null(old_ptr)) { \
- valgrind_freelike_block(old_ptr, \
- old_rzsize); \
- } \
- if (!JEMALLOC_VALGRIND_REALLOC_PTR_NULL_## \
- ptr_null(ptr)) { \
- size_t copy_size = (old_usize < usize) \
- ? old_usize : usize; \
- size_t tail_size = usize - copy_size; \
- VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \
- rzsize, false); \
- if (copy_size > 0) { \
- valgrind_make_mem_defined(ptr, \
- copy_size); \
- } \
- if (zero && tail_size > 0) { \
- valgrind_make_mem_defined( \
- (void *)((uintptr_t)ptr + \
- copy_size), tail_size); \
- } \
- } \
- } \
- } \
-} while (0)
-#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \
- if (unlikely(in_valgrind)) \
- valgrind_freelike_block(ptr, rzsize); \
-} while (0)
-#else
-#define RUNNING_ON_VALGRIND ((unsigned)0)
-#define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do {} while (0)
-#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do {} while (0)
-#define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do {} while (0)
-#define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do {} while (0)
-#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, tsdn, ptr, usize, \
- ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \
- zero) do {} while (0)
-#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0)
-#endif
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-#ifdef JEMALLOC_VALGRIND
-void valgrind_make_mem_noaccess(void *ptr, size_t usize);
-void valgrind_make_mem_undefined(void *ptr, size_t usize);
-void valgrind_make_mem_defined(void *ptr, size_t usize);
-void valgrind_freelike_block(void *ptr, size_t usize);
-#endif
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
-
diff --git a/contrib/jemalloc/include/jemalloc/internal/witness.h b/contrib/jemalloc/include/jemalloc/internal/witness.h
index 30d8c7e902d0..33be66610716 100644
--- a/contrib/jemalloc/include/jemalloc/internal/witness.h
+++ b/contrib/jemalloc/include/jemalloc/internal/witness.h
@@ -1,29 +1,35 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
+#ifndef JEMALLOC_INTERNAL_WITNESS_H
+#define JEMALLOC_INTERNAL_WITNESS_H
-typedef struct witness_s witness_t;
-typedef unsigned witness_rank_t;
-typedef ql_head(witness_t) witness_list_t;
-typedef int witness_comp_t (const witness_t *, const witness_t *);
+#include "jemalloc/internal/ql.h"
+
+/******************************************************************************/
+/* LOCK RANKS */
+/******************************************************************************/
/*
- * Lock ranks. Witnesses with rank WITNESS_RANK_OMIT are completely ignored by
- * the witness machinery.
+ * Witnesses with rank WITNESS_RANK_OMIT are completely ignored by the witness
+ * machinery.
*/
-#define WITNESS_RANK_OMIT 0U
+
+#define WITNESS_RANK_OMIT 0U
#define WITNESS_RANK_MIN 1U
-#define WITNESS_RANK_INIT 1U
-#define WITNESS_RANK_CTL 1U
+#define WITNESS_RANK_INIT 1U
+#define WITNESS_RANK_CTL 1U
#define WITNESS_RANK_TCACHES 2U
-#define WITNESS_RANK_ARENAS 3U
+#define WITNESS_RANK_ARENAS 3U
+
+#define WITNESS_RANK_BACKGROUND_THREAD_GLOBAL 4U
+
+#define WITNESS_RANK_PROF_DUMP 5U
+#define WITNESS_RANK_PROF_BT2GCTX 6U
+#define WITNESS_RANK_PROF_TDATAS 7U
+#define WITNESS_RANK_PROF_TDATA 8U
+#define WITNESS_RANK_PROF_GCTX 9U
-#define WITNESS_RANK_PROF_DUMP 4U
-#define WITNESS_RANK_PROF_BT2GCTX 5U
-#define WITNESS_RANK_PROF_TDATAS 6U
-#define WITNESS_RANK_PROF_TDATA 7U
-#define WITNESS_RANK_PROF_GCTX 8U
+#define WITNESS_RANK_BACKGROUND_THREAD 10U
/*
* Used as an argument to witness_assert_depth_to_rank() in order to validate
@@ -31,29 +37,44 @@ typedef int witness_comp_t (const witness_t *, const witness_t *);
* witness_assert_depth_to_rank() is inclusive rather than exclusive, this
* definition can have the same value as the minimally ranked core lock.
*/
-#define WITNESS_RANK_CORE 9U
+#define WITNESS_RANK_CORE 11U
+
+#define WITNESS_RANK_DECAY 11U
+#define WITNESS_RANK_TCACHE_QL 12U
+#define WITNESS_RANK_EXTENT_GROW 13U
+#define WITNESS_RANK_EXTENTS 14U
+#define WITNESS_RANK_EXTENT_AVAIL 15U
+
+#define WITNESS_RANK_EXTENT_POOL 16U
+#define WITNESS_RANK_RTREE 17U
+#define WITNESS_RANK_BASE 18U
+#define WITNESS_RANK_ARENA_LARGE 19U
+
+#define WITNESS_RANK_LEAF 0xffffffffU
+#define WITNESS_RANK_ARENA_BIN WITNESS_RANK_LEAF
+#define WITNESS_RANK_ARENA_STATS WITNESS_RANK_LEAF
+#define WITNESS_RANK_DSS WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_ACCUM WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_DUMP_SEQ WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF
-#define WITNESS_RANK_ARENA 9U
-#define WITNESS_RANK_ARENA_CHUNKS 10U
-#define WITNESS_RANK_ARENA_NODE_CACHE 11U
-
-#define WITNESS_RANK_BASE 12U
-
-#define WITNESS_RANK_LEAF 0xffffffffU
-#define WITNESS_RANK_ARENA_BIN WITNESS_RANK_LEAF
-#define WITNESS_RANK_ARENA_HUGE WITNESS_RANK_LEAF
-#define WITNESS_RANK_DSS WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_DUMP_SEQ WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF
-
-#define WITNESS_INITIALIZER(rank) {"initializer", rank, NULL, {NULL, NULL}}
-
-#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
+/* PER-WITNESS DATA */
+/******************************************************************************/
+#if defined(JEMALLOC_DEBUG)
+# define WITNESS_INITIALIZER(name, rank) {name, rank, NULL, NULL, {NULL, NULL}}
+#else
+# define WITNESS_INITIALIZER(name, rank)
+#endif
+
+typedef struct witness_s witness_t;
+typedef unsigned witness_rank_t;
+typedef ql_head(witness_t) witness_list_t;
+typedef int witness_comp_t (const witness_t *, void *, const witness_t *,
+ void *);
struct witness_s {
/* Name, used for printing lock order reversal messages. */
@@ -72,143 +93,159 @@ struct witness_s {
*/
witness_comp_t *comp;
+ /* Opaque data, passed to comp(). */
+ void *opaque;
+
/* Linkage for thread's currently owned locks. */
ql_elm(witness_t) link;
};
-#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
+/* PER-THREAD DATA */
+/******************************************************************************/
+typedef struct witness_tsd_s witness_tsd_t;
+struct witness_tsd_s {
+ witness_list_t witnesses;
+ bool forking;
+};
+
+#define WITNESS_TSD_INITIALIZER { ql_head_initializer(witnesses), false }
+#define WITNESS_TSDN_NULL ((witness_tsdn_t *)0)
+
+/******************************************************************************/
+/* (PER-THREAD) NULLABILITY HELPERS */
+/******************************************************************************/
+typedef struct witness_tsdn_s witness_tsdn_t;
+struct witness_tsdn_s {
+ witness_tsd_t witness_tsd;
+};
+
+JEMALLOC_ALWAYS_INLINE witness_tsdn_t *
+witness_tsd_tsdn(witness_tsd_t *witness_tsd) {
+ return (witness_tsdn_t *)witness_tsd;
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+witness_tsdn_null(witness_tsdn_t *witness_tsdn) {
+ return witness_tsdn == NULL;
+}
+
+JEMALLOC_ALWAYS_INLINE witness_tsd_t *
+witness_tsdn_tsd(witness_tsdn_t *witness_tsdn) {
+ assert(!witness_tsdn_null(witness_tsdn));
+ return &witness_tsdn->witness_tsd;
+}
+
+/******************************************************************************/
+/* API */
+/******************************************************************************/
+void witness_init(witness_t *witness, const char *name, witness_rank_t rank,
+ witness_comp_t *comp, void *opaque);
-void witness_init(witness_t *witness, const char *name, witness_rank_t rank,
- witness_comp_t *comp);
-#ifdef JEMALLOC_JET
typedef void (witness_lock_error_t)(const witness_list_t *, const witness_t *);
-extern witness_lock_error_t *witness_lock_error;
-#else
-void witness_lock_error(const witness_list_t *witnesses,
- const witness_t *witness);
-#endif
-#ifdef JEMALLOC_JET
+extern witness_lock_error_t *JET_MUTABLE witness_lock_error;
+
typedef void (witness_owner_error_t)(const witness_t *);
-extern witness_owner_error_t *witness_owner_error;
-#else
-void witness_owner_error(const witness_t *witness);
-#endif
-#ifdef JEMALLOC_JET
+extern witness_owner_error_t *JET_MUTABLE witness_owner_error;
+
typedef void (witness_not_owner_error_t)(const witness_t *);
-extern witness_not_owner_error_t *witness_not_owner_error;
-#else
-void witness_not_owner_error(const witness_t *witness);
-#endif
-#ifdef JEMALLOC_JET
+extern witness_not_owner_error_t *JET_MUTABLE witness_not_owner_error;
+
typedef void (witness_depth_error_t)(const witness_list_t *,
witness_rank_t rank_inclusive, unsigned depth);
-extern witness_depth_error_t *witness_depth_error;
-#else
-void witness_depth_error(const witness_list_t *witnesses,
- witness_rank_t rank_inclusive, unsigned depth);
-#endif
+extern witness_depth_error_t *JET_MUTABLE witness_depth_error;
-void witnesses_cleanup(tsd_t *tsd);
-void witness_fork_cleanup(tsd_t *tsd);
-void witness_prefork(tsd_t *tsd);
-void witness_postfork_parent(tsd_t *tsd);
-void witness_postfork_child(tsd_t *tsd);
+void witnesses_cleanup(witness_tsd_t *witness_tsd);
+void witness_prefork(witness_tsd_t *witness_tsd);
+void witness_postfork_parent(witness_tsd_t *witness_tsd);
+void witness_postfork_child(witness_tsd_t *witness_tsd);
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-bool witness_owner(tsd_t *tsd, const witness_t *witness);
-void witness_assert_owner(tsdn_t *tsdn, const witness_t *witness);
-void witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness);
-void witness_assert_depth_to_rank(tsdn_t *tsdn, witness_rank_t rank_inclusive,
- unsigned depth);
-void witness_assert_depth(tsdn_t *tsdn, unsigned depth);
-void witness_assert_lockless(tsdn_t *tsdn);
-void witness_lock(tsdn_t *tsdn, witness_t *witness);
-void witness_unlock(tsdn_t *tsdn, witness_t *witness);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
-JEMALLOC_INLINE bool
-witness_owner(tsd_t *tsd, const witness_t *witness)
-{
+/* Helper, not intended for direct use. */
+static inline bool
+witness_owner(witness_tsd_t *witness_tsd, const witness_t *witness) {
witness_list_t *witnesses;
witness_t *w;
cassert(config_debug);
- witnesses = tsd_witnessesp_get(tsd);
+ witnesses = &witness_tsd->witnesses;
ql_foreach(w, witnesses, link) {
- if (w == witness)
- return (true);
+ if (w == witness) {
+ return true;
+ }
}
- return (false);
+ return false;
}
-JEMALLOC_INLINE void
-witness_assert_owner(tsdn_t *tsdn, const witness_t *witness)
-{
- tsd_t *tsd;
+static inline void
+witness_assert_owner(witness_tsdn_t *witness_tsdn, const witness_t *witness) {
+ witness_tsd_t *witness_tsd;
- if (!config_debug)
+ if (!config_debug) {
return;
+ }
- if (tsdn_null(tsdn))
+ if (witness_tsdn_null(witness_tsdn)) {
return;
- tsd = tsdn_tsd(tsdn);
- if (witness->rank == WITNESS_RANK_OMIT)
+ }
+ witness_tsd = witness_tsdn_tsd(witness_tsdn);
+ if (witness->rank == WITNESS_RANK_OMIT) {
return;
+ }
- if (witness_owner(tsd, witness))
+ if (witness_owner(witness_tsd, witness)) {
return;
+ }
witness_owner_error(witness);
}
-JEMALLOC_INLINE void
-witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness)
-{
- tsd_t *tsd;
+static inline void
+witness_assert_not_owner(witness_tsdn_t *witness_tsdn,
+ const witness_t *witness) {
+ witness_tsd_t *witness_tsd;
witness_list_t *witnesses;
witness_t *w;
- if (!config_debug)
+ if (!config_debug) {
return;
+ }
- if (tsdn_null(tsdn))
+ if (witness_tsdn_null(witness_tsdn)) {
return;
- tsd = tsdn_tsd(tsdn);
- if (witness->rank == WITNESS_RANK_OMIT)
+ }
+ witness_tsd = witness_tsdn_tsd(witness_tsdn);
+ if (witness->rank == WITNESS_RANK_OMIT) {
return;
+ }
- witnesses = tsd_witnessesp_get(tsd);
+ witnesses = &witness_tsd->witnesses;
ql_foreach(w, witnesses, link) {
- if (w == witness)
+ if (w == witness) {
witness_not_owner_error(witness);
+ }
}
}
-JEMALLOC_INLINE void
-witness_assert_depth_to_rank(tsdn_t *tsdn, witness_rank_t rank_inclusive,
- unsigned depth) {
- tsd_t *tsd;
+static inline void
+witness_assert_depth_to_rank(witness_tsdn_t *witness_tsdn,
+ witness_rank_t rank_inclusive, unsigned depth) {
+ witness_tsd_t *witness_tsd;
unsigned d;
witness_list_t *witnesses;
witness_t *w;
- if (!config_debug)
+ if (!config_debug) {
return;
+ }
- if (tsdn_null(tsdn))
+ if (witness_tsdn_null(witness_tsdn)) {
return;
- tsd = tsdn_tsd(tsdn);
+ }
+ witness_tsd = witness_tsdn_tsd(witness_tsdn);
d = 0;
- witnesses = tsd_witnessesp_get(tsd);
+ witnesses = &witness_tsd->witnesses;
w = ql_last(witnesses, link);
if (w != NULL) {
ql_reverse_foreach(w, witnesses, link) {
@@ -218,49 +255,53 @@ witness_assert_depth_to_rank(tsdn_t *tsdn, witness_rank_t rank_inclusive,
d++;
}
}
- if (d != depth)
+ if (d != depth) {
witness_depth_error(witnesses, rank_inclusive, depth);
+ }
}
-JEMALLOC_INLINE void
-witness_assert_depth(tsdn_t *tsdn, unsigned depth) {
- witness_assert_depth_to_rank(tsdn, WITNESS_RANK_MIN, depth);
+static inline void
+witness_assert_depth(witness_tsdn_t *witness_tsdn, unsigned depth) {
+ witness_assert_depth_to_rank(witness_tsdn, WITNESS_RANK_MIN, depth);
}
-JEMALLOC_INLINE void
-witness_assert_lockless(tsdn_t *tsdn) {
- witness_assert_depth(tsdn, 0);
+static inline void
+witness_assert_lockless(witness_tsdn_t *witness_tsdn) {
+ witness_assert_depth(witness_tsdn, 0);
}
-JEMALLOC_INLINE void
-witness_lock(tsdn_t *tsdn, witness_t *witness)
-{
- tsd_t *tsd;
+static inline void
+witness_lock(witness_tsdn_t *witness_tsdn, witness_t *witness) {
+ witness_tsd_t *witness_tsd;
witness_list_t *witnesses;
witness_t *w;
- if (!config_debug)
+ if (!config_debug) {
return;
+ }
- if (tsdn_null(tsdn))
+ if (witness_tsdn_null(witness_tsdn)) {
return;
- tsd = tsdn_tsd(tsdn);
- if (witness->rank == WITNESS_RANK_OMIT)
+ }
+ witness_tsd = witness_tsdn_tsd(witness_tsdn);
+ if (witness->rank == WITNESS_RANK_OMIT) {
return;
+ }
- witness_assert_not_owner(tsdn, witness);
+ witness_assert_not_owner(witness_tsdn, witness);
- witnesses = tsd_witnessesp_get(tsd);
+ witnesses = &witness_tsd->witnesses;
w = ql_last(witnesses, link);
if (w == NULL) {
/* No other locks; do nothing. */
- } else if (tsd_witness_fork_get(tsd) && w->rank <= witness->rank) {
+ } else if (witness_tsd->forking && w->rank <= witness->rank) {
/* Forking, and relaxed ranking satisfied. */
} else if (w->rank > witness->rank) {
/* Not forking, rank order reversal. */
witness_lock_error(witnesses, witness);
} else if (w->rank == witness->rank && (w->comp == NULL || w->comp !=
- witness->comp || w->comp(w, witness) > 0)) {
+ witness->comp || w->comp(w, w->opaque, witness, witness->opaque) >
+ 0)) {
/*
* Missing/incompatible comparison function, or comparison
* function indicates rank order reversal.
@@ -272,33 +313,34 @@ witness_lock(tsdn_t *tsdn, witness_t *witness)
ql_tail_insert(witnesses, witness, link);
}
-JEMALLOC_INLINE void
-witness_unlock(tsdn_t *tsdn, witness_t *witness)
-{
- tsd_t *tsd;
+static inline void
+witness_unlock(witness_tsdn_t *witness_tsdn, witness_t *witness) {
+ witness_tsd_t *witness_tsd;
witness_list_t *witnesses;
- if (!config_debug)
+ if (!config_debug) {
return;
+ }
- if (tsdn_null(tsdn))
+ if (witness_tsdn_null(witness_tsdn)) {
return;
- tsd = tsdn_tsd(tsdn);
- if (witness->rank == WITNESS_RANK_OMIT)
+ }
+ witness_tsd = witness_tsdn_tsd(witness_tsdn);
+ if (witness->rank == WITNESS_RANK_OMIT) {
return;
+ }
/*
	 * Check whether the witness is owned before removal, rather than relying on
* witness_assert_owner() to abort, so that unit tests can test this
* function's failure mode without causing undefined behavior.
*/
- if (witness_owner(tsd, witness)) {
- witnesses = tsd_witnessesp_get(tsd);
+ if (witness_owner(witness_tsd, witness)) {
+ witnesses = &witness_tsd->witnesses;
ql_remove(witnesses, witness, link);
- } else
- witness_assert_owner(tsdn, witness);
+ } else {
+ witness_assert_owner(witness_tsdn, witness);
+ }
}
-#endif
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
+#endif /* JEMALLOC_INTERNAL_WITNESS_H */
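
The rewritten witness.h above drops the old JEMALLOC_H_* phase markers, routes everything through witness_tsdn_t instead of tsdn_t, and passes opaque data to the comparison callback. As a minimal sketch of how the new API fits together (internal to the jemalloc tree, effective only when config_debug is set; the witness name, rank choice, and wrapper functions below are hypothetical, and the includes follow the new preamble pattern used in arena.c further down):

#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

static witness_t example_witness;	/* Hypothetical witness guarding one mutex. */

static void
example_witness_setup(void) {
	/* No custom ordering within the rank, so comp and opaque are NULL. */
	witness_init(&example_witness, "example_mtx", WITNESS_RANK_LEAF,
	    NULL, NULL);
}

static void
example_locked_section(witness_tsd_t *witness_tsd) {
	witness_tsdn_t *witness_tsdn = witness_tsd_tsdn(witness_tsd);

	/* Records the acquisition; aborts on rank-order reversal in debug builds. */
	witness_lock(witness_tsdn, &example_witness);
	/* ... critical section protected by the associated mutex ... */
	witness_unlock(witness_tsdn, &example_witness);
}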
diff --git a/contrib/jemalloc/include/jemalloc/jemalloc.h b/contrib/jemalloc/include/jemalloc/jemalloc.h
index 6b386623f3e0..c4ad45b2a0c5 100644
--- a/contrib/jemalloc/include/jemalloc/jemalloc.h
+++ b/contrib/jemalloc/include/jemalloc/jemalloc.h
@@ -1,11 +1,11 @@
#ifndef JEMALLOC_H_
-#define JEMALLOC_H_
+#define JEMALLOC_H_
#ifdef __cplusplus
extern "C" {
#endif
/* Defined if __attribute__((...)) syntax is supported. */
-#define JEMALLOC_HAVE_ATTR
+#define JEMALLOC_HAVE_ATTR
/* Defined if alloc_size attribute is supported. */
/* #undef JEMALLOC_HAVE_ATTR_ALLOC_SIZE */
@@ -14,14 +14,14 @@ extern "C" {
/* #undef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF */
/* Defined if format(printf, ...) attribute is supported. */
-#define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF
+#define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF
/*
* Define overrides for non-standard allocator-related functions if they are
* present on the system.
*/
/* #undef JEMALLOC_OVERRIDE_MEMALIGN */
-#define JEMALLOC_OVERRIDE_VALLOC
+#define JEMALLOC_OVERRIDE_VALLOC
/*
* At least Linux omits the "const" in:
@@ -30,7 +30,7 @@ extern "C" {
*
* Match the operating system's prototype.
*/
-#define JEMALLOC_USABLE_SIZE_CONST const
+#define JEMALLOC_USABLE_SIZE_CONST const
/*
* If defined, specify throw() for the public function prototypes when compiling
@@ -48,7 +48,7 @@ extern "C" {
#endif
/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
-#define LG_SIZEOF_PTR 3
+#define LG_SIZEOF_PTR 3
/*
* Name mangling for public symbols is controlled by --with-mangling and
@@ -56,26 +56,26 @@ extern "C" {
* these macro definitions.
*/
#ifndef JEMALLOC_NO_RENAME
-# define je_malloc_conf malloc_conf
-# define je_malloc_message malloc_message
-# define je_malloc malloc
-# define je_calloc calloc
-# define je_posix_memalign posix_memalign
# define je_aligned_alloc aligned_alloc
-# define je_realloc realloc
-# define je_free free
-# define je_mallocx mallocx
-# define je_rallocx rallocx
-# define je_xallocx xallocx
-# define je_sallocx sallocx
+# define je_calloc calloc
# define je_dallocx dallocx
-# define je_sdallocx sdallocx
-# define je_nallocx nallocx
+# define je_free free
# define je_mallctl mallctl
-# define je_mallctlnametomib mallctlnametomib
# define je_mallctlbymib mallctlbymib
+# define je_mallctlnametomib mallctlnametomib
+# define je_malloc malloc
+# define je_malloc_conf malloc_conf
+# define je_malloc_message malloc_message
# define je_malloc_stats_print malloc_stats_print
# define je_malloc_usable_size malloc_usable_size
+# define je_mallocx mallocx
+# define je_nallocx nallocx
+# define je_posix_memalign posix_memalign
+# define je_rallocx rallocx
+# define je_realloc realloc
+# define je_sallocx sallocx
+# define je_sdallocx sdallocx
+# define je_xallocx xallocx
# define je_valloc valloc
#endif
@@ -87,32 +87,51 @@ extern "C" {
#include <limits.h>
#include <strings.h>
-#define JEMALLOC_VERSION "4.5.0-0-g04380e79f1e2428bd0ad000bbc6e3d2dfc6b66a5"
-#define JEMALLOC_VERSION_MAJOR 4
-#define JEMALLOC_VERSION_MINOR 5
-#define JEMALLOC_VERSION_BUGFIX 0
-#define JEMALLOC_VERSION_NREV 0
-#define JEMALLOC_VERSION_GID "04380e79f1e2428bd0ad000bbc6e3d2dfc6b66a5"
+#define JEMALLOC_VERSION "5.0.0-4-g84f6c2cae0fb1399377ef6aea9368444c4987cc6"
+#define JEMALLOC_VERSION_MAJOR 5
+#define JEMALLOC_VERSION_MINOR 0
+#define JEMALLOC_VERSION_BUGFIX 0
+#define JEMALLOC_VERSION_NREV 4
+#define JEMALLOC_VERSION_GID "84f6c2cae0fb1399377ef6aea9368444c4987cc6"
-# define MALLOCX_LG_ALIGN(la) ((int)(la))
-# if LG_SIZEOF_PTR == 2
-# define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1))
-# else
-# define MALLOCX_ALIGN(a) \
- ((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \
- ffs((int)(((size_t)(a))>>32))+31))
-# endif
-# define MALLOCX_ZERO ((int)0x40)
+#define MALLOCX_LG_ALIGN(la) ((int)(la))
+#if LG_SIZEOF_PTR == 2
+# define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1))
+#else
+# define MALLOCX_ALIGN(a) \
+ ((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \
+ ffs((int)(((size_t)(a))>>32))+31))
+#endif
+#define MALLOCX_ZERO ((int)0x40)
/*
* Bias tcache index bits so that 0 encodes "automatic tcache management", and 1
* encodes MALLOCX_TCACHE_NONE.
*/
-# define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8))
-# define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1)
+#define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8))
+#define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1)
/*
* Bias arena index bits so that 0 encodes "use an automatically chosen arena".
*/
-# define MALLOCX_ARENA(a) ((((int)(a))+1) << 20)
+#define MALLOCX_ARENA(a) ((((int)(a))+1) << 20)
+
+/*
+ * Use as arena index in "arena.<i>.{purge,decay,dss}" and
+ * "stats.arenas.<i>.*" mallctl interfaces to select all arenas. This
+ * definition is intentionally specified in raw decimal format to support
+ * cpp-based string concatenation, e.g.
+ *
+ * #define STRINGIFY_HELPER(x) #x
+ * #define STRINGIFY(x) STRINGIFY_HELPER(x)
+ *
+ * mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", NULL, NULL, NULL,
+ * 0);
+ */
+#define MALLCTL_ARENAS_ALL 4096
+/*
+ * Use as arena index in "stats.arenas.<i>.*" mallctl interfaces to select
+ * destroyed arenas.
+ */
+#define MALLCTL_ARENAS_DESTROYED 4097
#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW)
# define JEMALLOC_CXX_THROW throw()
@@ -120,7 +139,7 @@ extern "C" {
# define JEMALLOC_CXX_THROW
#endif
-#if _MSC_VER
+#if defined(_MSC_VER)
# define JEMALLOC_ATTR(s)
# define JEMALLOC_ALIGNED(s) __declspec(align(s))
# define JEMALLOC_ALLOC_SIZE(s)
@@ -252,63 +271,83 @@ JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
JEMALLOC_ATTR(malloc);
#endif
+typedef struct extent_hooks_s extent_hooks_t;
+
/*
* void *
- * chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
- * bool *commit, unsigned arena_ind);
+ * extent_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
+ * size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
*/
-typedef void *(chunk_alloc_t)(void *, size_t, size_t, bool *, bool *, unsigned);
+typedef void *(extent_alloc_t)(extent_hooks_t *, void *, size_t, size_t, bool *,
+ bool *, unsigned);
/*
* bool
- * chunk_dalloc(void *chunk, size_t size, bool committed, unsigned arena_ind);
+ * extent_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ * bool committed, unsigned arena_ind);
*/
-typedef bool (chunk_dalloc_t)(void *, size_t, bool, unsigned);
+typedef bool (extent_dalloc_t)(extent_hooks_t *, void *, size_t, bool,
+ unsigned);
+
+/*
+ * void
+ * extent_destroy(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ * bool committed, unsigned arena_ind);
+ */
+typedef void (extent_destroy_t)(extent_hooks_t *, void *, size_t, bool,
+ unsigned);
/*
* bool
- * chunk_commit(void *chunk, size_t size, size_t offset, size_t length,
- * unsigned arena_ind);
+ * extent_commit(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ * size_t offset, size_t length, unsigned arena_ind);
*/
-typedef bool (chunk_commit_t)(void *, size_t, size_t, size_t, unsigned);
+typedef bool (extent_commit_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
+ unsigned);
/*
* bool
- * chunk_decommit(void *chunk, size_t size, size_t offset, size_t length,
- * unsigned arena_ind);
+ * extent_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ * size_t offset, size_t length, unsigned arena_ind);
*/
-typedef bool (chunk_decommit_t)(void *, size_t, size_t, size_t, unsigned);
+typedef bool (extent_decommit_t)(extent_hooks_t *, void *, size_t, size_t,
+ size_t, unsigned);
/*
* bool
- * chunk_purge(void *chunk, size_t size, size_t offset, size_t length,
- * unsigned arena_ind);
+ * extent_purge(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ * size_t offset, size_t length, unsigned arena_ind);
*/
-typedef bool (chunk_purge_t)(void *, size_t, size_t, size_t, unsigned);
+typedef bool (extent_purge_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
+ unsigned);
/*
* bool
- * chunk_split(void *chunk, size_t size, size_t size_a, size_t size_b,
- * bool committed, unsigned arena_ind);
+ * extent_split(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ * size_t size_a, size_t size_b, bool committed, unsigned arena_ind);
*/
-typedef bool (chunk_split_t)(void *, size_t, size_t, size_t, bool, unsigned);
+typedef bool (extent_split_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
+ bool, unsigned);
/*
* bool
- * chunk_merge(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
- * bool committed, unsigned arena_ind);
+ * extent_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
+ * void *addr_b, size_t size_b, bool committed, unsigned arena_ind);
*/
-typedef bool (chunk_merge_t)(void *, size_t, void *, size_t, bool, unsigned);
-
-typedef struct {
- chunk_alloc_t *alloc;
- chunk_dalloc_t *dalloc;
- chunk_commit_t *commit;
- chunk_decommit_t *decommit;
- chunk_purge_t *purge;
- chunk_split_t *split;
- chunk_merge_t *merge;
-} chunk_hooks_t;
+typedef bool (extent_merge_t)(extent_hooks_t *, void *, size_t, void *, size_t,
+ bool, unsigned);
+
+struct extent_hooks_s {
+ extent_alloc_t *alloc;
+ extent_dalloc_t *dalloc;
+ extent_destroy_t *destroy;
+ extent_commit_t *commit;
+ extent_decommit_t *decommit;
+ extent_purge_t *purge_lazy;
+ extent_purge_t *purge_forced;
+ extent_split_t *split;
+ extent_merge_t *merge;
+};
/*
* By default application code must explicitly refer to mangled symbol names,
@@ -321,26 +360,26 @@ typedef struct {
# ifndef JEMALLOC_NO_DEMANGLE
# define JEMALLOC_NO_DEMANGLE
# endif
-# define malloc_conf je_malloc_conf
-# define malloc_message je_malloc_message
-# define malloc je_malloc
-# define calloc je_calloc
-# define posix_memalign je_posix_memalign
# define aligned_alloc je_aligned_alloc
-# define realloc je_realloc
-# define free je_free
-# define mallocx je_mallocx
-# define rallocx je_rallocx
-# define xallocx je_xallocx
-# define sallocx je_sallocx
+# define calloc je_calloc
# define dallocx je_dallocx
-# define sdallocx je_sdallocx
-# define nallocx je_nallocx
+# define free je_free
# define mallctl je_mallctl
-# define mallctlnametomib je_mallctlnametomib
# define mallctlbymib je_mallctlbymib
+# define mallctlnametomib je_mallctlnametomib
+# define malloc je_malloc
+# define malloc_conf je_malloc_conf
+# define malloc_message je_malloc_message
# define malloc_stats_print je_malloc_stats_print
# define malloc_usable_size je_malloc_usable_size
+# define mallocx je_mallocx
+# define nallocx je_nallocx
+# define posix_memalign je_posix_memalign
+# define rallocx je_rallocx
+# define realloc je_realloc
+# define sallocx je_sallocx
+# define sdallocx je_sdallocx
+# define xallocx je_xallocx
# define valloc je_valloc
#endif
@@ -352,26 +391,26 @@ typedef struct {
* and/or --with-jemalloc-prefix.
*/
#ifndef JEMALLOC_NO_DEMANGLE
-# undef je_malloc_conf
-# undef je_malloc_message
-# undef je_malloc
-# undef je_calloc
-# undef je_posix_memalign
# undef je_aligned_alloc
-# undef je_realloc
-# undef je_free
-# undef je_mallocx
-# undef je_rallocx
-# undef je_xallocx
-# undef je_sallocx
+# undef je_calloc
# undef je_dallocx
-# undef je_sdallocx
-# undef je_nallocx
+# undef je_free
# undef je_mallctl
-# undef je_mallctlnametomib
# undef je_mallctlbymib
+# undef je_mallctlnametomib
+# undef je_malloc
+# undef je_malloc_conf
+# undef je_malloc_message
# undef je_malloc_stats_print
# undef je_malloc_usable_size
+# undef je_mallocx
+# undef je_nallocx
+# undef je_posix_memalign
+# undef je_rallocx
+# undef je_realloc
+# undef je_sallocx
+# undef je_sdallocx
+# undef je_xallocx
# undef je_valloc
#endif
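
The MALLCTL_ARENAS_ALL comment added above is the reason the value stays in raw decimal: it can be pasted into a mallctl name via cpp string concatenation. A stand-alone sketch of that pattern, purging every arena through the public interface (the wrapper function name is hypothetical):

#include <jemalloc/jemalloc.h>

#define STRINGIFY_HELPER(x) #x
#define STRINGIFY(x) STRINGIFY_HELPER(x)

/* Expands to mallctl("arena.4096.purge", ...), i.e. purge all arenas. */
static int
purge_all_arenas(void) {
	return mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge",
	    NULL, NULL, NULL, 0);
}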
diff --git a/contrib/jemalloc/include/jemalloc/jemalloc_FreeBSD.h b/contrib/jemalloc/include/jemalloc/jemalloc_FreeBSD.h
index c58a8f39ef2e..355b565ce746 100644
--- a/contrib/jemalloc/include/jemalloc/jemalloc_FreeBSD.h
+++ b/contrib/jemalloc/include/jemalloc/jemalloc_FreeBSD.h
@@ -10,51 +10,65 @@
#undef JEMALLOC_DSS
+#undef JEMALLOC_BACKGROUND_THREAD
+
/*
* The following are architecture-dependent, so conditionally define them for
* each supported architecture.
*/
#undef JEMALLOC_TLS_MODEL
#undef STATIC_PAGE_SHIFT
+#undef LG_VADDR
#undef LG_SIZEOF_PTR
#undef LG_SIZEOF_INT
#undef LG_SIZEOF_LONG
#undef LG_SIZEOF_INTMAX_T
#ifdef __i386__
+# define LG_VADDR 32
# define LG_SIZEOF_PTR 2
# define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec")))
#endif
#ifdef __ia64__
+# define LG_VADDR 64
# define LG_SIZEOF_PTR 3
#endif
#ifdef __sparc64__
+# define LG_VADDR 64
# define LG_SIZEOF_PTR 3
# define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec")))
#endif
#ifdef __amd64__
+# define LG_VADDR 48
# define LG_SIZEOF_PTR 3
# define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec")))
#endif
#ifdef __arm__
+# define LG_VADDR 32
# define LG_SIZEOF_PTR 2
#endif
#ifdef __aarch64__
+# define LG_VADDR 48
# define LG_SIZEOF_PTR 3
#endif
#ifdef __mips__
#ifdef __mips_n64
+# define LG_VADDR 64
# define LG_SIZEOF_PTR 3
#else
+# define LG_VADDR 32
# define LG_SIZEOF_PTR 2
#endif
#endif
#ifdef __powerpc64__
+# define LG_VADDR 64
# define LG_SIZEOF_PTR 3
#elif defined(__powerpc__)
+# define LG_VADDR 32
# define LG_SIZEOF_PTR 2
#endif
#ifdef __riscv__
+# define LG_VADDR 64
# define LG_SIZEOF_PTR 3
#endif
@@ -128,8 +142,17 @@ extern int __isthreaded;
#define read _read
#define write _write
#define close _close
+#define pthread_join _pthread_join
+#define pthread_once _pthread_once
+#define pthread_self _pthread_self
+#define pthread_equal _pthread_equal
#define pthread_mutex_lock _pthread_mutex_lock
+#define pthread_mutex_trylock _pthread_mutex_trylock
#define pthread_mutex_unlock _pthread_mutex_unlock
+#define pthread_cond_init _pthread_cond_init
+#define pthread_cond_wait _pthread_cond_wait
+#define pthread_cond_timedwait _pthread_cond_timedwait
+#define pthread_cond_signal _pthread_cond_signal
#ifdef JEMALLOC_C_
/*
diff --git a/contrib/jemalloc/include/jemalloc/jemalloc_typedefs.h b/contrib/jemalloc/include/jemalloc/jemalloc_typedefs.h
index fa7b350adcda..1a58874306eb 100644
--- a/contrib/jemalloc/include/jemalloc/jemalloc_typedefs.h
+++ b/contrib/jemalloc/include/jemalloc/jemalloc_typedefs.h
@@ -1,57 +1,77 @@
+typedef struct extent_hooks_s extent_hooks_t;
+
/*
* void *
- * chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
- * bool *commit, unsigned arena_ind);
+ * extent_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
+ * size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
*/
-typedef void *(chunk_alloc_t)(void *, size_t, size_t, bool *, bool *, unsigned);
+typedef void *(extent_alloc_t)(extent_hooks_t *, void *, size_t, size_t, bool *,
+ bool *, unsigned);
/*
* bool
- * chunk_dalloc(void *chunk, size_t size, bool committed, unsigned arena_ind);
+ * extent_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ * bool committed, unsigned arena_ind);
*/
-typedef bool (chunk_dalloc_t)(void *, size_t, bool, unsigned);
+typedef bool (extent_dalloc_t)(extent_hooks_t *, void *, size_t, bool,
+ unsigned);
+
+/*
+ * void
+ * extent_destroy(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ * bool committed, unsigned arena_ind);
+ */
+typedef void (extent_destroy_t)(extent_hooks_t *, void *, size_t, bool,
+ unsigned);
/*
* bool
- * chunk_commit(void *chunk, size_t size, size_t offset, size_t length,
- * unsigned arena_ind);
+ * extent_commit(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ * size_t offset, size_t length, unsigned arena_ind);
*/
-typedef bool (chunk_commit_t)(void *, size_t, size_t, size_t, unsigned);
+typedef bool (extent_commit_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
+ unsigned);
/*
* bool
- * chunk_decommit(void *chunk, size_t size, size_t offset, size_t length,
- * unsigned arena_ind);
+ * extent_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ * size_t offset, size_t length, unsigned arena_ind);
*/
-typedef bool (chunk_decommit_t)(void *, size_t, size_t, size_t, unsigned);
+typedef bool (extent_decommit_t)(extent_hooks_t *, void *, size_t, size_t,
+ size_t, unsigned);
/*
* bool
- * chunk_purge(void *chunk, size_t size, size_t offset, size_t length,
- * unsigned arena_ind);
+ * extent_purge(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ * size_t offset, size_t length, unsigned arena_ind);
*/
-typedef bool (chunk_purge_t)(void *, size_t, size_t, size_t, unsigned);
+typedef bool (extent_purge_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
+ unsigned);
/*
* bool
- * chunk_split(void *chunk, size_t size, size_t size_a, size_t size_b,
- * bool committed, unsigned arena_ind);
+ * extent_split(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ * size_t size_a, size_t size_b, bool committed, unsigned arena_ind);
*/
-typedef bool (chunk_split_t)(void *, size_t, size_t, size_t, bool, unsigned);
+typedef bool (extent_split_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
+ bool, unsigned);
/*
* bool
- * chunk_merge(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
- * bool committed, unsigned arena_ind);
+ * extent_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
+ * void *addr_b, size_t size_b, bool committed, unsigned arena_ind);
*/
-typedef bool (chunk_merge_t)(void *, size_t, void *, size_t, bool, unsigned);
-
-typedef struct {
- chunk_alloc_t *alloc;
- chunk_dalloc_t *dalloc;
- chunk_commit_t *commit;
- chunk_decommit_t *decommit;
- chunk_purge_t *purge;
- chunk_split_t *split;
- chunk_merge_t *merge;
-} chunk_hooks_t;
+typedef bool (extent_merge_t)(extent_hooks_t *, void *, size_t, void *, size_t,
+ bool, unsigned);
+
+struct extent_hooks_s {
+ extent_alloc_t *alloc;
+ extent_dalloc_t *dalloc;
+ extent_destroy_t *destroy;
+ extent_commit_t *commit;
+ extent_decommit_t *decommit;
+ extent_purge_t *purge_lazy;
+ extent_purge_t *purge_forced;
+ extent_split_t *split;
+ extent_merge_t *merge;
+};
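
Both this header and jemalloc.h above replace chunk_hooks_t with extent_hooks_t, adding a destroy hook and splitting purge into purge_lazy/purge_forced. Custom hook tables are exchanged with the allocator through the "arena.<i>.extent_hooks" mallctl, the successor of "arena.<i>.chunk_hooks". A hedged sketch of reading the table currently installed for arena 0 (function name hypothetical, error handling reduced to a NULL return):

#include <stddef.h>
#include <jemalloc/jemalloc.h>

static extent_hooks_t *
arena0_extent_hooks(void) {
	extent_hooks_t *hooks;
	size_t sz = sizeof(hooks);

	/* Reading yields the current table; writing a pointer installs new hooks. */
	if (mallctl("arena.0.extent_hooks", (void *)&hooks, &sz, NULL, 0) != 0) {
		return NULL;
	}
	return hooks;
}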
diff --git a/contrib/jemalloc/src/arena.c b/contrib/jemalloc/src/arena.c
index a9dff0b0a266..019dd8775746 100644
--- a/contrib/jemalloc/src/arena.c
+++ b/contrib/jemalloc/src/arena.c
@@ -1,30 +1,58 @@
-#define JEMALLOC_ARENA_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define JEMALLOC_ARENA_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/extent_dss.h"
+#include "jemalloc/internal/extent_mmap.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/util.h"
/******************************************************************************/
/* Data. */
-bool opt_thp = true;
-static bool thp_initially_huge;
-purge_mode_t opt_purge = PURGE_DEFAULT;
-const char *purge_mode_names[] = {
- "ratio",
- "decay",
- "N/A"
+/*
+ * Define names for both uninitialized and initialized phases, so that
+ * options and mallctl processing are straightforward.
+ */
+const char *percpu_arena_mode_names[] = {
+ "percpu",
+ "phycpu",
+ "disabled",
+ "percpu",
+ "phycpu"
+};
+percpu_arena_mode_t opt_percpu_arena = PERCPU_ARENA_DEFAULT;
+
+ssize_t opt_dirty_decay_ms = DIRTY_DECAY_MS_DEFAULT;
+ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT;
+
+static atomic_zd_t dirty_decay_ms_default;
+static atomic_zd_t muzzy_decay_ms_default;
+
+const arena_bin_info_t arena_bin_info[NBINS] = {
+#define BIN_INFO_bin_yes(reg_size, slab_size, nregs) \
+ {reg_size, slab_size, nregs, BITMAP_INFO_INITIALIZER(nregs)},
+#define BIN_INFO_bin_no(reg_size, slab_size, nregs)
+#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, \
+ lg_delta_lookup) \
+ BIN_INFO_bin_##bin((1U<<lg_grp) + (ndelta<<lg_delta), \
+ (pgs << LG_PAGE), (pgs << LG_PAGE) / ((1U<<lg_grp) + \
+ (ndelta<<lg_delta)))
+ SIZE_CLASSES
+#undef BIN_INFO_bin_yes
+#undef BIN_INFO_bin_no
+#undef SC
};
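
arena_bin_info is now generated at compile time by running the SIZE_CLASSES X-macro with BIN_INFO_bin_yes/BIN_INFO_bin_no as the per-entry expansions, so only size classes that have a bin contribute an initializer. A stand-alone sketch of the same X-macro technique with made-up classes (not jemalloc's real SIZE_CLASSES):

#include <stddef.h>
#include <stdio.h>

/* One list of classes drives however many tables need to stay in sync. */
#define EXAMPLE_CLASSES \
	CLASS(0, 8)	\
	CLASS(1, 16)	\
	CLASS(2, 32)

static const size_t example_class_sizes[] = {
#define CLASS(index, size)	size,
	EXAMPLE_CLASSES
#undef CLASS
};

int
main(void) {
	for (size_t i = 0;
	    i < sizeof(example_class_sizes) / sizeof(example_class_sizes[0]);
	    i++) {
		printf("class %zu: %zu bytes\n", i, example_class_sizes[i]);
	}
	return 0;
}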
-ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
-static ssize_t lg_dirty_mult_default;
-ssize_t opt_decay_time = DECAY_TIME_DEFAULT;
-static ssize_t decay_time_default;
-
-arena_bin_info_t arena_bin_info[NBINS];
-size_t map_bias;
-size_t map_misc_offset;
-size_t arena_maxrun; /* Max run size for arenas. */
-size_t large_maxclass; /* Max large size class. */
-unsigned nlclasses; /* Number of large size classes. */
-unsigned nhclasses; /* Number of huge size classes. */
+const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
+#define STEP(step, h, x, y) \
+ h,
+ SMOOTHSTEP
+#undef STEP
+};
/******************************************************************************/
/*
@@ -32,1485 +60,751 @@ unsigned nhclasses; /* Number of huge size classes. */
* definition.
*/
-static void arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena,
- arena_chunk_t *chunk);
-static void arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena,
- size_t ndirty_limit);
-static void arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run,
- bool dirty, bool cleaned, bool decommitted);
-static void arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena,
- arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin);
-static void arena_bin_lower_run(arena_t *arena, arena_run_t *run,
+static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena,
+ arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit);
+static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
+ bool is_background_thread, bool all);
+static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
+ arena_bin_t *bin);
+static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
arena_bin_t *bin);
/******************************************************************************/
-JEMALLOC_INLINE_C size_t
-arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm)
-{
- arena_chunk_t *chunk;
- size_t pageind, mapbits;
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
- pageind = arena_miscelm_to_pageind(miscelm);
- mapbits = arena_mapbits_get(chunk, pageind);
- return (arena_mapbits_size_decode(mapbits));
-}
-
-JEMALLOC_INLINE_C const extent_node_t *
-arena_miscelm_extent_get(const arena_chunk_map_misc_t *miscelm)
-{
- arena_chunk_t *chunk;
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
- return (&chunk->node);
-}
-
-JEMALLOC_INLINE_C int
-arena_sn_comp(const arena_chunk_map_misc_t *a, const arena_chunk_map_misc_t *b)
-{
- size_t a_sn, b_sn;
-
- assert(a != NULL);
- assert(b != NULL);
-
- a_sn = extent_node_sn_get(arena_miscelm_extent_get(a));
- b_sn = extent_node_sn_get(arena_miscelm_extent_get(b));
-
- return ((a_sn > b_sn) - (a_sn < b_sn));
-}
-
-JEMALLOC_INLINE_C int
-arena_ad_comp(const arena_chunk_map_misc_t *a,
- const arena_chunk_map_misc_t *b)
-{
- uintptr_t a_miscelm = (uintptr_t)a;
- uintptr_t b_miscelm = (uintptr_t)b;
-
- assert(a != NULL);
- assert(b != NULL);
-
- return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
-}
-
-JEMALLOC_INLINE_C int
-arena_snad_comp(const arena_chunk_map_misc_t *a,
- const arena_chunk_map_misc_t *b)
-{
- int ret;
-
- assert(a != NULL);
- assert(b != NULL);
-
- ret = arena_sn_comp(a, b);
- if (ret != 0)
- return (ret);
-
- ret = arena_ad_comp(a, b);
- return (ret);
-}
-
-/* Generate pairing heap functions. */
-ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t,
- ph_link, arena_snad_comp)
-
-#ifdef JEMALLOC_JET
-#undef run_quantize_floor
-#define run_quantize_floor JEMALLOC_N(n_run_quantize_floor)
-#endif
-static size_t
-run_quantize_floor(size_t size)
-{
- size_t ret;
- pszind_t pind;
-
- assert(size > 0);
- assert(size <= HUGE_MAXCLASS);
- assert((size & PAGE_MASK) == 0);
-
- assert(size != 0);
- assert(size == PAGE_CEILING(size));
-
- pind = psz2ind(size - large_pad + 1);
- if (pind == 0) {
- /*
- * Avoid underflow. This short-circuit would also do the right
- * thing for all sizes in the range for which there are
- * PAGE-spaced size classes, but it's simplest to just handle
- * the one case that would cause erroneous results.
- */
- return (size);
+static bool
+arena_stats_init(tsdn_t *tsdn, arena_stats_t *arena_stats) {
+ if (config_debug) {
+ for (size_t i = 0; i < sizeof(arena_stats_t); i++) {
+ assert(((char *)arena_stats)[i] == 0);
+ }
}
- ret = pind2sz(pind - 1) + large_pad;
- assert(ret <= size);
- return (ret);
-}
-#ifdef JEMALLOC_JET
-#undef run_quantize_floor
-#define run_quantize_floor JEMALLOC_N(run_quantize_floor)
-run_quantize_t *run_quantize_floor = JEMALLOC_N(n_run_quantize_floor);
-#endif
-
-#ifdef JEMALLOC_JET
-#undef run_quantize_ceil
-#define run_quantize_ceil JEMALLOC_N(n_run_quantize_ceil)
-#endif
-static size_t
-run_quantize_ceil(size_t size)
-{
- size_t ret;
-
- assert(size > 0);
- assert(size <= HUGE_MAXCLASS);
- assert((size & PAGE_MASK) == 0);
-
- ret = run_quantize_floor(size);
- if (ret < size) {
- /*
- * Skip a quantization that may have an adequately large run,
- * because under-sized runs may be mixed in. This only happens
- * when an unusual size is requested, i.e. for aligned
- * allocation, and is just one of several places where linear
- * search would potentially find sufficiently aligned available
- * memory somewhere lower.
- */
- ret = pind2sz(psz2ind(ret - large_pad + 1)) + large_pad;
+#ifndef JEMALLOC_ATOMIC_U64
+ if (malloc_mutex_init(&arena_stats->mtx, "arena_stats",
+ WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) {
+ return true;
}
- return (ret);
-}
-#ifdef JEMALLOC_JET
-#undef run_quantize_ceil
-#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
-run_quantize_t *run_quantize_ceil = JEMALLOC_N(n_run_quantize_ceil);
#endif
-
-static void
-arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
- size_t npages)
-{
- pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
- arena_miscelm_get_const(chunk, pageind))));
- assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
- LG_PAGE));
- assert((npages << LG_PAGE) < chunksize);
- assert(pind2sz(pind) <= chunksize);
- arena_run_heap_insert(&arena->runs_avail[pind],
- arena_miscelm_get_mutable(chunk, pageind));
+ /* Memory is zeroed, so there is no need to clear stats. */
+ return false;
}
static void
-arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
- size_t npages)
-{
- pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
- arena_miscelm_get_const(chunk, pageind))));
- assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
- LG_PAGE));
- assert((npages << LG_PAGE) < chunksize);
- assert(pind2sz(pind) <= chunksize);
- arena_run_heap_remove(&arena->runs_avail[pind],
- arena_miscelm_get_mutable(chunk, pageind));
-}
-
-static void
-arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
- size_t npages)
-{
- arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
- pageind);
-
- assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
- LG_PAGE));
- assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
- assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
- CHUNK_MAP_DIRTY);
-
- qr_new(&miscelm->rd, rd_link);
- qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link);
- arena->ndirty += npages;
+arena_stats_lock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
+#ifndef JEMALLOC_ATOMIC_U64
+ malloc_mutex_lock(tsdn, &arena_stats->mtx);
+#endif
}
static void
-arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
- size_t npages)
-{
- arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
- pageind);
-
- assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
- LG_PAGE));
- assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
- assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
- CHUNK_MAP_DIRTY);
-
- qr_remove(&miscelm->rd, rd_link);
- assert(arena->ndirty >= npages);
- arena->ndirty -= npages;
-}
-
-static size_t
-arena_chunk_dirty_npages(const extent_node_t *node)
-{
-
- return (extent_node_size_get(node) >> LG_PAGE);
-}
-
-void
-arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache)
-{
-
- if (cache) {
- extent_node_dirty_linkage_init(node);
- extent_node_dirty_insert(node, &arena->runs_dirty,
- &arena->chunks_cache);
- arena->ndirty += arena_chunk_dirty_npages(node);
- }
+arena_stats_unlock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
+#ifndef JEMALLOC_ATOMIC_U64
+ malloc_mutex_unlock(tsdn, &arena_stats->mtx);
+#endif
}
-void
-arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
-{
-
- if (dirty) {
- extent_node_dirty_remove(node);
- assert(arena->ndirty >= arena_chunk_dirty_npages(node));
- arena->ndirty -= arena_chunk_dirty_npages(node);
- }
+static uint64_t
+arena_stats_read_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
+ arena_stats_u64_t *p) {
+#ifdef JEMALLOC_ATOMIC_U64
+ return atomic_load_u64(p, ATOMIC_RELAXED);
+#else
+ malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
+ return *p;
+#endif
}
-JEMALLOC_INLINE_C void *
-arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
-{
- void *ret;
- size_t regind;
- arena_chunk_map_misc_t *miscelm;
- void *rpages;
-
- assert(run->nfree > 0);
- assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));
-
- regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
- miscelm = arena_run_to_miscelm(run);
- rpages = arena_miscelm_to_rpages(miscelm);
- ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
- (uintptr_t)(bin_info->reg_interval * regind));
- run->nfree--;
- return (ret);
-}
-
-JEMALLOC_INLINE_C void
-arena_run_reg_dalloc(arena_run_t *run, void *ptr)
-{
- arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- size_t mapbits = arena_mapbits_get(chunk, pageind);
- szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
- arena_bin_info_t *bin_info = &arena_bin_info[binind];
- size_t regind = arena_run_regind(run, bin_info, ptr);
-
- assert(run->nfree < bin_info->nregs);
- /* Freeing an interior pointer can cause assertion failure. */
- assert(((uintptr_t)ptr -
- ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
- (uintptr_t)bin_info->reg0_offset)) %
- (uintptr_t)bin_info->reg_interval == 0);
- assert((uintptr_t)ptr >=
- (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
- (uintptr_t)bin_info->reg0_offset);
- /* Freeing an unallocated pointer can cause assertion failure. */
- assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind));
-
- bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind);
- run->nfree++;
+static void
+arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
+ arena_stats_u64_t *p, uint64_t x) {
+#ifdef JEMALLOC_ATOMIC_U64
+ atomic_fetch_add_u64(p, x, ATOMIC_RELAXED);
+#else
+ malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
+ *p += x;
+#endif
}
-JEMALLOC_INLINE_C void
-arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
-{
-
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
- (run_ind << LG_PAGE)), (npages << LG_PAGE));
- memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
- (npages << LG_PAGE));
+UNUSED static void
+arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
+ arena_stats_u64_t *p, uint64_t x) {
+#ifdef JEMALLOC_ATOMIC_U64
+ UNUSED uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED);
+ assert(r - x <= r);
+#else
+ malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
+ *p -= x;
+ assert(*p + x >= *p);
+#endif
}
-JEMALLOC_INLINE_C void
-arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
-{
-
- JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
- << LG_PAGE)), PAGE);
+/*
+ * Non-atomically sets *dst += src. *dst needs external synchronization.
+ * This lets us avoid the cost of a fetch_add when it's unnecessary (note that
+ * the types here are atomic).
+ */
+static void
+arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) {
+#ifdef JEMALLOC_ATOMIC_U64
+ uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
+ atomic_store_u64(dst, src + cur_dst, ATOMIC_RELAXED);
+#else
+ *dst += src;
+#endif
}
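
As the comment above notes, arena_stats_accum_u64() deliberately avoids a fetch_add because the destination is private to the merging thread; only the source may be mutated concurrently. A stand-alone C11 sketch of that pattern (names hypothetical):

#include <stdatomic.h>
#include <stdint.h>

/*
 * src may be bumped concurrently with relaxed atomics, but dst belongs to the
 * caller alone, so a plain load/add/store suffices; no read-modify-write is
 * needed.
 */
static void
accum_u64(_Atomic uint64_t *dst, _Atomic uint64_t *src) {
	uint64_t cur = atomic_load_explicit(dst, memory_order_relaxed);
	uint64_t s = atomic_load_explicit(src, memory_order_relaxed);

	atomic_store_explicit(dst, cur + s, memory_order_relaxed);
}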
-JEMALLOC_INLINE_C void
-arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
-{
- size_t i;
- UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));
-
- arena_run_page_mark_zeroed(chunk, run_ind);
- for (i = 0; i < PAGE / sizeof(size_t); i++)
- assert(p[i] == 0);
+static size_t
+arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p) {
+#ifdef JEMALLOC_ATOMIC_U64
+ return atomic_load_zu(p, ATOMIC_RELAXED);
+#else
+ malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
+ return atomic_load_zu(p, ATOMIC_RELAXED);
+#endif
}
static void
-arena_nactive_add(arena_t *arena, size_t add_pages)
-{
-
- if (config_stats) {
- size_t cactive_add = CHUNK_CEILING((arena->nactive +
- add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
- LG_PAGE);
- if (cactive_add != 0)
- stats_cactive_add(cactive_add);
- }
- arena->nactive += add_pages;
+arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
+ size_t x) {
+#ifdef JEMALLOC_ATOMIC_U64
+ atomic_fetch_add_zu(p, x, ATOMIC_RELAXED);
+#else
+ malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
+ size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
+ atomic_store_zu(p, cur + x, ATOMIC_RELAXED);
+#endif
}
static void
-arena_nactive_sub(arena_t *arena, size_t sub_pages)
-{
-
- if (config_stats) {
- size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) -
- CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE);
- if (cactive_sub != 0)
- stats_cactive_sub(cactive_sub);
- }
- arena->nactive -= sub_pages;
+arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
+ size_t x) {
+#ifdef JEMALLOC_ATOMIC_U64
+ UNUSED size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED);
+ assert(r - x <= r);
+#else
+ malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
+ size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
+ atomic_store_zu(p, cur - x, ATOMIC_RELAXED);
+#endif
}
+/* Like the _u64 variant, needs an externally synchronized *dst. */
static void
-arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
- size_t flag_dirty, size_t flag_decommitted, size_t need_pages)
-{
- size_t total_pages, rem_pages;
-
- assert(flag_dirty == 0 || flag_decommitted == 0);
-
- total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
- LG_PAGE;
- assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
- flag_dirty);
- assert(need_pages <= total_pages);
- rem_pages = total_pages - need_pages;
-
- arena_avail_remove(arena, chunk, run_ind, total_pages);
- if (flag_dirty != 0)
- arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
- arena_nactive_add(arena, need_pages);
-
- /* Keep track of trailing unused pages for later use. */
- if (rem_pages > 0) {
- size_t flags = flag_dirty | flag_decommitted;
- size_t flag_unzeroed_mask = (flags == 0) ? CHUNK_MAP_UNZEROED :
- 0;
-
- arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
- (rem_pages << LG_PAGE), flags |
- (arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) &
- flag_unzeroed_mask));
- arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1,
- (rem_pages << LG_PAGE), flags |
- (arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) &
- flag_unzeroed_mask));
- if (flag_dirty != 0) {
- arena_run_dirty_insert(arena, chunk, run_ind+need_pages,
- rem_pages);
- }
- arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages);
- }
-}
-
-static bool
-arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
- bool remove, bool zero)
-{
- arena_chunk_t *chunk;
- arena_chunk_map_misc_t *miscelm;
- size_t flag_dirty, flag_decommitted, run_ind, need_pages;
- size_t flag_unzeroed_mask;
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- miscelm = arena_run_to_miscelm(run);
- run_ind = arena_miscelm_to_pageind(miscelm);
- flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
- flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
- need_pages = (size >> LG_PAGE);
- assert(need_pages > 0);
-
- if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
- run_ind << LG_PAGE, size, arena->ind))
- return (true);
-
- if (remove) {
- arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
- flag_decommitted, need_pages);
- }
-
- if (zero) {
- if (flag_decommitted != 0) {
- /* The run is untouched, and therefore zeroed. */
- JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
- *)((uintptr_t)chunk + (run_ind << LG_PAGE)),
- (need_pages << LG_PAGE));
- } else if (flag_dirty != 0) {
- /* The run is dirty, so all pages must be zeroed. */
- arena_run_zero(chunk, run_ind, need_pages);
- } else {
- /*
- * The run is clean, so some pages may be zeroed (i.e.
- * never before touched).
- */
- size_t i;
- for (i = 0; i < need_pages; i++) {
- if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
- != 0)
- arena_run_zero(chunk, run_ind+i, 1);
- else if (config_debug) {
- arena_run_page_validate_zeroed(chunk,
- run_ind+i);
- } else {
- arena_run_page_mark_zeroed(chunk,
- run_ind+i);
- }
- }
- }
- } else {
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
- (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
- }
-
- /*
- * Set the last element first, in case the run only contains one page
- * (i.e. both statements set the same element).
- */
- flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
- CHUNK_MAP_UNZEROED : 0;
- arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty |
- (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
- run_ind+need_pages-1)));
- arena_mapbits_large_set(chunk, run_ind, size, flag_dirty |
- (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind)));
- return (false);
-}
-
-static bool
-arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
-{
-
- return (arena_run_split_large_helper(arena, run, size, true, zero));
+arena_stats_accum_zu(atomic_zu_t *dst, size_t src) {
+ size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
+ atomic_store_zu(dst, src + cur_dst, ATOMIC_RELAXED);
}
-static bool
-arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
-{
-
- return (arena_run_split_large_helper(arena, run, size, false, zero));
+void
+arena_stats_large_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
+ szind_t szind, uint64_t nrequests) {
+ arena_stats_lock(tsdn, arena_stats);
+ arena_stats_add_u64(tsdn, arena_stats, &arena_stats->lstats[szind -
+ NBINS].nrequests, nrequests);
+ arena_stats_unlock(tsdn, arena_stats);
}
-static bool
-arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
- szind_t binind)
-{
- arena_chunk_t *chunk;
- arena_chunk_map_misc_t *miscelm;
- size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;
-
- assert(binind != BININD_INVALID);
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- miscelm = arena_run_to_miscelm(run);
- run_ind = arena_miscelm_to_pageind(miscelm);
- flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
- flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
- need_pages = (size >> LG_PAGE);
- assert(need_pages > 0);
-
- if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
- run_ind << LG_PAGE, size, arena->ind))
- return (true);
-
- arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
- flag_decommitted, need_pages);
-
- for (i = 0; i < need_pages; i++) {
- size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk,
- run_ind+i);
- arena_mapbits_small_set(chunk, run_ind+i, i, binind,
- flag_unzeroed);
- if (config_debug && flag_dirty == 0 && flag_unzeroed == 0)
- arena_run_page_validate_zeroed(chunk, run_ind+i);
- }
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
- (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
- return (false);
+void
+arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) {
+ arena_stats_lock(tsdn, arena_stats);
+ arena_stats_add_zu(tsdn, arena_stats, &arena_stats->mapped, size);
+ arena_stats_unlock(tsdn, arena_stats);
}
-static arena_chunk_t *
-arena_chunk_init_spare(arena_t *arena)
-{
- arena_chunk_t *chunk;
-
- assert(arena->spare != NULL);
-
- chunk = arena->spare;
- arena->spare = NULL;
-
- assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
- assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
- assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
- arena_maxrun);
- assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
- arena_maxrun);
- assert(arena_mapbits_dirty_get(chunk, map_bias) ==
- arena_mapbits_dirty_get(chunk, chunk_npages-1));
-
- return (chunk);
+void
+arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
+ const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
+ size_t *nactive, size_t *ndirty, size_t *nmuzzy) {
+ *nthreads += arena_nthreads_get(arena, false);
+ *dss = dss_prec_names[arena_dss_prec_get(arena)];
+ *dirty_decay_ms = arena_dirty_decay_ms_get(arena);
+ *muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
+ *nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED);
+ *ndirty += extents_npages_get(&arena->extents_dirty);
+ *nmuzzy += extents_npages_get(&arena->extents_muzzy);
}
-static bool
-arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, size_t sn, bool zero,
- bool *gdump)
-{
+void
+arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
+ const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
+ size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
+ malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats) {
+ cassert(config_stats);
- /*
- * The extent node notion of "committed" doesn't directly apply to
- * arena chunks. Arbitrarily mark them as committed. The commit state
- * of runs is tracked individually, and upon chunk deallocation the
- * entire chunk is in a consistent commit state.
- */
- extent_node_init(&chunk->node, arena, chunk, chunksize, sn, zero, true);
- extent_node_achunk_set(&chunk->node, true);
- return (chunk_register(chunk, &chunk->node, gdump));
-}
-
-static arena_chunk_t *
-arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks, bool *zero, bool *commit)
-{
- arena_chunk_t *chunk;
- size_t sn;
-
- malloc_mutex_unlock(tsdn, &arena->lock);
- /* prof_gdump() requirement. */
- witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
-
- chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks,
- NULL, chunksize, chunksize, &sn, zero, commit);
- if (chunk != NULL && !*commit) {
- /* Commit header. */
- if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
- LG_PAGE, arena->ind)) {
- chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
- (void *)chunk, chunksize, sn, *zero, *commit);
- chunk = NULL;
- }
- }
- if (chunk != NULL) {
- bool gdump;
- if (arena_chunk_register(arena, chunk, sn, *zero, &gdump)) {
- if (!*commit) {
- /* Undo commit of header. */
- chunk_hooks->decommit(chunk, chunksize, 0,
- map_bias << LG_PAGE, arena->ind);
- }
- chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
- (void *)chunk, chunksize, sn, *zero, *commit);
- chunk = NULL;
+ arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms,
+ muzzy_decay_ms, nactive, ndirty, nmuzzy);
+
+ size_t base_allocated, base_resident, base_mapped;
+ base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
+ &base_mapped);
+
+ arena_stats_lock(tsdn, &arena->stats);
+
+ arena_stats_accum_zu(&astats->mapped, base_mapped
+ + arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped));
+ arena_stats_accum_zu(&astats->retained,
+ extents_npages_get(&arena->extents_retained) << LG_PAGE);
+
+ arena_stats_accum_u64(&astats->decay_dirty.npurge,
+ arena_stats_read_u64(tsdn, &arena->stats,
+ &arena->stats.decay_dirty.npurge));
+ arena_stats_accum_u64(&astats->decay_dirty.nmadvise,
+ arena_stats_read_u64(tsdn, &arena->stats,
+ &arena->stats.decay_dirty.nmadvise));
+ arena_stats_accum_u64(&astats->decay_dirty.purged,
+ arena_stats_read_u64(tsdn, &arena->stats,
+ &arena->stats.decay_dirty.purged));
+
+ arena_stats_accum_u64(&astats->decay_muzzy.npurge,
+ arena_stats_read_u64(tsdn, &arena->stats,
+ &arena->stats.decay_muzzy.npurge));
+ arena_stats_accum_u64(&astats->decay_muzzy.nmadvise,
+ arena_stats_read_u64(tsdn, &arena->stats,
+ &arena->stats.decay_muzzy.nmadvise));
+ arena_stats_accum_u64(&astats->decay_muzzy.purged,
+ arena_stats_read_u64(tsdn, &arena->stats,
+ &arena->stats.decay_muzzy.purged));
+
+ arena_stats_accum_zu(&astats->base, base_allocated);
+ arena_stats_accum_zu(&astats->internal, arena_internal_get(arena));
+ arena_stats_accum_zu(&astats->resident, base_resident +
+ (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) +
+ extents_npages_get(&arena->extents_dirty) +
+ extents_npages_get(&arena->extents_muzzy)) << LG_PAGE)));
+
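+ /*
+ * Fold the per-size-class large allocation counters into both lstats
+ * and the arena-wide totals; curlextents is derived as nmalloc -
+ * ndalloc for each class.
+ */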
+ for (szind_t i = 0; i < NSIZES - NBINS; i++) {
+ uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats,
+ &arena->stats.lstats[i].nmalloc);
+ arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc);
+ arena_stats_accum_u64(&astats->nmalloc_large, nmalloc);
+
+ uint64_t ndalloc = arena_stats_read_u64(tsdn, &arena->stats,
+ &arena->stats.lstats[i].ndalloc);
+ arena_stats_accum_u64(&lstats[i].ndalloc, ndalloc);
+ arena_stats_accum_u64(&astats->ndalloc_large, ndalloc);
+
+ uint64_t nrequests = arena_stats_read_u64(tsdn, &arena->stats,
+ &arena->stats.lstats[i].nrequests);
+ arena_stats_accum_u64(&lstats[i].nrequests,
+ nmalloc + nrequests);
+ arena_stats_accum_u64(&astats->nrequests_large,
+ nmalloc + nrequests);
+
+ assert(nmalloc >= ndalloc);
+ assert(nmalloc - ndalloc <= SIZE_T_MAX);
+ size_t curlextents = (size_t)(nmalloc - ndalloc);
+ lstats[i].curlextents += curlextents;
+ arena_stats_accum_zu(&astats->allocated_large,
+ curlextents * sz_index2size(NBINS + i));
+ }
+
+ arena_stats_unlock(tsdn, &arena->stats);
+
+ /* tcache_bytes counts currently cached bytes. */
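+ /*
+ * Walk every tcache bound to this arena and sum ncached * usable size
+ * over both the small and large bins.
+ */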
+ atomic_store_zu(&astats->tcache_bytes, 0, ATOMIC_RELAXED);
+ malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
+ tcache_t *tcache;
+ ql_foreach(tcache, &arena->tcache_ql, link) {
+ szind_t i = 0;
+ for (; i < NBINS; i++) {
+ tcache_bin_t *tbin = tcache_small_bin_get(tcache, i);
+ arena_stats_accum_zu(&astats->tcache_bytes,
+ tbin->ncached * sz_index2size(i));
}
- if (config_prof && opt_prof && gdump)
- prof_gdump(tsdn);
- }
-
- malloc_mutex_lock(tsdn, &arena->lock);
- return (chunk);
-}
-
-static arena_chunk_t *
-arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
- bool *commit)
-{
- arena_chunk_t *chunk;
- chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
- size_t sn;
-
- /* prof_gdump() requirement. */
- witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 1);
- malloc_mutex_assert_owner(tsdn, &arena->lock);
-
- chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize,
- chunksize, &sn, zero, commit, true);
- if (chunk != NULL) {
- bool gdump;
- if (arena_chunk_register(arena, chunk, sn, *zero, &gdump)) {
- chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk,
- chunksize, sn, true);
- return (NULL);
+ for (; i < nhbins; i++) {
+ tcache_bin_t *tbin = tcache_large_bin_get(tcache, i);
+ arena_stats_accum_zu(&astats->tcache_bytes,
+ tbin->ncached * sz_index2size(i));
}
- if (config_prof && opt_prof && gdump) {
- malloc_mutex_unlock(tsdn, &arena->lock);
- prof_gdump(tsdn);
- malloc_mutex_lock(tsdn, &arena->lock);
- }
- }
- if (chunk == NULL) {
- chunk = arena_chunk_alloc_internal_hard(tsdn, arena,
- &chunk_hooks, zero, commit);
}
+ malloc_mutex_prof_read(tsdn,
+ &astats->mutex_prof_data[arena_prof_mutex_tcache_list],
+ &arena->tcache_ql_mtx);
+ malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
+
+#define READ_ARENA_MUTEX_PROF_DATA(mtx, ind) \
+ malloc_mutex_lock(tsdn, &arena->mtx); \
+ malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[ind], \
+ &arena->mtx); \
+ malloc_mutex_unlock(tsdn, &arena->mtx);
+
+ /* Gather per-arena mutex profiling data. */
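+ /*
+ * Each invocation briefly takes the named mutex so its profiling
+ * counters can be copied into astats->mutex_prof_data[ind].
+ */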
+ READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large);
+ READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx,
+ arena_prof_mutex_extent_avail)
+ READ_ARENA_MUTEX_PROF_DATA(extents_dirty.mtx,
+ arena_prof_mutex_extents_dirty)
+ READ_ARENA_MUTEX_PROF_DATA(extents_muzzy.mtx,
+ arena_prof_mutex_extents_muzzy)
+ READ_ARENA_MUTEX_PROF_DATA(extents_retained.mtx,
+ arena_prof_mutex_extents_retained)
+ READ_ARENA_MUTEX_PROF_DATA(decay_dirty.mtx,
+ arena_prof_mutex_decay_dirty)
+ READ_ARENA_MUTEX_PROF_DATA(decay_muzzy.mtx,
+ arena_prof_mutex_decay_muzzy)
+ READ_ARENA_MUTEX_PROF_DATA(base->mtx,
+ arena_prof_mutex_base)
+#undef READ_ARENA_MUTEX_PROF_DATA
+
+ nstime_copy(&astats->uptime, &arena->create_time);
+ nstime_update(&astats->uptime);
+ nstime_subtract(&astats->uptime, &arena->create_time);
+
+ for (szind_t i = 0; i < NBINS; i++) {
+ arena_bin_t *bin = &arena->bins[i];
- if (config_stats && chunk != NULL) {
- arena->stats.mapped += chunksize;
- arena->stats.metadata_mapped += (map_bias << LG_PAGE);
+ malloc_mutex_lock(tsdn, &bin->lock);
+ malloc_mutex_prof_read(tsdn, &bstats[i].mutex_data, &bin->lock);
+ bstats[i].nmalloc += bin->stats.nmalloc;
+ bstats[i].ndalloc += bin->stats.ndalloc;
+ bstats[i].nrequests += bin->stats.nrequests;
+ bstats[i].curregs += bin->stats.curregs;
+ bstats[i].nfills += bin->stats.nfills;
+ bstats[i].nflushes += bin->stats.nflushes;
+ bstats[i].nslabs += bin->stats.nslabs;
+ bstats[i].reslabs += bin->stats.reslabs;
+ bstats[i].curslabs += bin->stats.curslabs;
+ malloc_mutex_unlock(tsdn, &bin->lock);
}
-
- return (chunk);
}
-static arena_chunk_t *
-arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
-{
- arena_chunk_t *chunk;
- bool zero, commit;
- size_t flag_unzeroed, flag_decommitted, i;
-
- assert(arena->spare == NULL);
-
- zero = false;
- commit = false;
- chunk = arena_chunk_alloc_internal(tsdn, arena, &zero, &commit);
- if (chunk == NULL)
- return (NULL);
-
- if (config_thp && opt_thp) {
- chunk->hugepage = thp_initially_huge;
- }
-
- /*
- * Initialize the map to contain one maximal free untouched run. Mark
- * the pages as zeroed if arena_chunk_alloc_internal() returned a zeroed
- * or decommitted chunk.
- */
- flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
- flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
- arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun,
- flag_unzeroed | flag_decommitted);
- /*
- * There is no need to initialize the internal page map entries unless
- * the chunk is not zeroed.
- */
- if (!zero) {
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
- (void *)arena_bitselm_get_const(chunk, map_bias+1),
- (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
- chunk_npages-1) -
- (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
- for (i = map_bias+1; i < chunk_npages-1; i++)
- arena_mapbits_internal_set(chunk, i, flag_unzeroed);
+void
+arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_dirty,
+ extent);
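+ /*
+ * With dirty decay set to 0 the pages are purged immediately;
+ * otherwise defer to the decay machinery and only run the
+ * background-thread inactivity check.
+ */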
+ if (arena_dirty_decay_ms_get(arena) == 0) {
+ arena_decay_dirty(tsdn, arena, false, true);
} else {
- JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
- *)arena_bitselm_get_const(chunk, map_bias+1),
- (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
- chunk_npages-1) -
- (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
- if (config_debug) {
- for (i = map_bias+1; i < chunk_npages-1; i++) {
- assert(arena_mapbits_unzeroed_get(chunk, i) ==
- flag_unzeroed);
- }
- }
+ arena_background_thread_inactivity_check(tsdn, arena);
}
- arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun,
- flag_unzeroed);
-
- return (chunk);
}
-static arena_chunk_t *
-arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena)
-{
- arena_chunk_t *chunk;
-
- if (arena->spare != NULL)
- chunk = arena_chunk_init_spare(arena);
- else {
- chunk = arena_chunk_init_hard(tsdn, arena);
- if (chunk == NULL)
- return (NULL);
- }
+static void *
+arena_slab_reg_alloc(tsdn_t *tsdn, extent_t *slab,
+ const arena_bin_info_t *bin_info) {
+ void *ret;
+ arena_slab_data_t *slab_data = extent_slab_data_get(slab);
+ size_t regind;
- ql_elm_new(&chunk->node, ql_link);
- ql_tail_insert(&arena->achunks, &chunk->node, ql_link);
- arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);
+ assert(extent_nfree_get(slab) > 0);
+ assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
- return (chunk);
+ regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info);
+ ret = (void *)((uintptr_t)extent_addr_get(slab) +
+ (uintptr_t)(bin_info->reg_size * regind));
+ extent_nfree_dec(slab);
+ return ret;
}
-static void
-arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
-{
- size_t sn;
- UNUSED bool hugepage JEMALLOC_CC_SILENCE_INIT(false);
- bool committed;
- chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
-
- chunk_deregister(chunk, &chunk->node);
+#ifndef JEMALLOC_JET
+static
+#endif
+size_t
+arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
+ size_t diff, regind;
- sn = extent_node_sn_get(&chunk->node);
- if (config_thp && opt_thp) {
- hugepage = chunk->hugepage;
- }
- committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
- if (!committed) {
- /*
- * Decommit the header. Mark the chunk as decommitted even if
- * header decommit fails, since treating a partially committed
- * chunk as committed has a high potential for causing later
- * access of decommitted memory.
- */
- chunk_hooks = chunk_hooks_get(tsdn, arena);
- chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE,
- arena->ind);
- }
- if (config_thp && opt_thp && hugepage != thp_initially_huge) {
- /*
- * Convert chunk back to initial THP state, so that all
- * subsequent chunk allocations start out in a consistent state.
- */
- if (thp_initially_huge) {
- pages_huge(chunk, chunksize);
- } else {
- pages_nohuge(chunk, chunksize);
- }
+ /* Freeing a pointer outside the slab can cause assertion failure. */
+ assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab));
+ assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab));
+ /* Freeing an interior pointer can cause assertion failure. */
+ assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) %
+ (uintptr_t)arena_bin_info[binind].reg_size == 0);
+
+ /* Avoid doing division with a variable divisor. */
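+ /*
+ * SIZE_CLASSES expands to one switch case per small size class, so
+ * each division below is by a compile-time constant that the compiler
+ * can strength-reduce.
+ */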
+ diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab));
+ switch (binind) {
+#define REGIND_bin_yes(index, reg_size) \
+ case index: \
+ regind = diff / (reg_size); \
+ assert(diff == regind * (reg_size)); \
+ break;
+#define REGIND_bin_no(index, reg_size)
+#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, \
+ lg_delta_lookup) \
+ REGIND_bin_##bin(index, (1U<<lg_grp) + (ndelta<<lg_delta))
+ SIZE_CLASSES
+#undef REGIND_bin_yes
+#undef REGIND_bin_no
+#undef SC
+ default: not_reached();
}
- chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize,
- sn, committed);
+ assert(regind < arena_bin_info[binind].nregs);
- if (config_stats) {
- arena->stats.mapped -= chunksize;
- arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
- }
+ return regind;
}
static void
-arena_spare_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *spare)
-{
-
- assert(arena->spare != spare);
+arena_slab_reg_dalloc(tsdn_t *tsdn, extent_t *slab,
+ arena_slab_data_t *slab_data, void *ptr) {
+ szind_t binind = extent_szind_get(slab);
+ const arena_bin_info_t *bin_info = &arena_bin_info[binind];
+ size_t regind = arena_slab_regind(slab, binind, ptr);
- if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
- arena_run_dirty_remove(arena, spare, map_bias,
- chunk_npages-map_bias);
- }
+ assert(extent_nfree_get(slab) < bin_info->nregs);
+ /* Freeing an unallocated pointer can cause assertion failure. */
+ assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));
- arena_chunk_discard(tsdn, arena, spare);
+ bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
+ extent_nfree_inc(slab);
}
static void
-arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
-{
- arena_chunk_t *spare;
-
- assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
- assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
- assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
- arena_maxrun);
- assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
- arena_maxrun);
- assert(arena_mapbits_dirty_get(chunk, map_bias) ==
- arena_mapbits_dirty_get(chunk, chunk_npages-1));
- assert(arena_mapbits_decommitted_get(chunk, map_bias) ==
- arena_mapbits_decommitted_get(chunk, chunk_npages-1));
-
- /* Remove run from runs_avail, so that the arena does not use it. */
- arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);
-
- ql_remove(&arena->achunks, &chunk->node, ql_link);
- spare = arena->spare;
- arena->spare = chunk;
- if (spare != NULL)
- arena_spare_discard(tsdn, arena, spare);
+arena_nactive_add(arena_t *arena, size_t add_pages) {
+ atomic_fetch_add_zu(&arena->nactive, add_pages, ATOMIC_RELAXED);
}
static void
-arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
-{
- szind_t index = size2index(usize) - nlclasses - NBINS;
-
- cassert(config_stats);
-
- arena->stats.nmalloc_huge++;
- arena->stats.allocated_huge += usize;
- arena->stats.hstats[index].nmalloc++;
- arena->stats.hstats[index].curhchunks++;
-}
-
-static void
-arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
-{
- szind_t index = size2index(usize) - nlclasses - NBINS;
-
- cassert(config_stats);
-
- arena->stats.nmalloc_huge--;
- arena->stats.allocated_huge -= usize;
- arena->stats.hstats[index].nmalloc--;
- arena->stats.hstats[index].curhchunks--;
+arena_nactive_sub(arena_t *arena, size_t sub_pages) {
+ assert(atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) >= sub_pages);
+ atomic_fetch_sub_zu(&arena->nactive, sub_pages, ATOMIC_RELAXED);
}
static void
-arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
-{
- szind_t index = size2index(usize) - nlclasses - NBINS;
+arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
+ szind_t index, hindex;
cassert(config_stats);
- arena->stats.ndalloc_huge++;
- arena->stats.allocated_huge -= usize;
- arena->stats.hstats[index].ndalloc++;
- arena->stats.hstats[index].curhchunks--;
-}
-
-static void
-arena_huge_reset_stats_cancel(arena_t *arena, size_t usize)
-{
- szind_t index = size2index(usize) - nlclasses - NBINS;
-
- cassert(config_stats);
+ if (usize < LARGE_MINCLASS) {
+ usize = LARGE_MINCLASS;
+ }
+ index = sz_size2index(usize);
+ hindex = (index >= NBINS) ? index - NBINS : 0;
- arena->stats.ndalloc_huge++;
- arena->stats.hstats[index].ndalloc--;
+ arena_stats_add_u64(tsdn, &arena->stats,
+ &arena->stats.lstats[hindex].nmalloc, 1);
}
static void
-arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
-{
- szind_t index = size2index(usize) - nlclasses - NBINS;
+arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
+ szind_t index, hindex;
cassert(config_stats);
- arena->stats.ndalloc_huge--;
- arena->stats.allocated_huge += usize;
- arena->stats.hstats[index].ndalloc--;
- arena->stats.hstats[index].curhchunks++;
-}
-
-static void
-arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
-{
+ if (usize < LARGE_MINCLASS) {
+ usize = LARGE_MINCLASS;
+ }
+ index = sz_size2index(usize);
+ hindex = (index >= NBINS) ? index - NBINS : 0;
- arena_huge_dalloc_stats_update(arena, oldsize);
- arena_huge_malloc_stats_update(arena, usize);
+ arena_stats_add_u64(tsdn, &arena->stats,
+ &arena->stats.lstats[hindex].ndalloc, 1);
}
static void
-arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
- size_t usize)
-{
-
- arena_huge_dalloc_stats_update_undo(arena, oldsize);
- arena_huge_malloc_stats_update_undo(arena, usize);
-}
-
-extent_node_t *
-arena_node_alloc(tsdn_t *tsdn, arena_t *arena)
-{
- extent_node_t *node;
-
- malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
- node = ql_last(&arena->node_cache, ql_link);
- if (node == NULL) {
- malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
- return (base_alloc(tsdn, sizeof(extent_node_t)));
- }
- ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
- malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
- return (node);
+arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize,
+ size_t usize) {
+ arena_large_dalloc_stats_update(tsdn, arena, oldusize);
+ arena_large_malloc_stats_update(tsdn, arena, usize);
}
-void
-arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node)
-{
+extent_t *
+arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
+ size_t alignment, bool *zero) {
+ extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
- malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
- ql_elm_new(node, ql_link);
- ql_tail_insert(&arena->node_cache, node, ql_link);
- malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
-}
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
-static void *
-arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, size_t *sn,
- bool *zero, size_t csize)
-{
- void *ret;
+ szind_t szind = sz_size2index(usize);
+ size_t mapped_add;
bool commit = true;
-
- ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize,
- alignment, sn, zero, &commit);
- if (ret == NULL) {
- /* Revert optimistic stats updates. */
- malloc_mutex_lock(tsdn, &arena->lock);
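+ /*
+ * Prefer recycling a cached dirty extent, then a muzzy one; only map
+ * new memory (accounted for via mapped_add) when neither cache can
+ * satisfy the request.
+ */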
+ extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks,
+ &arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false,
+ szind, zero, &commit);
+ if (extent == NULL) {
+ extent = extents_alloc(tsdn, arena, &extent_hooks,
+ &arena->extents_muzzy, NULL, usize, sz_large_pad, alignment,
+ false, szind, zero, &commit);
+ }
+ size_t size = usize + sz_large_pad;
+ if (extent == NULL) {
+ extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL,
+ usize, sz_large_pad, alignment, false, szind, zero,
+ &commit);
if (config_stats) {
- arena_huge_malloc_stats_update_undo(arena, usize);
- arena->stats.mapped -= usize;
+ /*
+ * extent may be NULL on OOM, but in that case
+ * mapped_add isn't used below, so there's no need to
+ * conditionally set it to 0 here.
+ */
+ mapped_add = size;
}
- arena_nactive_sub(arena, usize >> LG_PAGE);
- malloc_mutex_unlock(tsdn, &arena->lock);
- }
-
- return (ret);
-}
-
-void *
-arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
- size_t alignment, size_t *sn, bool *zero)
-{
- void *ret;
- chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
- size_t csize = CHUNK_CEILING(usize);
- bool commit = true;
-
- malloc_mutex_lock(tsdn, &arena->lock);
-
- /* Optimistically update stats. */
- if (config_stats) {
- arena_huge_malloc_stats_update(arena, usize);
- arena->stats.mapped += usize;
+ } else if (config_stats) {
+ mapped_add = 0;
}
- arena_nactive_add(arena, usize >> LG_PAGE);
- ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize,
- alignment, sn, zero, &commit, true);
- malloc_mutex_unlock(tsdn, &arena->lock);
- if (ret == NULL) {
- ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
- usize, alignment, sn, zero, csize);
+ if (extent != NULL) {
+ if (config_stats) {
+ arena_stats_lock(tsdn, &arena->stats);
+ arena_large_malloc_stats_update(tsdn, arena, usize);
+ if (mapped_add != 0) {
+ arena_stats_add_zu(tsdn, &arena->stats,
+ &arena->stats.mapped, mapped_add);
+ }
+ arena_stats_unlock(tsdn, &arena->stats);
+ }
+ arena_nactive_add(arena, size >> LG_PAGE);
}
- return (ret);
+ return extent;
}
void
-arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize,
- size_t sn)
-{
- chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
- size_t csize;
-
- csize = CHUNK_CEILING(usize);
- malloc_mutex_lock(tsdn, &arena->lock);
+arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
if (config_stats) {
- arena_huge_dalloc_stats_update(arena, usize);
- arena->stats.mapped -= usize;
+ arena_stats_lock(tsdn, &arena->stats);
+ arena_large_dalloc_stats_update(tsdn, arena,
+ extent_usize_get(extent));
+ arena_stats_unlock(tsdn, &arena->stats);
}
- arena_nactive_sub(arena, usize >> LG_PAGE);
-
- chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, sn, true);
- malloc_mutex_unlock(tsdn, &arena->lock);
+ arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
}
void
-arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk,
- size_t oldsize, size_t usize)
-{
+arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+ size_t oldusize) {
+ size_t usize = extent_usize_get(extent);
+ size_t udiff = oldusize - usize;
- assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
- assert(oldsize != usize);
-
- malloc_mutex_lock(tsdn, &arena->lock);
- if (config_stats)
- arena_huge_ralloc_stats_update(arena, oldsize, usize);
- if (oldsize < usize)
- arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE);
- else
- arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE);
- malloc_mutex_unlock(tsdn, &arena->lock);
-}
-
-void
-arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
- size_t oldsize, size_t usize, size_t sn)
-{
- size_t udiff = oldsize - usize;
- size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
-
- malloc_mutex_lock(tsdn, &arena->lock);
if (config_stats) {
- arena_huge_ralloc_stats_update(arena, oldsize, usize);
- if (cdiff != 0)
- arena->stats.mapped -= cdiff;
+ arena_stats_lock(tsdn, &arena->stats);
+ arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
+ arena_stats_unlock(tsdn, &arena->stats);
}
arena_nactive_sub(arena, udiff >> LG_PAGE);
-
- if (cdiff != 0) {
- chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
- void *nchunk = (void *)((uintptr_t)chunk +
- CHUNK_CEILING(usize));
-
- chunk_dalloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
- sn, true);
- }
- malloc_mutex_unlock(tsdn, &arena->lock);
}
-static bool
-arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize,
- size_t *sn, bool *zero, void *nchunk, size_t udiff, size_t cdiff)
-{
- bool err;
- bool commit = true;
-
- err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
- chunksize, sn, zero, &commit) == NULL);
- if (err) {
- /* Revert optimistic stats updates. */
- malloc_mutex_lock(tsdn, &arena->lock);
- if (config_stats) {
- arena_huge_ralloc_stats_update_undo(arena, oldsize,
- usize);
- arena->stats.mapped -= cdiff;
- }
- arena_nactive_sub(arena, udiff >> LG_PAGE);
- malloc_mutex_unlock(tsdn, &arena->lock);
- } else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
- cdiff, true, arena->ind)) {
- chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
- *sn, *zero, true);
- err = true;
- }
- return (err);
-}
-
-bool
-arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk,
- size_t oldsize, size_t usize, bool *zero)
-{
- bool err;
- chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
- void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
- size_t udiff = usize - oldsize;
- size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
- size_t sn;
- bool commit = true;
-
- malloc_mutex_lock(tsdn, &arena->lock);
+void
+arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+ size_t oldusize) {
+ size_t usize = extent_usize_get(extent);
+ size_t udiff = usize - oldusize;
- /* Optimistically update stats. */
if (config_stats) {
- arena_huge_ralloc_stats_update(arena, oldsize, usize);
- arena->stats.mapped += cdiff;
+ arena_stats_lock(tsdn, &arena->stats);
+ arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
+ arena_stats_unlock(tsdn, &arena->stats);
}
arena_nactive_add(arena, udiff >> LG_PAGE);
-
- err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
- chunksize, &sn, zero, &commit, true) == NULL);
- malloc_mutex_unlock(tsdn, &arena->lock);
- if (err) {
- err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena,
- &chunk_hooks, chunk, oldsize, usize, &sn, zero, nchunk,
- udiff, cdiff);
- } else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
- cdiff, true, arena->ind)) {
- chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff,
- sn, *zero, true);
- err = true;
- }
-
- return (err);
}
-/*
- * Do first-best-fit run selection, i.e. select the lowest run that best fits.
- * Run sizes are indexed, so not all candidate runs are necessarily exactly the
- * same size.
- */
-static arena_run_t *
-arena_run_first_best_fit(arena_t *arena, size_t size)
-{
- pszind_t pind, i;
-
- pind = psz2ind(run_quantize_ceil(size));
-
- for (i = pind; pind2sz(i) <= chunksize; i++) {
- arena_chunk_map_misc_t *miscelm = arena_run_heap_first(
- &arena->runs_avail[i]);
- if (miscelm != NULL)
- return (&miscelm->run);
- }
-
- return (NULL);
-}
-
-static arena_run_t *
-arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
-{
- arena_run_t *run = arena_run_first_best_fit(arena, size);
- if (run != NULL) {
- if (arena_run_split_large(arena, run, size, zero))
- run = NULL;
- }
- return (run);
+static ssize_t
+arena_decay_ms_read(arena_decay_t *decay) {
+ return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
}
-static arena_run_t *
-arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
-{
- arena_chunk_t *chunk;
- arena_run_t *run;
-
- assert(size <= arena_maxrun);
- assert(size == PAGE_CEILING(size));
-
- /* Search the arena's chunks for the lowest best fit. */
- run = arena_run_alloc_large_helper(arena, size, zero);
- if (run != NULL)
- return (run);
-
- /*
- * No usable runs. Create a new chunk from which to allocate the run.
- */
- chunk = arena_chunk_alloc(tsdn, arena);
- if (chunk != NULL) {
- run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
- if (arena_run_split_large(arena, run, size, zero))
- run = NULL;
- return (run);
- }
-
- /*
- * arena_chunk_alloc() failed, but another thread may have made
- * sufficient memory available while this one dropped arena->lock in
- * arena_chunk_alloc(), so search one more time.
- */
- return (arena_run_alloc_large_helper(arena, size, zero));
-}
-
-static arena_run_t *
-arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
-{
- arena_run_t *run = arena_run_first_best_fit(arena, size);
- if (run != NULL) {
- if (arena_run_split_small(arena, run, size, binind))
- run = NULL;
- }
- return (run);
-}
-
-static arena_run_t *
-arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind)
-{
- arena_chunk_t *chunk;
- arena_run_t *run;
-
- assert(size <= arena_maxrun);
- assert(size == PAGE_CEILING(size));
- assert(binind != BININD_INVALID);
-
- /* Search the arena's chunks for the lowest best fit. */
- run = arena_run_alloc_small_helper(arena, size, binind);
- if (run != NULL)
- return (run);
-
- /*
- * No usable runs. Create a new chunk from which to allocate the run.
- */
- chunk = arena_chunk_alloc(tsdn, arena);
- if (chunk != NULL) {
- run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
- if (arena_run_split_small(arena, run, size, binind))
- run = NULL;
- return (run);
- }
-
- /*
- * arena_chunk_alloc() failed, but another thread may have made
- * sufficient memory available while this one dropped arena->lock in
- * arena_chunk_alloc(), so search one more time.
- */
- return (arena_run_alloc_small_helper(arena, size, binind));
-}
-
-static bool
-arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
-{
-
- return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t)
- << 3));
-}
-
-ssize_t
-arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena)
-{
- ssize_t lg_dirty_mult;
-
- malloc_mutex_lock(tsdn, &arena->lock);
- lg_dirty_mult = arena->lg_dirty_mult;
- malloc_mutex_unlock(tsdn, &arena->lock);
-
- return (lg_dirty_mult);
-}
-
-bool
-arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, ssize_t lg_dirty_mult)
-{
-
- if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
- return (true);
-
- malloc_mutex_lock(tsdn, &arena->lock);
- arena->lg_dirty_mult = lg_dirty_mult;
- arena_maybe_purge(tsdn, arena);
- malloc_mutex_unlock(tsdn, &arena->lock);
-
- return (false);
+static void
+arena_decay_ms_write(arena_decay_t *decay, ssize_t decay_ms) {
+ atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED);
}
static void
-arena_decay_deadline_init(arena_t *arena)
-{
-
- assert(opt_purge == purge_mode_decay);
-
+arena_decay_deadline_init(arena_decay_t *decay) {
/*
* Generate a new deadline that is uniformly random within the next
* epoch after the current one.
*/
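+ /*
+ * That is: deadline = epoch + interval + jitter, with jitter drawn
+ * uniformly from [0, interval) whenever decay is enabled.
+ */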
- nstime_copy(&arena->decay.deadline, &arena->decay.epoch);
- nstime_add(&arena->decay.deadline, &arena->decay.interval);
- if (arena->decay.time > 0) {
+ nstime_copy(&decay->deadline, &decay->epoch);
+ nstime_add(&decay->deadline, &decay->interval);
+ if (arena_decay_ms_read(decay) > 0) {
nstime_t jitter;
- nstime_init(&jitter, prng_range_u64(&arena->decay.jitter_state,
- nstime_ns(&arena->decay.interval)));
- nstime_add(&arena->decay.deadline, &jitter);
+ nstime_init(&jitter, prng_range_u64(&decay->jitter_state,
+ nstime_ns(&decay->interval)));
+ nstime_add(&decay->deadline, &jitter);
}
}
static bool
-arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time)
-{
-
- assert(opt_purge == purge_mode_decay);
-
- return (nstime_compare(&arena->decay.deadline, time) <= 0);
+arena_decay_deadline_reached(const arena_decay_t *decay, const nstime_t *time) {
+ return (nstime_compare(&decay->deadline, time) <= 0);
}
static size_t
-arena_decay_backlog_npages_limit(const arena_t *arena)
-{
- static const uint64_t h_steps[] = {
-#define STEP(step, h, x, y) \
- h,
- SMOOTHSTEP
-#undef STEP
- };
+arena_decay_backlog_npages_limit(const arena_decay_t *decay) {
uint64_t sum;
size_t npages_limit_backlog;
unsigned i;
- assert(opt_purge == purge_mode_decay);
-
/*
* For each element of decay_backlog, multiply by the corresponding
* fixed-point smoothstep decay factor. Sum the products, then divide
* to round down to the nearest whole number of pages.
*/
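+ /*
+ * In other words: npages_limit = floor(sum_i(backlog[i] * h_steps[i])
+ * / 2^SMOOTHSTEP_BFP).
+ */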
sum = 0;
- for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
- sum += arena->decay.backlog[i] * h_steps[i];
+ for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
+ sum += decay->backlog[i] * h_steps[i];
+ }
npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
- return (npages_limit_backlog);
+ return npages_limit_backlog;
}
static void
-arena_decay_backlog_update_last(arena_t *arena)
-{
- size_t ndirty_delta = (arena->ndirty > arena->decay.ndirty) ?
- arena->ndirty - arena->decay.ndirty : 0;
- arena->decay.backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
+arena_decay_backlog_update_last(arena_decay_t *decay, size_t current_npages) {
+ size_t npages_delta = (current_npages > decay->nunpurged) ?
+ current_npages - decay->nunpurged : 0;
+ decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta;
+
+ if (config_debug) {
+ if (current_npages > decay->ceil_npages) {
+ decay->ceil_npages = current_npages;
+ }
+ size_t npages_limit = arena_decay_backlog_npages_limit(decay);
+ assert(decay->ceil_npages >= npages_limit);
+ if (decay->ceil_npages > npages_limit) {
+ decay->ceil_npages = npages_limit;
+ }
+ }
}
static void
-arena_decay_backlog_update(arena_t *arena, uint64_t nadvance_u64)
-{
-
+arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64,
+ size_t current_npages) {
if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
- memset(arena->decay.backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
+ memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
sizeof(size_t));
} else {
size_t nadvance_z = (size_t)nadvance_u64;
assert((uint64_t)nadvance_z == nadvance_u64);
- memmove(arena->decay.backlog, &arena->decay.backlog[nadvance_z],
+ memmove(decay->backlog, &decay->backlog[nadvance_z],
(SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
if (nadvance_z > 1) {
- memset(&arena->decay.backlog[SMOOTHSTEP_NSTEPS -
+ memset(&decay->backlog[SMOOTHSTEP_NSTEPS -
nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
}
}
- arena_decay_backlog_update_last(arena);
+ arena_decay_backlog_update_last(decay, current_npages);
}
static void
-arena_decay_epoch_advance_helper(arena_t *arena, const nstime_t *time)
-{
- uint64_t nadvance_u64;
- nstime_t delta;
+arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
+ extents_t *extents, size_t current_npages, size_t npages_limit) {
+ if (current_npages > npages_limit) {
+ arena_decay_to_limit(tsdn, arena, decay, extents, false,
+ npages_limit);
+ }
+}
- assert(opt_purge == purge_mode_decay);
- assert(arena_decay_deadline_reached(arena, time));
+static void
+arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time,
+ size_t current_npages) {
+ assert(arena_decay_deadline_reached(decay, time));
+ nstime_t delta;
nstime_copy(&delta, time);
- nstime_subtract(&delta, &arena->decay.epoch);
- nadvance_u64 = nstime_divide(&delta, &arena->decay.interval);
+ nstime_subtract(&delta, &decay->epoch);
+
+ uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval);
assert(nadvance_u64 > 0);
/* Add nadvance_u64 decay intervals to epoch. */
- nstime_copy(&delta, &arena->decay.interval);
+ nstime_copy(&delta, &decay->interval);
nstime_imultiply(&delta, nadvance_u64);
- nstime_add(&arena->decay.epoch, &delta);
+ nstime_add(&decay->epoch, &delta);
/* Set a new deadline. */
- arena_decay_deadline_init(arena);
+ arena_decay_deadline_init(decay);
/* Update the backlog. */
- arena_decay_backlog_update(arena, nadvance_u64);
+ arena_decay_backlog_update(decay, nadvance_u64, current_npages);
}
static void
-arena_decay_epoch_advance_purge(tsdn_t *tsdn, arena_t *arena)
-{
- size_t ndirty_limit = arena_decay_backlog_npages_limit(arena);
+arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
+ extents_t *extents, const nstime_t *time, bool purge) {
+ size_t current_npages = extents_npages_get(extents);
+ arena_decay_epoch_advance_helper(decay, time, current_npages);
- if (arena->ndirty > ndirty_limit)
- arena_purge_to_limit(tsdn, arena, ndirty_limit);
- arena->decay.ndirty = arena->ndirty;
-}
-
-static void
-arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, const nstime_t *time)
-{
-
- arena_decay_epoch_advance_helper(arena, time);
- arena_decay_epoch_advance_purge(tsdn, arena);
+ size_t npages_limit = arena_decay_backlog_npages_limit(decay);
+ /* decay->mtx may be dropped during try_purge(), so finish logging first. */
+ decay->nunpurged = (npages_limit > current_npages) ? npages_limit :
+ current_npages;
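+ /*
+ * nunpurged is the baseline that arena_decay_backlog_update_last()
+ * subtracts when computing the next epoch's backlog entry.
+ */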
+ if (purge) {
+ arena_decay_try_purge(tsdn, arena, decay, extents,
+ current_npages, npages_limit);
+ }
}
static void
-arena_decay_init(arena_t *arena, ssize_t decay_time)
-{
-
- arena->decay.time = decay_time;
- if (decay_time > 0) {
- nstime_init2(&arena->decay.interval, decay_time, 0);
- nstime_idivide(&arena->decay.interval, SMOOTHSTEP_NSTEPS);
+arena_decay_reinit(arena_decay_t *decay, extents_t *extents, ssize_t decay_ms) {
+ arena_decay_ms_write(decay, decay_ms);
+ if (decay_ms > 0) {
+ nstime_init(&decay->interval, (uint64_t)decay_ms *
+ KQU(1000000));
+ nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS);
}
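+ /*
+ * When decay is enabled, decay->interval now holds one epoch:
+ * decay_ms converted to nanoseconds and split evenly across
+ * SMOOTHSTEP_NSTEPS.
+ */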
- nstime_init(&arena->decay.epoch, 0);
- nstime_update(&arena->decay.epoch);
- arena->decay.jitter_state = (uint64_t)(uintptr_t)arena;
- arena_decay_deadline_init(arena);
- arena->decay.ndirty = arena->ndirty;
- memset(arena->decay.backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
+ nstime_init(&decay->epoch, 0);
+ nstime_update(&decay->epoch);
+ decay->jitter_state = (uint64_t)(uintptr_t)decay;
+ arena_decay_deadline_init(decay);
+ decay->nunpurged = 0;
+ memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
}
static bool
-arena_decay_time_valid(ssize_t decay_time)
-{
-
- if (decay_time < -1)
- return (false);
- if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX)
- return (true);
- return (false);
-}
-
-ssize_t
-arena_decay_time_get(tsdn_t *tsdn, arena_t *arena)
-{
- ssize_t decay_time;
-
- malloc_mutex_lock(tsdn, &arena->lock);
- decay_time = arena->decay.time;
- malloc_mutex_unlock(tsdn, &arena->lock);
-
- return (decay_time);
-}
-
-bool
-arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time)
-{
-
- if (!arena_decay_time_valid(decay_time))
- return (true);
-
- malloc_mutex_lock(tsdn, &arena->lock);
- /*
- * Restart decay backlog from scratch, which may cause many dirty pages
- * to be immediately purged. It would conceptually be possible to map
- * the old backlog onto the new backlog, but there is no justification
- * for such complexity since decay_time changes are intended to be
- * infrequent, either between the {-1, 0, >0} states, or a one-time
- * arbitrary change during initial arena configuration.
- */
- arena_decay_init(arena, decay_time);
- arena_maybe_purge(tsdn, arena);
- malloc_mutex_unlock(tsdn, &arena->lock);
-
- return (false);
+arena_decay_init(arena_decay_t *decay, extents_t *extents, ssize_t decay_ms,
+ decay_stats_t *stats) {
+ if (config_debug) {
+ for (size_t i = 0; i < sizeof(arena_decay_t); i++) {
+ assert(((char *)decay)[i] == 0);
+ }
+ decay->ceil_npages = 0;
+ }
+ if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY,
+ malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ decay->purging = false;
+ arena_decay_reinit(decay, extents, decay_ms);
+ /* Memory is zeroed, so there is no need to clear stats. */
+ if (config_stats) {
+ decay->stats = stats;
+ }
+ return false;
}
-static void
-arena_maybe_purge_ratio(tsdn_t *tsdn, arena_t *arena)
-{
-
- assert(opt_purge == purge_mode_ratio);
-
- /* Don't purge if the option is disabled. */
- if (arena->lg_dirty_mult < 0)
- return;
-
- /*
- * Iterate, since preventing recursive purging could otherwise leave too
- * many dirty pages.
- */
- while (true) {
- size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
- if (threshold < chunk_npages)
- threshold = chunk_npages;
- /*
- * Don't purge unless the number of purgeable pages exceeds the
- * threshold.
- */
- if (arena->ndirty <= threshold)
- return;
- arena_purge_to_limit(tsdn, arena, threshold);
+static bool
+arena_decay_ms_valid(ssize_t decay_ms) {
+ if (decay_ms < -1) {
+ return false;
}
+ if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX *
+ KQU(1000)) {
+ return true;
+ }
+ return false;
}
-static void
-arena_maybe_purge_decay(tsdn_t *tsdn, arena_t *arena)
-{
- nstime_t time;
-
- assert(opt_purge == purge_mode_decay);
+static bool
+arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
+ extents_t *extents, bool is_background_thread) {
+ malloc_mutex_assert_owner(tsdn, &decay->mtx);
/* Purge all or nothing if the option is disabled. */
- if (arena->decay.time <= 0) {
- if (arena->decay.time == 0)
- arena_purge_to_limit(tsdn, arena, 0);
- return;
+ ssize_t decay_ms = arena_decay_ms_read(decay);
+ if (decay_ms <= 0) {
+ if (decay_ms == 0) {
+ arena_decay_to_limit(tsdn, arena, decay, extents, false,
+ 0);
+ }
+ return false;
}
+ nstime_t time;
nstime_init(&time, 0);
nstime_update(&time);
- if (unlikely(!nstime_monotonic() && nstime_compare(&arena->decay.epoch,
- &time) > 0)) {
+ if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch, &time)
+ > 0)) {
/*
* Time went backwards. Move the epoch back in time and
* generate a new deadline, with the expectation that time
@@ -1520,417 +814,315 @@ arena_maybe_purge_decay(tsdn_t *tsdn, arena_t *arena)
* clock jitter estimation and compensation isn't feasible here
* because calls into this code are event-driven.
*/
- nstime_copy(&arena->decay.epoch, &time);
- arena_decay_deadline_init(arena);
+ nstime_copy(&decay->epoch, &time);
+ arena_decay_deadline_init(decay);
} else {
/* Verify that time does not go backwards. */
- assert(nstime_compare(&arena->decay.epoch, &time) <= 0);
+ assert(nstime_compare(&decay->epoch, &time) <= 0);
}
/*
* If the deadline has been reached, advance to the current epoch and
* purge to the new limit if necessary. Note that dirty pages created
* during the current epoch are not subject to purge until a future
- * epoch, so as a result purging only happens during epoch advances.
+ * epoch, so as a result purging only happens during epoch advances or
+ * when triggered by background threads (as a scheduled event).
*/
- if (arena_decay_deadline_reached(arena, &time))
- arena_decay_epoch_advance(tsdn, arena, &time);
+ bool advance_epoch = arena_decay_deadline_reached(decay, &time);
+ if (advance_epoch) {
+ bool should_purge = is_background_thread ||
+ !background_thread_enabled();
+ arena_decay_epoch_advance(tsdn, arena, decay, extents, &time,
+ should_purge);
+ } else if (is_background_thread) {
+ arena_decay_try_purge(tsdn, arena, decay, extents,
+ extents_npages_get(extents),
+ arena_decay_backlog_npages_limit(decay));
+ }
+
+ return advance_epoch;
}
-void
-arena_maybe_purge(tsdn_t *tsdn, arena_t *arena)
-{
+static ssize_t
+arena_decay_ms_get(arena_decay_t *decay) {
+ return arena_decay_ms_read(decay);
+}
- /* Don't recursively purge. */
- if (arena->purging)
- return;
+ssize_t
+arena_dirty_decay_ms_get(arena_t *arena) {
+ return arena_decay_ms_get(&arena->decay_dirty);
+}
- if (opt_purge == purge_mode_ratio)
- arena_maybe_purge_ratio(tsdn, arena);
- else
- arena_maybe_purge_decay(tsdn, arena);
+ssize_t
+arena_muzzy_decay_ms_get(arena_t *arena) {
+ return arena_decay_ms_get(&arena->decay_muzzy);
}
-static size_t
-arena_dirty_count(arena_t *arena)
-{
- size_t ndirty = 0;
- arena_runs_dirty_link_t *rdelm;
- extent_node_t *chunkselm;
-
- for (rdelm = qr_next(&arena->runs_dirty, rd_link),
- chunkselm = qr_next(&arena->chunks_cache, cc_link);
- rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) {
- size_t npages;
-
- if (rdelm == &chunkselm->rd) {
- npages = extent_node_size_get(chunkselm) >> LG_PAGE;
- chunkselm = qr_next(chunkselm, cc_link);
- } else {
- arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
- rdelm);
- arena_chunk_map_misc_t *miscelm =
- arena_rd_to_miscelm(rdelm);
- size_t pageind = arena_miscelm_to_pageind(miscelm);
- assert(arena_mapbits_allocated_get(chunk, pageind) ==
- 0);
- assert(arena_mapbits_large_get(chunk, pageind) == 0);
- assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
- npages = arena_mapbits_unallocated_size_get(chunk,
- pageind) >> LG_PAGE;
- }
- ndirty += npages;
+static bool
+arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
+ extents_t *extents, ssize_t decay_ms) {
+ if (!arena_decay_ms_valid(decay_ms)) {
+ return true;
}
- return (ndirty);
-}
-
-static size_t
-arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel,
- extent_node_t *purge_chunks_sentinel)
-{
- arena_runs_dirty_link_t *rdelm, *rdelm_next;
- extent_node_t *chunkselm;
- size_t nstashed = 0;
+ malloc_mutex_lock(tsdn, &decay->mtx);
+ /*
+ * Restart decay backlog from scratch, which may cause many dirty pages
+ * to be immediately purged. It would conceptually be possible to map
+ * the old backlog onto the new backlog, but there is no justification
+ * for such complexity since decay_ms changes are intended to be
+ * infrequent, either between the {-1, 0, >0} states, or a one-time
+ * arbitrary change during initial arena configuration.
+ */
+ arena_decay_reinit(decay, extents, decay_ms);
+ arena_maybe_decay(tsdn, arena, decay, extents, false);
+ malloc_mutex_unlock(tsdn, &decay->mtx);
- /* Stash runs/chunks according to ndirty_limit. */
- for (rdelm = qr_next(&arena->runs_dirty, rd_link),
- chunkselm = qr_next(&arena->chunks_cache, cc_link);
- rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
- size_t npages;
- rdelm_next = qr_next(rdelm, rd_link);
-
- if (rdelm == &chunkselm->rd) {
- extent_node_t *chunkselm_next;
- size_t sn;
- bool zero, commit;
- UNUSED void *chunk;
-
- npages = extent_node_size_get(chunkselm) >> LG_PAGE;
- if (opt_purge == purge_mode_decay && arena->ndirty -
- (nstashed + npages) < ndirty_limit)
- break;
+ return false;
+}
- chunkselm_next = qr_next(chunkselm, cc_link);
- /*
- * Allocate. chunkselm remains valid due to the
- * dalloc_node=false argument to chunk_alloc_cache().
- */
- zero = false;
- commit = false;
- chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks,
- extent_node_addr_get(chunkselm),
- extent_node_size_get(chunkselm), chunksize, &sn,
- &zero, &commit, false);
- assert(chunk == extent_node_addr_get(chunkselm));
- assert(zero == extent_node_zeroed_get(chunkselm));
- extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
- purge_chunks_sentinel);
- assert(npages == (extent_node_size_get(chunkselm) >>
- LG_PAGE));
- chunkselm = chunkselm_next;
- } else {
- arena_chunk_t *chunk =
- (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
- arena_chunk_map_misc_t *miscelm =
- arena_rd_to_miscelm(rdelm);
- size_t pageind = arena_miscelm_to_pageind(miscelm);
- arena_run_t *run = &miscelm->run;
- size_t run_size =
- arena_mapbits_unallocated_size_get(chunk, pageind);
-
- npages = run_size >> LG_PAGE;
- if (opt_purge == purge_mode_decay && arena->ndirty -
- (nstashed + npages) < ndirty_limit)
- break;
+bool
+arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
+ ssize_t decay_ms) {
+ return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty,
+ &arena->extents_dirty, decay_ms);
+}
- assert(pageind + npages <= chunk_npages);
- assert(arena_mapbits_dirty_get(chunk, pageind) ==
- arena_mapbits_dirty_get(chunk, pageind+npages-1));
+bool
+arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
+ ssize_t decay_ms) {
+ return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy,
+ &arena->extents_muzzy, decay_ms);
+}
- /*
- * If purging the spare chunk's run, make it available
- * prior to allocation.
- */
- if (chunk == arena->spare)
- arena_chunk_alloc(tsdn, arena);
-
- /* Temporarily allocate the free dirty run. */
- arena_run_split_large(arena, run, run_size, false);
- /* Stash. */
- if (false)
- qr_new(rdelm, rd_link); /* Redundant. */
- else {
- assert(qr_next(rdelm, rd_link) == rdelm);
- assert(qr_prev(rdelm, rd_link) == rdelm);
- }
- qr_meld(purge_runs_sentinel, rdelm, rd_link);
- }
+static size_t
+arena_stash_decayed(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_limit,
+ extent_list_t *decay_extents) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
- nstashed += npages;
- if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <=
- ndirty_limit)
- break;
+ /* Stash extents according to npages_limit. */
+ size_t nstashed = 0;
+ extent_t *extent;
+ while ((extent = extents_evict(tsdn, arena, r_extent_hooks, extents,
+ npages_limit)) != NULL) {
+ extent_list_append(decay_extents, extent);
+ nstashed += extent_size_get(extent) >> LG_PAGE;
}
-
- return (nstashed);
+ return nstashed;
}
static size_t
-arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- arena_runs_dirty_link_t *purge_runs_sentinel,
- extent_node_t *purge_chunks_sentinel)
-{
- size_t npurged, nmadvise;
- arena_runs_dirty_link_t *rdelm;
- extent_node_t *chunkselm;
-
- if (config_stats)
+arena_decay_stashed(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents,
+ bool all, extent_list_t *decay_extents) {
+ UNUSED size_t nmadvise, nunmapped;
+ size_t npurged;
+
+ if (config_stats) {
nmadvise = 0;
+ nunmapped = 0;
+ }
npurged = 0;
- malloc_mutex_unlock(tsdn, &arena->lock);
- for (rdelm = qr_next(purge_runs_sentinel, rd_link),
- chunkselm = qr_next(purge_chunks_sentinel, cc_link);
- rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) {
- size_t npages;
-
- if (rdelm == &chunkselm->rd) {
- /*
- * Don't actually purge the chunk here because 1)
- * chunkselm is embedded in the chunk and must remain
- * valid, and 2) we deallocate the chunk in
- * arena_unstash_purged(), where it is destroyed,
- * decommitted, or purged, depending on chunk
- * deallocation policy.
- */
- size_t size = extent_node_size_get(chunkselm);
- npages = size >> LG_PAGE;
- chunkselm = qr_next(chunkselm, cc_link);
- } else {
- size_t pageind, run_size, flag_unzeroed, flags, i;
- bool decommitted;
- arena_chunk_t *chunk =
- (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
- arena_chunk_map_misc_t *miscelm =
- arena_rd_to_miscelm(rdelm);
- pageind = arena_miscelm_to_pageind(miscelm);
- run_size = arena_mapbits_large_size_get(chunk, pageind);
- npages = run_size >> LG_PAGE;
-
- /*
- * If this is the first run purged within chunk, mark
- * the chunk as non-THP-capable. This will prevent all
- * use of THPs for this chunk until the chunk as a whole
- * is deallocated.
- */
- if (config_thp && opt_thp && chunk->hugepage) {
- chunk->hugepage = pages_nohuge(chunk,
- chunksize);
+ ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
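+ /*
+ * A dirty extent is lazily purged and demoted to the muzzy set unless
+ * this is a full decay, the muzzy decay time is zero, or the lazy
+ * purge fails; in those cases it falls through and is deallocated,
+ * counted as unmapped.
+ */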
+ for (extent_t *extent = extent_list_first(decay_extents); extent !=
+ NULL; extent = extent_list_first(decay_extents)) {
+ if (config_stats) {
+ nmadvise++;
+ }
+ size_t npages = extent_size_get(extent) >> LG_PAGE;
+ npurged += npages;
+ extent_list_remove(decay_extents, extent);
+ switch (extents_state_get(extents)) {
+ case extent_state_active:
+ not_reached();
+ case extent_state_dirty:
+ if (!all && muzzy_decay_ms != 0 &&
+ !extent_purge_lazy_wrapper(tsdn, arena,
+ r_extent_hooks, extent, 0,
+ extent_size_get(extent))) {
+ extents_dalloc(tsdn, arena, r_extent_hooks,
+ &arena->extents_muzzy, extent);
+ arena_background_thread_inactivity_check(tsdn,
+ arena);
+ break;
}
-
- assert(pageind + npages <= chunk_npages);
- assert(!arena_mapbits_decommitted_get(chunk, pageind));
- assert(!arena_mapbits_decommitted_get(chunk,
- pageind+npages-1));
- decommitted = !chunk_hooks->decommit(chunk, chunksize,
- pageind << LG_PAGE, npages << LG_PAGE, arena->ind);
- if (decommitted) {
- flag_unzeroed = 0;
- flags = CHUNK_MAP_DECOMMITTED;
- } else {
- flag_unzeroed = chunk_purge_wrapper(tsdn, arena,
- chunk_hooks, chunk, chunksize, pageind <<
- LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0;
- flags = flag_unzeroed;
- }
- arena_mapbits_large_set(chunk, pageind+npages-1, 0,
- flags);
- arena_mapbits_large_set(chunk, pageind, run_size,
- flags);
-
- /*
- * Set the unzeroed flag for internal pages, now that
- * chunk_purge_wrapper() has returned whether the pages
- * were zeroed as a side effect of purging. This chunk
- * map modification is safe even though the arena mutex
- * isn't currently owned by this thread, because the run
- * is marked as allocated, thus protecting it from being
- * modified by any other thread. As long as these
- * writes don't perturb the first and last elements'
- * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
- */
- for (i = 1; i < npages-1; i++) {
- arena_mapbits_internal_set(chunk, pageind+i,
- flag_unzeroed);
+ /* Fall through. */
+ case extent_state_muzzy:
+ extent_dalloc_wrapper(tsdn, arena, r_extent_hooks,
+ extent);
+ if (config_stats) {
+ nunmapped += npages;
}
+ break;
+ case extent_state_retained:
+ default:
+ not_reached();
}
-
- npurged += npages;
- if (config_stats)
- nmadvise++;
}
- malloc_mutex_lock(tsdn, &arena->lock);
if (config_stats) {
- arena->stats.nmadvise += nmadvise;
- arena->stats.purged += npurged;
+ arena_stats_lock(tsdn, &arena->stats);
+ arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->npurge,
+ 1);
+ arena_stats_add_u64(tsdn, &arena->stats,
+ &decay->stats->nmadvise, nmadvise);
+ arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->purged,
+ npurged);
+ arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped,
+ nunmapped << LG_PAGE);
+ arena_stats_unlock(tsdn, &arena->stats);
}
- return (npurged);
+ return npurged;
}
+/*
+ * npages_limit: Decay as many dirty extents as possible without violating the
+ * invariant: (extents_npages_get(extents) >= npages_limit)
+ */
static void
-arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- arena_runs_dirty_link_t *purge_runs_sentinel,
- extent_node_t *purge_chunks_sentinel)
-{
- arena_runs_dirty_link_t *rdelm, *rdelm_next;
- extent_node_t *chunkselm;
-
- /* Deallocate chunks/runs. */
- for (rdelm = qr_next(purge_runs_sentinel, rd_link),
- chunkselm = qr_next(purge_chunks_sentinel, cc_link);
- rdelm != purge_runs_sentinel; rdelm = rdelm_next) {
- rdelm_next = qr_next(rdelm, rd_link);
- if (rdelm == &chunkselm->rd) {
- extent_node_t *chunkselm_next = qr_next(chunkselm,
- cc_link);
- void *addr = extent_node_addr_get(chunkselm);
- size_t size = extent_node_size_get(chunkselm);
- size_t sn = extent_node_sn_get(chunkselm);
- bool zeroed = extent_node_zeroed_get(chunkselm);
- bool committed = extent_node_committed_get(chunkselm);
- extent_node_dirty_remove(chunkselm);
- arena_node_dalloc(tsdn, arena, chunkselm);
- chunkselm = chunkselm_next;
- chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr,
- size, sn, zeroed, committed);
- } else {
- arena_chunk_t *chunk =
- (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
- arena_chunk_map_misc_t *miscelm =
- arena_rd_to_miscelm(rdelm);
- size_t pageind = arena_miscelm_to_pageind(miscelm);
- bool decommitted = (arena_mapbits_decommitted_get(chunk,
- pageind) != 0);
- arena_run_t *run = &miscelm->run;
- qr_remove(rdelm, rd_link);
- arena_run_dalloc(tsdn, arena, run, false, true,
- decommitted);
- }
+arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
+ extents_t *extents, bool all, size_t npages_limit) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 1);
+ malloc_mutex_assert_owner(tsdn, &decay->mtx);
+
+ if (decay->purging) {
+ return;
}
+ decay->purging = true;
+ malloc_mutex_unlock(tsdn, &decay->mtx);
+
+ extent_hooks_t *extent_hooks = extent_hooks_get(arena);
+
+ extent_list_t decay_extents;
+ extent_list_init(&decay_extents);
+
+ size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents,
+ npages_limit, &decay_extents);
+ if (npurge != 0) {
+ UNUSED size_t npurged = arena_decay_stashed(tsdn, arena,
+ &extent_hooks, decay, extents, all, &decay_extents);
+ assert(npurged == npurge);
+ }
+
+ malloc_mutex_lock(tsdn, &decay->mtx);
+ decay->purging = false;
}
-/*
- * NB: ndirty_limit is interpreted differently depending on opt_purge:
- * - purge_mode_ratio: Purge as few dirty run/chunks as possible to reach the
- * desired state:
- * (arena->ndirty <= ndirty_limit)
- * - purge_mode_decay: Purge as many dirty runs/chunks as possible without
- * violating the invariant:
- * (arena->ndirty >= ndirty_limit)
- */
-static void
-arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
-{
- chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
- size_t npurge, npurged;
- arena_runs_dirty_link_t purge_runs_sentinel;
- extent_node_t purge_chunks_sentinel;
+static bool
+arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
+ extents_t *extents, bool is_background_thread, bool all) {
+ if (all) {
+ malloc_mutex_lock(tsdn, &decay->mtx);
+ arena_decay_to_limit(tsdn, arena, decay, extents, all, 0);
+ malloc_mutex_unlock(tsdn, &decay->mtx);
- arena->purging = true;
+ return false;
+ }
- /*
- * Calls to arena_dirty_count() are disabled even for debug builds
- * because overhead grows nonlinearly as memory usage increases.
- */
- if (false && config_debug) {
- size_t ndirty = arena_dirty_count(arena);
- assert(ndirty == arena->ndirty);
+ if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
+ /* No need to wait if another thread is in progress. */
+ return true;
}
- assert(opt_purge != purge_mode_ratio || (arena->nactive >>
- arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);
- qr_new(&purge_runs_sentinel, rd_link);
- extent_node_dirty_linkage_init(&purge_chunks_sentinel);
+ bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents,
+ is_background_thread);
+ size_t npages_new;
+ if (epoch_advanced) {
+ /* Backlog is updated on epoch advance. */
+ npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1];
+ }
+ malloc_mutex_unlock(tsdn, &decay->mtx);
- npurge = arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit,
- &purge_runs_sentinel, &purge_chunks_sentinel);
- if (npurge == 0)
- goto label_return;
- npurged = arena_purge_stashed(tsdn, arena, &chunk_hooks,
- &purge_runs_sentinel, &purge_chunks_sentinel);
- assert(npurged == npurge);
- arena_unstash_purged(tsdn, arena, &chunk_hooks, &purge_runs_sentinel,
- &purge_chunks_sentinel);
+ if (have_background_thread && background_thread_enabled() &&
+ epoch_advanced && !is_background_thread) {
+ background_thread_interval_check(tsdn, arena, decay, npages_new);
+ }
- if (config_stats)
- arena->stats.npurge++;
+ return false;
+}
-label_return:
- arena->purging = false;
+static bool
+arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
+ bool all) {
+ return arena_decay_impl(tsdn, arena, &arena->decay_dirty,
+ &arena->extents_dirty, is_background_thread, all);
+}
+
+static bool
+arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
+ bool all) {
+ return arena_decay_impl(tsdn, arena, &arena->decay_muzzy,
+ &arena->extents_muzzy, is_background_thread, all);
}
void
-arena_purge(tsdn_t *tsdn, arena_t *arena, bool all)
-{
+arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) {
+ if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) {
+ return;
+ }
+ arena_decay_muzzy(tsdn, arena, is_background_thread, all);
+}
+
+static void
+arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) {
+ arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE);
- malloc_mutex_lock(tsdn, &arena->lock);
- if (all)
- arena_purge_to_limit(tsdn, arena, 0);
- else
- arena_maybe_purge(tsdn, arena);
- malloc_mutex_unlock(tsdn, &arena->lock);
+ extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
+ arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, slab);
}
static void
-arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk)
-{
- size_t pageind, npages;
+arena_bin_slabs_nonfull_insert(arena_bin_t *bin, extent_t *slab) {
+ assert(extent_nfree_get(slab) > 0);
+ extent_heap_insert(&bin->slabs_nonfull, slab);
+}
- cassert(config_prof);
- assert(opt_prof);
+static void
+arena_bin_slabs_nonfull_remove(arena_bin_t *bin, extent_t *slab) {
+ extent_heap_remove(&bin->slabs_nonfull, slab);
+}
+
+static extent_t *
+arena_bin_slabs_nonfull_tryget(arena_bin_t *bin) {
+ extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull);
+ if (slab == NULL) {
+ return NULL;
+ }
+ if (config_stats) {
+ bin->stats.reslabs++;
+ }
+ return slab;
+}
+static void
+arena_bin_slabs_full_insert(arena_t *arena, arena_bin_t *bin, extent_t *slab) {
+ assert(extent_nfree_get(slab) == 0);
/*
- * Iterate over the allocated runs and remove profiled allocations from
- * the sample set.
+ * Tracking extents is required by arena_reset, which is not allowed
+ * for auto arenas. Bypass this step to avoid touching the extent
+ * linkage (often results in cache misses) for auto arenas.
*/
- for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
- if (arena_mapbits_allocated_get(chunk, pageind) != 0) {
- if (arena_mapbits_large_get(chunk, pageind) != 0) {
- void *ptr = (void *)((uintptr_t)chunk + (pageind
- << LG_PAGE));
- size_t usize = isalloc(tsd_tsdn(tsd), ptr,
- config_prof);
-
- prof_free(tsd, ptr, usize);
- npages = arena_mapbits_large_size_get(chunk,
- pageind) >> LG_PAGE;
- } else {
- /* Skip small run. */
- size_t binind = arena_mapbits_binind_get(chunk,
- pageind);
- arena_bin_info_t *bin_info =
- &arena_bin_info[binind];
- npages = bin_info->run_size >> LG_PAGE;
- }
- } else {
- /* Skip unallocated run. */
- npages = arena_mapbits_unallocated_size_get(chunk,
- pageind) >> LG_PAGE;
- }
- assert(pageind + npages <= chunk_npages);
+ if (arena_is_auto(arena)) {
+ return;
}
+ extent_list_append(&bin->slabs_full, slab);
}
-void
-arena_reset(tsd_t *tsd, arena_t *arena)
-{
- unsigned i;
- extent_node_t *node;
+static void
+arena_bin_slabs_full_remove(arena_t *arena, arena_bin_t *bin, extent_t *slab) {
+ if (arena_is_auto(arena)) {
+ return;
+ }
+ extent_list_remove(&bin->slabs_full, slab);
+}
+void
+arena_reset(tsd_t *tsd, arena_t *arena) {
/*
* Locking in this function is unintuitive. The caller guarantees that
* no concurrent operations are happening in this arena, but there are
@@ -1945,509 +1137,306 @@ arena_reset(tsd_t *tsd, arena_t *arena)
* stats refreshes would impose an inconvenient burden.
*/
- /* Remove large allocations from prof sample set. */
- if (config_prof && opt_prof) {
- ql_foreach(node, &arena->achunks, ql_link) {
- arena_achunk_prof_reset(tsd, arena,
- extent_node_addr_get(node));
- }
- }
+ /* Large allocations. */
+ malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
- /* Reset curruns for large size classes. */
- if (config_stats) {
- for (i = 0; i < nlclasses; i++)
- arena->stats.lstats[i].curruns = 0;
- }
-
- /* Huge allocations. */
- malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
- for (node = ql_last(&arena->huge, ql_link); node != NULL; node =
- ql_last(&arena->huge, ql_link)) {
- void *ptr = extent_node_addr_get(node);
+ for (extent_t *extent = extent_list_first(&arena->large); extent !=
+ NULL; extent = extent_list_first(&arena->large)) {
+ void *ptr = extent_base_get(extent);
size_t usize;
- malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
- if (config_stats || (config_prof && opt_prof))
- usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
- /* Remove huge allocation from prof sample set. */
- if (config_prof && opt_prof)
- prof_free(tsd, ptr, usize);
- huge_dalloc(tsd_tsdn(tsd), ptr);
- malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
- /* Cancel out unwanted effects on stats. */
- if (config_stats)
- arena_huge_reset_stats_cancel(arena, usize);
- }
- malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
+ alloc_ctx_t alloc_ctx;
+ rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
+ rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
+ assert(alloc_ctx.szind != NSIZES);
- malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
+ if (config_stats || (config_prof && opt_prof)) {
+ usize = sz_index2size(alloc_ctx.szind);
+ assert(usize == isalloc(tsd_tsdn(tsd), ptr));
+ }
+ /* Remove large allocation from prof sample set. */
+ if (config_prof && opt_prof) {
+ prof_free(tsd, ptr, usize, &alloc_ctx);
+ }
+ large_dalloc(tsd_tsdn(tsd), extent);
+ malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
/* Bins. */
- for (i = 0; i < NBINS; i++) {
+ for (unsigned i = 0; i < NBINS; i++) {
+ extent_t *slab;
arena_bin_t *bin = &arena->bins[i];
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
- bin->runcur = NULL;
- arena_run_heap_new(&bin->runs);
+ if (bin->slabcur != NULL) {
+ slab = bin->slabcur;
+ bin->slabcur = NULL;
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+ arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
+ malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+ }
+ while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) !=
+ NULL) {
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+ arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
+ malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+ }
+ for (slab = extent_list_first(&bin->slabs_full); slab != NULL;
+ slab = extent_list_first(&bin->slabs_full)) {
+ arena_bin_slabs_full_remove(arena, bin, slab);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+ arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
+ malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+ }
if (config_stats) {
bin->stats.curregs = 0;
- bin->stats.curruns = 0;
+ bin->stats.curslabs = 0;
}
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
}
- /*
- * Re-initialize runs_dirty such that the chunks_cache and runs_dirty
- * chains directly correspond.
- */
- qr_new(&arena->runs_dirty, rd_link);
- for (node = qr_next(&arena->chunks_cache, cc_link);
- node != &arena->chunks_cache; node = qr_next(node, cc_link)) {
- qr_new(&node->rd, rd_link);
- qr_meld(&arena->runs_dirty, &node->rd, rd_link);
- }
-
- /* Arena chunks. */
- for (node = ql_last(&arena->achunks, ql_link); node != NULL; node =
- ql_last(&arena->achunks, ql_link)) {
- ql_remove(&arena->achunks, node, ql_link);
- arena_chunk_discard(tsd_tsdn(tsd), arena,
- extent_node_addr_get(node));
- }
-
- /* Spare. */
- if (arena->spare != NULL) {
- arena_chunk_discard(tsd_tsdn(tsd), arena, arena->spare);
- arena->spare = NULL;
- }
-
- assert(!arena->purging);
- arena->nactive = 0;
-
- for (i = 0; i < NPSIZES; i++)
- arena_run_heap_new(&arena->runs_avail[i]);
-
- malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
+ atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
}
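arena_reset() repeatedly pops the first element, drops the lock, frees, and relocks; here is a self-contained sketch of that drain-loop shape, with hypothetical toy_* types and free() standing in for large_dalloc()/arena_slab_dalloc().

#include <pthread.h>
#include <stdlib.h>

typedef struct toy_node_s {
	struct toy_node_s *next;
} toy_node_t;

typedef struct {
	pthread_mutex_t lock;
	toy_node_t *head;
} toy_list_t;

/* Never hold the list lock across the (potentially slow) deallocation. */
static void
toy_list_drain(toy_list_t *list) {
	pthread_mutex_lock(&list->lock);
	for (toy_node_t *node = list->head; node != NULL; node = list->head) {
		list->head = node->next;	/* unlink while locked */
		pthread_mutex_unlock(&list->lock);
		free(node);			/* may take other locks */
		pthread_mutex_lock(&list->lock);
	}
	pthread_mutex_unlock(&list->lock);
}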
static void
-arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
- size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty,
- size_t flag_decommitted)
-{
- size_t size = *p_size;
- size_t run_ind = *p_run_ind;
- size_t run_pages = *p_run_pages;
-
- /* Try to coalesce forward. */
- if (run_ind + run_pages < chunk_npages &&
- arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
- arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty &&
- arena_mapbits_decommitted_get(chunk, run_ind+run_pages) ==
- flag_decommitted) {
- size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
- run_ind+run_pages);
- size_t nrun_pages = nrun_size >> LG_PAGE;
-
- /*
- * Remove successor from runs_avail; the coalesced run is
- * inserted later.
- */
- assert(arena_mapbits_unallocated_size_get(chunk,
- run_ind+run_pages+nrun_pages-1) == nrun_size);
- assert(arena_mapbits_dirty_get(chunk,
- run_ind+run_pages+nrun_pages-1) == flag_dirty);
- assert(arena_mapbits_decommitted_get(chunk,
- run_ind+run_pages+nrun_pages-1) == flag_decommitted);
- arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages);
-
- /*
- * If the successor is dirty, remove it from the set of dirty
- * pages.
- */
- if (flag_dirty != 0) {
- arena_run_dirty_remove(arena, chunk, run_ind+run_pages,
- nrun_pages);
- }
-
- size += nrun_size;
- run_pages += nrun_pages;
-
- arena_mapbits_unallocated_size_set(chunk, run_ind, size);
- arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
- size);
- }
-
- /* Try to coalesce backward. */
- if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
- run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
- flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) ==
- flag_decommitted) {
- size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
- run_ind-1);
- size_t prun_pages = prun_size >> LG_PAGE;
-
- run_ind -= prun_pages;
-
- /*
- * Remove predecessor from runs_avail; the coalesced run is
- * inserted later.
- */
- assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
- prun_size);
- assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
- assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
- flag_decommitted);
- arena_avail_remove(arena, chunk, run_ind, prun_pages);
-
- /*
- * If the predecessor is dirty, remove it from the set of dirty
- * pages.
- */
- if (flag_dirty != 0) {
- arena_run_dirty_remove(arena, chunk, run_ind,
- prun_pages);
- }
-
- size += prun_size;
- run_pages += prun_pages;
-
- arena_mapbits_unallocated_size_set(chunk, run_ind, size);
- arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
- size);
- }
-
- *p_size = size;
- *p_run_ind = run_ind;
- *p_run_pages = run_pages;
-}
-
-static size_t
-arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
- size_t run_ind)
-{
- size_t size;
-
- assert(run_ind >= map_bias);
- assert(run_ind < chunk_npages);
-
- if (arena_mapbits_large_get(chunk, run_ind) != 0) {
- size = arena_mapbits_large_size_get(chunk, run_ind);
- assert(size == PAGE || arena_mapbits_large_size_get(chunk,
- run_ind+(size>>LG_PAGE)-1) == 0);
- } else {
- arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
- size = bin_info->run_size;
+arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
+ /*
+ * Iterate over the retained extents and destroy them. This gives the
+ * extent allocator underlying the extent hooks an opportunity to unmap
+ * all retained memory without having to keep its own metadata
+ * structures. In practice, virtual memory for dss-allocated extents is
+ * leaked here, so best practice is to avoid dss for arenas to be
+ * destroyed, or provide custom extent hooks that track retained
+ * dss-based extents for later reuse.
+ */
+ extent_hooks_t *extent_hooks = extent_hooks_get(arena);
+ extent_t *extent;
+ while ((extent = extents_evict(tsdn, arena, &extent_hooks,
+ &arena->extents_retained, 0)) != NULL) {
+ extent_destroy_wrapper(tsdn, arena, &extent_hooks, extent);
}
-
- return (size);
}
-static void
-arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty,
- bool cleaned, bool decommitted)
-{
- arena_chunk_t *chunk;
- arena_chunk_map_misc_t *miscelm;
- size_t size, run_ind, run_pages, flag_dirty, flag_decommitted;
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- miscelm = arena_run_to_miscelm(run);
- run_ind = arena_miscelm_to_pageind(miscelm);
- assert(run_ind >= map_bias);
- assert(run_ind < chunk_npages);
- size = arena_run_size_get(arena, chunk, run, run_ind);
- run_pages = (size >> LG_PAGE);
- arena_nactive_sub(arena, run_pages);
+void
+arena_destroy(tsd_t *tsd, arena_t *arena) {
+ assert(base_ind_get(arena->base) >= narenas_auto);
+ assert(arena_nthreads_get(arena, false) == 0);
+ assert(arena_nthreads_get(arena, true) == 0);
/*
- * The run is dirty if the caller claims to have dirtied it, as well as
- * if it was already dirty before being allocated and the caller
- * doesn't claim to have cleaned it.
+ * No allocations have occurred since arena_reset() was called.
+ * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached
+ * extents, so only retained extents may remain.
*/
- assert(arena_mapbits_dirty_get(chunk, run_ind) ==
- arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
- if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind)
- != 0)
- dirty = true;
- flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
- flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0;
-
- /* Mark pages as unallocated in the chunk map. */
- if (dirty || decommitted) {
- size_t flags = flag_dirty | flag_decommitted;
- arena_mapbits_unallocated_set(chunk, run_ind, size, flags);
- arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
- flags);
- } else {
- arena_mapbits_unallocated_set(chunk, run_ind, size,
- arena_mapbits_unzeroed_get(chunk, run_ind));
- arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
- arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
- }
-
- arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages,
- flag_dirty, flag_decommitted);
-
- /* Insert into runs_avail, now that coalescing is complete. */
- assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
- arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
- assert(arena_mapbits_dirty_get(chunk, run_ind) ==
- arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
- assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
- arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1));
- arena_avail_insert(arena, chunk, run_ind, run_pages);
-
- if (dirty)
- arena_run_dirty_insert(arena, chunk, run_ind, run_pages);
+ assert(extents_npages_get(&arena->extents_dirty) == 0);
+ assert(extents_npages_get(&arena->extents_muzzy) == 0);
- /* Deallocate chunk if it is now completely unused. */
- if (size == arena_maxrun) {
- assert(run_ind == map_bias);
- assert(run_pages == (arena_maxrun >> LG_PAGE));
- arena_chunk_dalloc(tsdn, arena, chunk);
- }
+ /* Deallocate retained memory. */
+ arena_destroy_retained(tsd_tsdn(tsd), arena);
/*
- * It is okay to do dirty page processing here even if the chunk was
- * deallocated above, since in that case it is the spare. Waiting
- * until after possible chunk deallocation to do dirty processing
- * allows for an old spare to be fully deallocated, thus decreasing the
- * chances of spuriously crossing the dirty page purging threshold.
+ * Remove the arena pointer from the arenas array. We rely on the fact
+ * that there is no way for the application to get a dirty read from the
+ * arenas array unless there is an inherent race in the application
+ * involving access of an arena being concurrently destroyed. The
+ * application must synchronize knowledge of the arena's validity, so as
+ * long as we use an atomic write to update the arenas array, the
+ * application will get a clean read any time after it synchronizes
+ * knowledge that the arena is no longer valid.
*/
- if (dirty)
- arena_maybe_purge(tsdn, arena);
-}
-
-static void
-arena_run_trim_head(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- arena_run_t *run, size_t oldsize, size_t newsize)
-{
- arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
- size_t pageind = arena_miscelm_to_pageind(miscelm);
- size_t head_npages = (oldsize - newsize) >> LG_PAGE;
- size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
- size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
- size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
- CHUNK_MAP_UNZEROED : 0;
-
- assert(oldsize > newsize);
+ arena_set(base_ind_get(arena->base), NULL);
/*
- * Update the chunk map so that arena_run_dalloc() can treat the
- * leading run as separately allocated. Set the last element of each
- * run first, in case of single-page runs.
+ * Destroy the base allocator, which manages all metadata ever mapped by
+ * this arena.
*/
- assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
- arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
- (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
- pageind+head_npages-1)));
- arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty |
- (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
-
- if (config_debug) {
- UNUSED size_t tail_npages = newsize >> LG_PAGE;
- assert(arena_mapbits_large_size_get(chunk,
- pageind+head_npages+tail_npages-1) == 0);
- assert(arena_mapbits_dirty_get(chunk,
- pageind+head_npages+tail_npages-1) == flag_dirty);
- }
- arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
- flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
- pageind+head_npages)));
-
- arena_run_dalloc(tsdn, arena, run, false, false, (flag_decommitted !=
- 0));
+ base_delete(arena->base);
}
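The comment above arena_set() argues that a single atomic store is enough to unpublish the arena pointer; a minimal C11 sketch of that pattern follows, with a hypothetical toy_arenas array in place of jemalloc's arenas array.

#include <stdatomic.h>
#include <stddef.h>

#define TOY_NARENAS 16

static _Atomic(void *) toy_arenas[TOY_NARENAS];

/*
 * Readers that have synchronized their knowledge of the arena's invalidity
 * will observe NULL; the writer needs no lock, only an atomic store.
 */
static void
toy_arena_unpublish(unsigned ind) {
	atomic_store_explicit(&toy_arenas[ind], NULL, memory_order_release);
}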
-static void
-arena_run_trim_tail(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- arena_run_t *run, size_t oldsize, size_t newsize, bool dirty)
-{
- arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
- size_t pageind = arena_miscelm_to_pageind(miscelm);
- size_t head_npages = newsize >> LG_PAGE;
- size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
- size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
- size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
- CHUNK_MAP_UNZEROED : 0;
- arena_chunk_map_misc_t *tail_miscelm;
- arena_run_t *tail_run;
-
- assert(oldsize > newsize);
+static extent_t *
+arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, const arena_bin_info_t *bin_info,
+ szind_t szind) {
+ extent_t *slab;
+ bool zero, commit;
- /*
- * Update the chunk map so that arena_run_dalloc() can treat the
- * trailing run as separately allocated. Set the last element of each
- * run first, in case of single-page runs.
- */
- assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
- arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
- (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
- pageind+head_npages-1)));
- arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty |
- (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
- if (config_debug) {
- UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
- assert(arena_mapbits_large_size_get(chunk,
- pageind+head_npages+tail_npages-1) == 0);
- assert(arena_mapbits_dirty_get(chunk,
- pageind+head_npages+tail_npages-1) == flag_dirty);
+ zero = false;
+ commit = true;
+ slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL,
+ bin_info->slab_size, 0, PAGE, true, szind, &zero, &commit);
+
+ if (config_stats && slab != NULL) {
+ arena_stats_mapped_add(tsdn, &arena->stats,
+ bin_info->slab_size);
}
- arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
- flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
- pageind+head_npages)));
- tail_miscelm = arena_miscelm_get_mutable(chunk, pageind + head_npages);
- tail_run = &tail_miscelm->run;
- arena_run_dalloc(tsdn, arena, tail_run, dirty, false, (flag_decommitted
- != 0));
+ return slab;
}
-static void
-arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
-{
- arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
+static extent_t *
+arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
+ const arena_bin_info_t *bin_info) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
- arena_run_heap_insert(&bin->runs, miscelm);
-}
+ extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
+ szind_t szind = sz_size2index(bin_info->reg_size);
+ bool zero = false;
+ bool commit = true;
+ extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks,
+ &arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, true,
+ binind, &zero, &commit);
+ if (slab == NULL) {
+ slab = extents_alloc(tsdn, arena, &extent_hooks,
+ &arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE,
+ true, binind, &zero, &commit);
+ }
+ if (slab == NULL) {
+ slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks,
+ bin_info, szind);
+ if (slab == NULL) {
+ return NULL;
+ }
+ }
+ assert(extent_slab_get(slab));
-static arena_run_t *
-arena_bin_nonfull_run_tryget(arena_bin_t *bin)
-{
- arena_chunk_map_misc_t *miscelm;
+ /* Initialize slab internals. */
+ arena_slab_data_t *slab_data = extent_slab_data_get(slab);
+ extent_nfree_set(slab, bin_info->nregs);
+ bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false);
- miscelm = arena_run_heap_remove_first(&bin->runs);
- if (miscelm == NULL)
- return (NULL);
- if (config_stats)
- bin->stats.reruns++;
+ arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE);
- return (&miscelm->run);
+ return slab;
}
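arena_slab_alloc() prefers recycling dirty extents, then muzzy extents, and only then maps new memory; a sketch of that fallback order with hypothetical toy_* helpers (malloc() standing in for a fresh mapping, NULL modeling an empty recycling set):

#include <stdlib.h>

static void *toy_recycle_dirty(size_t size) { (void)size; return NULL; }
static void *toy_recycle_muzzy(size_t size) { (void)size; return NULL; }
static void *toy_map_new(size_t size) { return malloc(size); }

/* Cheapest source first: still-committed dirty memory, then muzzy, then new. */
static void *
toy_slab_alloc(size_t size) {
	void *slab = toy_recycle_dirty(size);
	if (slab == NULL) {
		slab = toy_recycle_muzzy(size);
	}
	if (slab == NULL) {
		slab = toy_map_new(size);
	}
	return slab;
}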
-static arena_run_t *
-arena_bin_nonfull_run_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
-{
- arena_run_t *run;
- szind_t binind;
- arena_bin_info_t *bin_info;
+static extent_t *
+arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
+ szind_t binind) {
+ extent_t *slab;
+ const arena_bin_info_t *bin_info;
- /* Look for a usable run. */
- run = arena_bin_nonfull_run_tryget(bin);
- if (run != NULL)
- return (run);
- /* No existing runs have any space available. */
+ /* Look for a usable slab. */
+ slab = arena_bin_slabs_nonfull_tryget(bin);
+ if (slab != NULL) {
+ return slab;
+ }
+ /* No existing slabs have any space available. */
- binind = arena_bin_index(arena, bin);
bin_info = &arena_bin_info[binind];
- /* Allocate a new run. */
+ /* Allocate a new slab. */
malloc_mutex_unlock(tsdn, &bin->lock);
/******************************/
- malloc_mutex_lock(tsdn, &arena->lock);
- run = arena_run_alloc_small(tsdn, arena, bin_info->run_size, binind);
- if (run != NULL) {
- /* Initialize run internals. */
- run->binind = binind;
- run->nfree = bin_info->nregs;
- bitmap_init(run->bitmap, &bin_info->bitmap_info);
- }
- malloc_mutex_unlock(tsdn, &arena->lock);
+ slab = arena_slab_alloc(tsdn, arena, binind, bin_info);
/********************************/
malloc_mutex_lock(tsdn, &bin->lock);
- if (run != NULL) {
+ if (slab != NULL) {
if (config_stats) {
- bin->stats.nruns++;
- bin->stats.curruns++;
+ bin->stats.nslabs++;
+ bin->stats.curslabs++;
}
- return (run);
+ return slab;
}
/*
- * arena_run_alloc_small() failed, but another thread may have made
+ * arena_slab_alloc() failed, but another thread may have made
* sufficient memory available while this one dropped bin->lock above,
* so search one more time.
*/
- run = arena_bin_nonfull_run_tryget(bin);
- if (run != NULL)
- return (run);
+ slab = arena_bin_slabs_nonfull_tryget(bin);
+ if (slab != NULL) {
+ return slab;
+ }
- return (NULL);
+ return NULL;
}
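arena_bin_nonfull_slab_get() drops the bin lock around the slow allocation and therefore re-checks the nonfull heap afterward; the same shape in a self-contained sketch, with hypothetical toy_* names and a single cached pointer standing in for bin->slabs_nonfull:

#include <pthread.h>
#include <stdlib.h>

typedef struct {
	pthread_mutex_t lock;
	void *cached;
} toy_bin_t;

/* Caller holds bin->lock on entry and on return. */
static void *
toy_bin_get(toy_bin_t *bin) {
	if (bin->cached != NULL) {
		void *ret = bin->cached;
		bin->cached = NULL;
		return ret;
	}
	pthread_mutex_unlock(&bin->lock);
	void *fresh = malloc(4096);	/* slow path, lock not held */
	pthread_mutex_lock(&bin->lock);
	if (fresh != NULL) {
		return fresh;
	}
	/* Another thread may have refilled the cache while the lock was dropped. */
	if (bin->cached != NULL) {
		void *ret = bin->cached;
		bin->cached = NULL;
		return ret;
	}
	return NULL;
}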
-/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
+/* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */
static void *
-arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
-{
- szind_t binind;
- arena_bin_info_t *bin_info;
- arena_run_t *run;
+arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
+ szind_t binind) {
+ const arena_bin_info_t *bin_info;
+ extent_t *slab;
- binind = arena_bin_index(arena, bin);
bin_info = &arena_bin_info[binind];
- bin->runcur = NULL;
- run = arena_bin_nonfull_run_get(tsdn, arena, bin);
- if (bin->runcur != NULL && bin->runcur->nfree > 0) {
+ if (!arena_is_auto(arena) && bin->slabcur != NULL) {
+ arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
+ bin->slabcur = NULL;
+ }
+ slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind);
+ if (bin->slabcur != NULL) {
/*
- * Another thread updated runcur while this one ran without the
- * bin lock in arena_bin_nonfull_run_get().
+ * Another thread updated slabcur while this one ran without the
+ * bin lock in arena_bin_nonfull_slab_get().
*/
- void *ret;
- assert(bin->runcur->nfree > 0);
- ret = arena_run_reg_alloc(bin->runcur, bin_info);
- if (run != NULL) {
- arena_chunk_t *chunk;
-
- /*
- * arena_run_alloc_small() may have allocated run, or
- * it may have pulled run from the bin's run tree.
- * Therefore it is unsafe to make any assumptions about
- * how run has previously been used, and
- * arena_bin_lower_run() must be called, as if a region
- * were just deallocated from the run.
- */
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- if (run->nfree == bin_info->nregs) {
- arena_dalloc_bin_run(tsdn, arena, chunk, run,
- bin);
- } else
- arena_bin_lower_run(arena, run, bin);
+ if (extent_nfree_get(bin->slabcur) > 0) {
+ void *ret = arena_slab_reg_alloc(tsdn, bin->slabcur,
+ bin_info);
+ if (slab != NULL) {
+ /*
+ * arena_slab_alloc() may have allocated slab,
+ * or it may have been pulled from
+ * slabs_nonfull. Therefore it is unsafe to
+ * make any assumptions about how slab has
+ * previously been used, and
+ * arena_bin_lower_slab() must be called, as if
+ * a region were just deallocated from the slab.
+ */
+ if (extent_nfree_get(slab) == bin_info->nregs) {
+ arena_dalloc_bin_slab(tsdn, arena, slab,
+ bin);
+ } else {
+ arena_bin_lower_slab(tsdn, arena, slab,
+ bin);
+ }
+ }
+ return ret;
}
- return (ret);
- }
- if (run == NULL)
- return (NULL);
+ arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
+ bin->slabcur = NULL;
+ }
- bin->runcur = run;
+ if (slab == NULL) {
+ return NULL;
+ }
+ bin->slabcur = slab;
- assert(bin->runcur->nfree > 0);
+ assert(extent_nfree_get(bin->slabcur) > 0);
- return (arena_run_reg_alloc(bin->runcur, bin_info));
+ return arena_slab_reg_alloc(tsdn, slab, bin_info);
}
void
-arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
- szind_t binind, uint64_t prof_accumbytes)
-{
+arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
+ tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) {
unsigned i, nfill;
arena_bin_t *bin;
assert(tbin->ncached == 0);
- if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes))
+ if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) {
prof_idump(tsdn);
+ }
bin = &arena->bins[binind];
malloc_mutex_lock(tsdn, &bin->lock);
for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
- tbin->lg_fill_div); i < nfill; i++) {
- arena_run_t *run;
+ tcache->lg_fill_div[binind]); i < nfill; i++) {
+ extent_t *slab;
void *ptr;
- if ((run = bin->runcur) != NULL && run->nfree > 0)
- ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
- else
- ptr = arena_bin_malloc_hard(tsdn, arena, bin);
+ if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) >
+ 0) {
+ ptr = arena_slab_reg_alloc(tsdn, slab,
+ &arena_bin_info[binind]);
+ } else {
+ ptr = arena_bin_malloc_hard(tsdn, arena, bin, binind);
+ }
if (ptr == NULL) {
/*
* OOM. tbin->avail isn't yet filled down to its first
@@ -2480,134 +1469,40 @@ arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
}
void
-arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
-{
-
- size_t redzone_size = bin_info->redzone_size;
-
- if (zero) {
- memset((void *)((uintptr_t)ptr - redzone_size),
- JEMALLOC_ALLOC_JUNK, redzone_size);
- memset((void *)((uintptr_t)ptr + bin_info->reg_size),
- JEMALLOC_ALLOC_JUNK, redzone_size);
- } else {
- memset((void *)((uintptr_t)ptr - redzone_size),
- JEMALLOC_ALLOC_JUNK, bin_info->reg_interval);
+arena_alloc_junk_small(void *ptr, const arena_bin_info_t *bin_info, bool zero) {
+ if (!zero) {
+ memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size);
}
}
-#ifdef JEMALLOC_JET
-#undef arena_redzone_corruption
-#define arena_redzone_corruption JEMALLOC_N(n_arena_redzone_corruption)
-#endif
-static void
-arena_redzone_corruption(void *ptr, size_t usize, bool after,
- size_t offset, uint8_t byte)
-{
-
- malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
- "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
- after ? "after" : "before", ptr, usize, byte);
-}
-#ifdef JEMALLOC_JET
-#undef arena_redzone_corruption
-#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
-arena_redzone_corruption_t *arena_redzone_corruption =
- JEMALLOC_N(n_arena_redzone_corruption);
-#endif
-
static void
-arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
-{
- bool error = false;
-
- if (opt_junk_alloc) {
- size_t size = bin_info->reg_size;
- size_t redzone_size = bin_info->redzone_size;
- size_t i;
-
- for (i = 1; i <= redzone_size; i++) {
- uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
- if (*byte != JEMALLOC_ALLOC_JUNK) {
- error = true;
- arena_redzone_corruption(ptr, size, false, i,
- *byte);
- if (reset)
- *byte = JEMALLOC_ALLOC_JUNK;
- }
- }
- for (i = 0; i < redzone_size; i++) {
- uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
- if (*byte != JEMALLOC_ALLOC_JUNK) {
- error = true;
- arena_redzone_corruption(ptr, size, true, i,
- *byte);
- if (reset)
- *byte = JEMALLOC_ALLOC_JUNK;
- }
- }
- }
-
- if (opt_abort && error)
- abort();
-}
-
-#ifdef JEMALLOC_JET
-#undef arena_dalloc_junk_small
-#define arena_dalloc_junk_small JEMALLOC_N(n_arena_dalloc_junk_small)
-#endif
-void
-arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
-{
- size_t redzone_size = bin_info->redzone_size;
-
- arena_redzones_validate(ptr, bin_info, false);
- memset((void *)((uintptr_t)ptr - redzone_size), JEMALLOC_FREE_JUNK,
- bin_info->reg_interval);
-}
-#ifdef JEMALLOC_JET
-#undef arena_dalloc_junk_small
-#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
-arena_dalloc_junk_small_t *arena_dalloc_junk_small =
- JEMALLOC_N(n_arena_dalloc_junk_small);
-#endif
-
-void
-arena_quarantine_junk_small(void *ptr, size_t usize)
-{
- szind_t binind;
- arena_bin_info_t *bin_info;
- cassert(config_fill);
- assert(opt_junk_free);
- assert(opt_quarantine);
- assert(usize <= SMALL_MAXCLASS);
-
- binind = size2index(usize);
- bin_info = &arena_bin_info[binind];
- arena_redzones_validate(ptr, bin_info, true);
+arena_dalloc_junk_small_impl(void *ptr, const arena_bin_info_t *bin_info) {
+ memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size);
}
+arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small =
+ arena_dalloc_junk_small_impl;
static void *
-arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
-{
+arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
void *ret;
arena_bin_t *bin;
size_t usize;
- arena_run_t *run;
+ extent_t *slab;
assert(binind < NBINS);
bin = &arena->bins[binind];
- usize = index2size(binind);
+ usize = sz_index2size(binind);
malloc_mutex_lock(tsdn, &bin->lock);
- if ((run = bin->runcur) != NULL && run->nfree > 0)
- ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
- else
- ret = arena_bin_malloc_hard(tsdn, arena, bin);
+ if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) {
+ ret = arena_slab_reg_alloc(tsdn, slab, &arena_bin_info[binind]);
+ } else {
+ ret = arena_bin_malloc_hard(tsdn, arena, bin, binind);
+ }
if (ret == NULL) {
malloc_mutex_unlock(tsdn, &bin->lock);
- return (NULL);
+ return NULL;
}
if (config_stats) {
@@ -2616,356 +1511,210 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
bin->stats.curregs++;
}
malloc_mutex_unlock(tsdn, &bin->lock);
- if (config_prof && !isthreaded && arena_prof_accum(tsdn, arena, usize))
+ if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
prof_idump(tsdn);
+ }
if (!zero) {
if (config_fill) {
if (unlikely(opt_junk_alloc)) {
arena_alloc_junk_small(ret,
&arena_bin_info[binind], false);
- } else if (unlikely(opt_zero))
+ } else if (unlikely(opt_zero)) {
memset(ret, 0, usize);
+ }
}
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
} else {
if (config_fill && unlikely(opt_junk_alloc)) {
arena_alloc_junk_small(ret, &arena_bin_info[binind],
true);
}
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
memset(ret, 0, usize);
}
arena_decay_tick(tsdn, arena);
- return (ret);
-}
-
-void *
-arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
-{
- void *ret;
- size_t usize;
- uintptr_t random_offset;
- arena_run_t *run;
- arena_chunk_map_misc_t *miscelm;
- UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
-
- /* Large allocation. */
- usize = index2size(binind);
- malloc_mutex_lock(tsdn, &arena->lock);
- if (config_cache_oblivious) {
- uint64_t r;
-
- /*
- * Compute a uniformly distributed offset within the first page
- * that is a multiple of the cacheline size, e.g. [0 .. 64) * 64
- * for 4 KiB pages and 64-byte cachelines.
- */
- r = prng_lg_range_zu(&arena->offset_state, LG_PAGE -
- LG_CACHELINE, false);
- random_offset = ((uintptr_t)r) << LG_CACHELINE;
- } else
- random_offset = 0;
- run = arena_run_alloc_large(tsdn, arena, usize + large_pad, zero);
- if (run == NULL) {
- malloc_mutex_unlock(tsdn, &arena->lock);
- return (NULL);
- }
- miscelm = arena_run_to_miscelm(run);
- ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) +
- random_offset);
- if (config_stats) {
- szind_t index = binind - NBINS;
-
- arena->stats.nmalloc_large++;
- arena->stats.nrequests_large++;
- arena->stats.allocated_large += usize;
- arena->stats.lstats[index].nmalloc++;
- arena->stats.lstats[index].nrequests++;
- arena->stats.lstats[index].curruns++;
- }
- if (config_prof)
- idump = arena_prof_accum_locked(arena, usize);
- malloc_mutex_unlock(tsdn, &arena->lock);
- if (config_prof && idump)
- prof_idump(tsdn);
-
- if (!zero) {
- if (config_fill) {
- if (unlikely(opt_junk_alloc))
- memset(ret, JEMALLOC_ALLOC_JUNK, usize);
- else if (unlikely(opt_zero))
- memset(ret, 0, usize);
- }
- }
-
- arena_decay_tick(tsdn, arena);
- return (ret);
+ return ret;
}
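The removed arena_malloc_large() comment describes a uniformly distributed, cacheline-aligned offset within the first page; as a worked sketch of that computation (hypothetical TOY_* constants, plain rand() standing in for the arena's PRNG), r is drawn from [0 .. 64) and the offset is one of {0, 64, ..., 4032} for 4 KiB pages and 64-byte cachelines.

#include <stdint.h>
#include <stdlib.h>

#define TOY_LG_PAGE		12	/* 4 KiB pages */
#define TOY_LG_CACHELINE	6	/* 64-byte cachelines */

static uintptr_t
toy_random_offset(void) {
	/* 2^(LG_PAGE - LG_CACHELINE) = 64 cachelines per page. */
	uint64_t r = (uint64_t)rand() % (1u << (TOY_LG_PAGE - TOY_LG_CACHELINE));
	return (uintptr_t)r << TOY_LG_CACHELINE;
}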
void *
arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
- bool zero)
-{
-
+ bool zero) {
assert(!tsdn_null(tsdn) || arena != NULL);
- if (likely(!tsdn_null(tsdn)))
+ if (likely(!tsdn_null(tsdn))) {
arena = arena_choose(tsdn_tsd(tsdn), arena);
- if (unlikely(arena == NULL))
- return (NULL);
-
- if (likely(size <= SMALL_MAXCLASS))
- return (arena_malloc_small(tsdn, arena, ind, zero));
- if (likely(size <= large_maxclass))
- return (arena_malloc_large(tsdn, arena, ind, zero));
- assert(index2size(ind) >= chunksize);
- return (huge_malloc(tsdn, arena, index2size(ind), zero));
-}
-
-/* Only handles large allocations that require more than page alignment. */
-static void *
-arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
- bool zero)
-{
- void *ret;
- size_t alloc_size, leadsize, trailsize;
- arena_run_t *run;
- arena_chunk_t *chunk;
- arena_chunk_map_misc_t *miscelm;
- void *rpages;
-
- assert(!tsdn_null(tsdn) || arena != NULL);
- assert(usize == PAGE_CEILING(usize));
-
- if (likely(!tsdn_null(tsdn)))
- arena = arena_choose(tsdn_tsd(tsdn), arena);
- if (unlikely(arena == NULL))
- return (NULL);
-
- alignment = PAGE_CEILING(alignment);
- alloc_size = usize + large_pad + alignment - PAGE;
-
- malloc_mutex_lock(tsdn, &arena->lock);
- run = arena_run_alloc_large(tsdn, arena, alloc_size, false);
- if (run == NULL) {
- malloc_mutex_unlock(tsdn, &arena->lock);
- return (NULL);
- }
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- miscelm = arena_run_to_miscelm(run);
- rpages = arena_miscelm_to_rpages(miscelm);
-
- leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) -
- (uintptr_t)rpages;
- assert(alloc_size >= leadsize + usize);
- trailsize = alloc_size - leadsize - usize - large_pad;
- if (leadsize != 0) {
- arena_chunk_map_misc_t *head_miscelm = miscelm;
- arena_run_t *head_run = run;
-
- miscelm = arena_miscelm_get_mutable(chunk,
- arena_miscelm_to_pageind(head_miscelm) + (leadsize >>
- LG_PAGE));
- run = &miscelm->run;
-
- arena_run_trim_head(tsdn, arena, chunk, head_run, alloc_size,
- alloc_size - leadsize);
- }
- if (trailsize != 0) {
- arena_run_trim_tail(tsdn, arena, chunk, run, usize + large_pad +
- trailsize, usize + large_pad, false);
- }
- if (arena_run_init_large(arena, run, usize + large_pad, zero)) {
- size_t run_ind =
- arena_miscelm_to_pageind(arena_run_to_miscelm(run));
- bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0);
- bool decommitted = (arena_mapbits_decommitted_get(chunk,
- run_ind) != 0);
-
- assert(decommitted); /* Cause of OOM. */
- arena_run_dalloc(tsdn, arena, run, dirty, false, decommitted);
- malloc_mutex_unlock(tsdn, &arena->lock);
- return (NULL);
- }
- ret = arena_miscelm_to_rpages(miscelm);
-
- if (config_stats) {
- szind_t index = size2index(usize) - NBINS;
-
- arena->stats.nmalloc_large++;
- arena->stats.nrequests_large++;
- arena->stats.allocated_large += usize;
- arena->stats.lstats[index].nmalloc++;
- arena->stats.lstats[index].nrequests++;
- arena->stats.lstats[index].curruns++;
}
- malloc_mutex_unlock(tsdn, &arena->lock);
+ if (unlikely(arena == NULL)) {
+ return NULL;
+ }
- if (config_fill && !zero) {
- if (unlikely(opt_junk_alloc))
- memset(ret, JEMALLOC_ALLOC_JUNK, usize);
- else if (unlikely(opt_zero))
- memset(ret, 0, usize);
+ if (likely(size <= SMALL_MAXCLASS)) {
+ return arena_malloc_small(tsdn, arena, ind, zero);
}
- arena_decay_tick(tsdn, arena);
- return (ret);
+ return large_malloc(tsdn, arena, sz_index2size(ind), zero);
}
void *
arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
- bool zero, tcache_t *tcache)
-{
+ bool zero, tcache_t *tcache) {
void *ret;
if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
&& (usize & PAGE_MASK) == 0))) {
- /* Small; alignment doesn't require special run placement. */
- ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
- tcache, true);
- } else if (usize <= large_maxclass && alignment <= PAGE) {
- /*
- * Large; alignment doesn't require special run placement.
- * However, the cached pointer may be at a random offset from
- * the base of the run, so do some bit manipulation to retrieve
- * the base.
- */
- ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
- tcache, true);
- if (config_cache_oblivious)
- ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
+ /* Small; alignment doesn't require special slab placement. */
+ ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize),
+ zero, tcache, true);
} else {
- if (likely(usize <= large_maxclass)) {
- ret = arena_palloc_large(tsdn, arena, usize, alignment,
- zero);
- } else if (likely(alignment <= chunksize))
- ret = huge_malloc(tsdn, arena, usize, zero);
- else {
- ret = huge_palloc(tsdn, arena, usize, alignment, zero);
+ if (likely(alignment <= CACHELINE)) {
+ ret = large_malloc(tsdn, arena, usize, zero);
+ } else {
+ ret = large_palloc(tsdn, arena, usize, alignment, zero);
}
}
- return (ret);
+ return ret;
}
void
-arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size)
-{
- arena_chunk_t *chunk;
- size_t pageind;
- szind_t binind;
+arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize) {
+ cassert(config_prof);
+ assert(ptr != NULL);
+ assert(isalloc(tsdn, ptr) == LARGE_MINCLASS);
+ assert(usize <= SMALL_MAXCLASS);
+
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+
+ extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, true);
+ arena_t *arena = extent_arena_get(extent);
+
+ szind_t szind = sz_size2index(usize);
+ extent_szind_set(extent, szind);
+ rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
+ szind, false);
+ prof_accum_cancel(tsdn, &arena->prof_accum, usize);
+
+ assert(isalloc(tsdn, ptr) == usize);
+}
+
+static size_t
+arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) {
cassert(config_prof);
assert(ptr != NULL);
- assert(CHUNK_ADDR2BASE(ptr) != ptr);
- assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
- assert(isalloc(tsdn, ptr, true) == LARGE_MINCLASS);
- assert(size <= SMALL_MAXCLASS);
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- binind = size2index(size);
- assert(binind < NBINS);
- arena_mapbits_large_binind_set(chunk, pageind, binind);
- assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
- assert(isalloc(tsdn, ptr, true) == size);
+ extent_szind_set(extent, NBINS);
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+ rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
+ NBINS, false);
+
+ assert(isalloc(tsdn, ptr) == LARGE_MINCLASS);
+
+ return LARGE_MINCLASS;
+}
+
+void
+arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
+ bool slow_path) {
+ cassert(config_prof);
+ assert(opt_prof);
+
+ extent_t *extent = iealloc(tsdn, ptr);
+ size_t usize = arena_prof_demote(tsdn, extent, ptr);
+ if (usize <= tcache_maxclass) {
+ tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
+ sz_size2index(usize), slow_path);
+ } else {
+ large_dalloc(tsdn, extent);
+ }
}
static void
-arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
- arena_bin_t *bin)
-{
-
- /* Dissociate run from bin. */
- if (run == bin->runcur)
- bin->runcur = NULL;
- else {
- szind_t binind = arena_bin_index(extent_node_arena_get(
- &chunk->node), bin);
- arena_bin_info_t *bin_info = &arena_bin_info[binind];
+arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, arena_bin_t *bin) {
+ /* Dissociate slab from bin. */
+ if (slab == bin->slabcur) {
+ bin->slabcur = NULL;
+ } else {
+ szind_t binind = extent_szind_get(slab);
+ const arena_bin_info_t *bin_info = &arena_bin_info[binind];
/*
* The following block's conditional is necessary because if the
- * run only contains one region, then it never gets inserted
- * into the non-full runs tree.
+ * slab only contains one region, then it never gets inserted
+ * into the non-full slabs heap.
*/
- if (bin_info->nregs != 1) {
- arena_chunk_map_misc_t *miscelm =
- arena_run_to_miscelm(run);
-
- arena_run_heap_remove(&bin->runs, miscelm);
+ if (bin_info->nregs == 1) {
+ arena_bin_slabs_full_remove(arena, bin, slab);
+ } else {
+ arena_bin_slabs_nonfull_remove(bin, slab);
}
}
}
static void
-arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- arena_run_t *run, arena_bin_t *bin)
-{
-
- assert(run != bin->runcur);
+arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
+ arena_bin_t *bin) {
+ assert(slab != bin->slabcur);
malloc_mutex_unlock(tsdn, &bin->lock);
/******************************/
- malloc_mutex_lock(tsdn, &arena->lock);
- arena_run_dalloc(tsdn, arena, run, true, false, false);
- malloc_mutex_unlock(tsdn, &arena->lock);
+ arena_slab_dalloc(tsdn, arena, slab);
/****************************/
malloc_mutex_lock(tsdn, &bin->lock);
- if (config_stats)
- bin->stats.curruns--;
+ if (config_stats) {
+ bin->stats.curslabs--;
+ }
}
static void
-arena_bin_lower_run(arena_t *arena, arena_run_t *run, arena_bin_t *bin)
-{
+arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
+ arena_bin_t *bin) {
+ assert(extent_nfree_get(slab) > 0);
/*
- * Make sure that if bin->runcur is non-NULL, it refers to the
- * oldest/lowest non-full run. It is okay to NULL runcur out rather
+ * Make sure that if bin->slabcur is non-NULL, it refers to the
+ * oldest/lowest non-full slab. It is okay to NULL slabcur out rather
* than proactively keeping it pointing at the oldest/lowest non-full
- * run.
+ * slab.
*/
- if (bin->runcur != NULL &&
- arena_snad_comp(arena_run_to_miscelm(bin->runcur),
- arena_run_to_miscelm(run)) > 0) {
- /* Switch runcur. */
- if (bin->runcur->nfree > 0)
- arena_bin_runs_insert(bin, bin->runcur);
- bin->runcur = run;
- if (config_stats)
- bin->stats.reruns++;
- } else
- arena_bin_runs_insert(bin, run);
+ if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) {
+ /* Switch slabcur. */
+ if (extent_nfree_get(bin->slabcur) > 0) {
+ arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
+ } else {
+ arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
+ }
+ bin->slabcur = slab;
+ if (config_stats) {
+ bin->stats.reslabs++;
+ }
+ } else {
+ arena_bin_slabs_nonfull_insert(bin, slab);
+ }
}
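arena_bin_lower_slab() prefers the oldest/lowest slab via extent_snad_comp(); a sketch of that ordering as a plain comparator, with a hypothetical toy_slab_t carrying a serial number and base address:

#include <stdbool.h>
#include <stdint.h>

typedef struct {
	uint64_t serial;	/* allocation order: smaller means older */
	uintptr_t addr;		/* base address */
} toy_slab_t;

/* True when a should be preferred: older first, ties broken by lower address. */
static bool
toy_snad_before(const toy_slab_t *a, const toy_slab_t *b) {
	if (a->serial != b->serial) {
		return a->serial < b->serial;
	}
	return a->addr < b->addr;
}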
static void
-arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- void *ptr, arena_chunk_map_bits_t *bitselm, bool junked)
-{
- size_t pageind, rpages_ind;
- arena_run_t *run;
- arena_bin_t *bin;
- arena_bin_info_t *bin_info;
- szind_t binind;
-
- pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
- run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
- binind = run->binind;
- bin = &arena->bins[binind];
- bin_info = &arena_bin_info[binind];
-
- if (!junked && config_fill && unlikely(opt_junk_free))
+arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
+ void *ptr, bool junked) {
+ arena_slab_data_t *slab_data = extent_slab_data_get(slab);
+ szind_t binind = extent_szind_get(slab);
+ arena_bin_t *bin = &arena->bins[binind];
+ const arena_bin_info_t *bin_info = &arena_bin_info[binind];
+
+ if (!junked && config_fill && unlikely(opt_junk_free)) {
arena_dalloc_junk_small(ptr, bin_info);
+ }
- arena_run_reg_dalloc(run, ptr);
- if (run->nfree == bin_info->nregs) {
- arena_dissociate_bin_run(chunk, run, bin);
- arena_dalloc_bin_run(tsdn, arena, chunk, run, bin);
- } else if (run->nfree == 1 && run != bin->runcur)
- arena_bin_lower_run(arena, run, bin);
+ arena_slab_reg_dalloc(tsdn, slab, slab_data, ptr);
+ unsigned nfree = extent_nfree_get(slab);
+ if (nfree == bin_info->nregs) {
+ arena_dissociate_bin_slab(arena, slab, bin);
+ arena_dalloc_bin_slab(tsdn, arena, slab, bin);
+ } else if (nfree == 1 && slab != bin->slabcur) {
+ arena_bin_slabs_full_remove(arena, bin, slab);
+ arena_bin_lower_slab(tsdn, arena, slab, bin);
+ }
if (config_stats) {
bin->stats.ndalloc++;
@@ -2974,633 +1723,224 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
}
void
-arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
- arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm)
-{
-
- arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, true);
+arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+ void *ptr) {
+ arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, true);
}
-void
-arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr,
- size_t pageind, arena_chunk_map_bits_t *bitselm)
-{
- arena_run_t *run;
- arena_bin_t *bin;
- size_t rpages_ind;
+static void
+arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) {
+ szind_t binind = extent_szind_get(extent);
+ arena_bin_t *bin = &arena->bins[binind];
- rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
- run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
- bin = &arena->bins[run->binind];
malloc_mutex_lock(tsdn, &bin->lock);
- arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, false);
+ arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, false);
malloc_mutex_unlock(tsdn, &bin->lock);
}
void
-arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- void *ptr, size_t pageind)
-{
- arena_chunk_map_bits_t *bitselm;
+arena_dalloc_small(tsdn_t *tsdn, void *ptr) {
+ extent_t *extent = iealloc(tsdn, ptr);
+ arena_t *arena = extent_arena_get(extent);
- if (config_debug) {
- /* arena_ptr_small_binind_get() does extra sanity checking. */
- assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
- pageind)) != BININD_INVALID);
- }
- bitselm = arena_bitselm_get_mutable(chunk, pageind);
- arena_dalloc_bin(tsdn, arena, chunk, ptr, pageind, bitselm);
+ arena_dalloc_bin(tsdn, arena, extent, ptr);
arena_decay_tick(tsdn, arena);
}
-#ifdef JEMALLOC_JET
-#undef arena_dalloc_junk_large
-#define arena_dalloc_junk_large JEMALLOC_N(n_arena_dalloc_junk_large)
-#endif
-void
-arena_dalloc_junk_large(void *ptr, size_t usize)
-{
-
- if (config_fill && unlikely(opt_junk_free))
- memset(ptr, JEMALLOC_FREE_JUNK, usize);
-}
-#ifdef JEMALLOC_JET
-#undef arena_dalloc_junk_large
-#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
-arena_dalloc_junk_large_t *arena_dalloc_junk_large =
- JEMALLOC_N(n_arena_dalloc_junk_large);
-#endif
-
-static void
-arena_dalloc_large_locked_impl(tsdn_t *tsdn, arena_t *arena,
- arena_chunk_t *chunk, void *ptr, bool junked)
-{
- size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
- pageind);
- arena_run_t *run = &miscelm->run;
-
- if (config_fill || config_stats) {
- size_t usize = arena_mapbits_large_size_get(chunk, pageind) -
- large_pad;
-
- if (!junked)
- arena_dalloc_junk_large(ptr, usize);
- if (config_stats) {
- szind_t index = size2index(usize) - NBINS;
-
- arena->stats.ndalloc_large++;
- arena->stats.allocated_large -= usize;
- arena->stats.lstats[index].ndalloc++;
- arena->stats.lstats[index].curruns--;
- }
- }
-
- arena_run_dalloc(tsdn, arena, run, true, false, false);
-}
-
-void
-arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
- arena_chunk_t *chunk, void *ptr)
-{
-
- arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, true);
-}
-
-void
-arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- void *ptr)
-{
-
- malloc_mutex_lock(tsdn, &arena->lock);
- arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, false);
- malloc_mutex_unlock(tsdn, &arena->lock);
- arena_decay_tick(tsdn, arena);
-}
-
-static void
-arena_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- void *ptr, size_t oldsize, size_t size)
-{
- size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
- pageind);
- arena_run_t *run = &miscelm->run;
-
- assert(size < oldsize);
-
- /*
- * Shrink the run, and make trailing pages available for other
- * allocations.
- */
- malloc_mutex_lock(tsdn, &arena->lock);
- arena_run_trim_tail(tsdn, arena, chunk, run, oldsize + large_pad, size +
- large_pad, true);
- if (config_stats) {
- szind_t oldindex = size2index(oldsize) - NBINS;
- szind_t index = size2index(size) - NBINS;
-
- arena->stats.ndalloc_large++;
- arena->stats.allocated_large -= oldsize;
- arena->stats.lstats[oldindex].ndalloc++;
- arena->stats.lstats[oldindex].curruns--;
-
- arena->stats.nmalloc_large++;
- arena->stats.nrequests_large++;
- arena->stats.allocated_large += size;
- arena->stats.lstats[index].nmalloc++;
- arena->stats.lstats[index].nrequests++;
- arena->stats.lstats[index].curruns++;
- }
- malloc_mutex_unlock(tsdn, &arena->lock);
-}
-
-static bool
-arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
-{
- size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- size_t npages = (oldsize + large_pad) >> LG_PAGE;
- size_t followsize;
-
- assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) -
- large_pad);
-
- /* Try to extend the run. */
- malloc_mutex_lock(tsdn, &arena->lock);
- if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk,
- pageind+npages) != 0)
- goto label_fail;
- followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages);
- if (oldsize + followsize >= usize_min) {
- /*
- * The next run is available and sufficiently large. Split the
- * following run, then merge the first part with the existing
- * allocation.
- */
- arena_run_t *run;
- size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask;
-
- usize = usize_max;
- while (oldsize + followsize < usize)
- usize = index2size(size2index(usize)-1);
- assert(usize >= usize_min);
- assert(usize >= oldsize);
- splitsize = usize - oldsize;
- if (splitsize == 0)
- goto label_fail;
-
- run = &arena_miscelm_get_mutable(chunk, pageind+npages)->run;
- if (arena_run_split_large(arena, run, splitsize, zero))
- goto label_fail;
-
- if (config_cache_oblivious && zero) {
- /*
- * Zero the trailing bytes of the original allocation's
- * last page, since they are in an indeterminate state.
- * There will always be trailing bytes, because ptr's
- * offset from the beginning of the run is a multiple of
- * CACHELINE in [0 .. PAGE).
- */
- void *zbase = (void *)((uintptr_t)ptr + oldsize);
- void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
- PAGE));
- size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
- assert(nzero > 0);
- memset(zbase, 0, nzero);
- }
-
- size = oldsize + splitsize;
- npages = (size + large_pad) >> LG_PAGE;
-
- /*
- * Mark the extended run as dirty if either portion of the run
- * was dirty before allocation. This is rather pedantic,
- * because there's not actually any sequence of events that
- * could cause the resulting run to be passed to
- * arena_run_dalloc() with the dirty argument set to false
- * (which is when dirty flag consistency would really matter).
- */
- flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
- arena_mapbits_dirty_get(chunk, pageind+npages-1);
- flag_unzeroed_mask = flag_dirty == 0 ? CHUNK_MAP_UNZEROED : 0;
- arena_mapbits_large_set(chunk, pageind, size + large_pad,
- flag_dirty | (flag_unzeroed_mask &
- arena_mapbits_unzeroed_get(chunk, pageind)));
- arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty |
- (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
- pageind+npages-1)));
-
- if (config_stats) {
- szind_t oldindex = size2index(oldsize) - NBINS;
- szind_t index = size2index(size) - NBINS;
-
- arena->stats.ndalloc_large++;
- arena->stats.allocated_large -= oldsize;
- arena->stats.lstats[oldindex].ndalloc++;
- arena->stats.lstats[oldindex].curruns--;
-
- arena->stats.nmalloc_large++;
- arena->stats.nrequests_large++;
- arena->stats.allocated_large += size;
- arena->stats.lstats[index].nmalloc++;
- arena->stats.lstats[index].nrequests++;
- arena->stats.lstats[index].curruns++;
- }
- malloc_mutex_unlock(tsdn, &arena->lock);
- return (false);
- }
-label_fail:
- malloc_mutex_unlock(tsdn, &arena->lock);
- return (true);
-}
-
-#ifdef JEMALLOC_JET
-#undef arena_ralloc_junk_large
-#define arena_ralloc_junk_large JEMALLOC_N(n_arena_ralloc_junk_large)
-#endif
-static void
-arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
-{
-
- if (config_fill && unlikely(opt_junk_free)) {
- memset((void *)((uintptr_t)ptr + usize), JEMALLOC_FREE_JUNK,
- old_usize - usize);
- }
-}
-#ifdef JEMALLOC_JET
-#undef arena_ralloc_junk_large
-#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
-arena_ralloc_junk_large_t *arena_ralloc_junk_large =
- JEMALLOC_N(n_arena_ralloc_junk_large);
-#endif
-
-/*
- * Try to resize a large allocation, in order to avoid copying. This will
- * always fail if growing an object, and the following run is already in use.
- */
-static bool
-arena_ralloc_large(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
- size_t usize_max, bool zero)
-{
- arena_chunk_t *chunk;
- arena_t *arena;
-
- if (oldsize == usize_max) {
- /* Current size class is compatible and maximal. */
- return (false);
- }
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- arena = extent_node_arena_get(&chunk->node);
-
- if (oldsize < usize_max) {
- bool ret = arena_ralloc_large_grow(tsdn, arena, chunk, ptr,
- oldsize, usize_min, usize_max, zero);
- if (config_fill && !ret && !zero) {
- if (unlikely(opt_junk_alloc)) {
- memset((void *)((uintptr_t)ptr + oldsize),
- JEMALLOC_ALLOC_JUNK,
- isalloc(tsdn, ptr, config_prof) - oldsize);
- } else if (unlikely(opt_zero)) {
- memset((void *)((uintptr_t)ptr + oldsize), 0,
- isalloc(tsdn, ptr, config_prof) - oldsize);
- }
- }
- return (ret);
- }
-
- assert(oldsize > usize_max);
- /* Fill before shrinking in order to avoid a race. */
- arena_ralloc_junk_large(ptr, oldsize, usize_max);
- arena_ralloc_large_shrink(tsdn, arena, chunk, ptr, oldsize, usize_max);
- return (false);
-}
-
bool
arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
- size_t extra, bool zero)
-{
- size_t usize_min, usize_max;
-
+ size_t extra, bool zero) {
/* Calls with non-zero extra had to clamp extra. */
- assert(extra == 0 || size + extra <= HUGE_MAXCLASS);
+ assert(extra == 0 || size + extra <= LARGE_MAXCLASS);
- if (unlikely(size > HUGE_MAXCLASS))
- return (true);
-
- usize_min = s2u(size);
- usize_max = s2u(size + extra);
- if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
- arena_chunk_t *chunk;
+ if (unlikely(size > LARGE_MAXCLASS)) {
+ return true;
+ }
+ extent_t *extent = iealloc(tsdn, ptr);
+ size_t usize_min = sz_s2u(size);
+ size_t usize_max = sz_s2u(size + extra);
+ if (likely(oldsize <= SMALL_MAXCLASS && usize_min <= SMALL_MAXCLASS)) {
/*
* Avoid moving the allocation if the size class can be left the
* same.
*/
- if (oldsize <= SMALL_MAXCLASS) {
- assert(arena_bin_info[size2index(oldsize)].reg_size ==
- oldsize);
- if ((usize_max > SMALL_MAXCLASS ||
- size2index(usize_max) != size2index(oldsize)) &&
- (size > oldsize || usize_max < oldsize))
- return (true);
- } else {
- if (usize_max <= SMALL_MAXCLASS)
- return (true);
- if (arena_ralloc_large(tsdn, ptr, oldsize, usize_min,
- usize_max, zero))
- return (true);
+ assert(arena_bin_info[sz_size2index(oldsize)].reg_size ==
+ oldsize);
+ if ((usize_max > SMALL_MAXCLASS || sz_size2index(usize_max) !=
+ sz_size2index(oldsize)) && (size > oldsize || usize_max <
+ oldsize)) {
+ return true;
}
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- arena_decay_tick(tsdn, extent_node_arena_get(&chunk->node));
- return (false);
- } else {
- return (huge_ralloc_no_move(tsdn, ptr, oldsize, usize_min,
- usize_max, zero));
+ arena_decay_tick(tsdn, extent_arena_get(extent));
+ return false;
+ } else if (oldsize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS) {
+ return large_ralloc_no_move(tsdn, extent, usize_min, usize_max,
+ zero);
}
+
+ return true;
}
static void *
arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
- size_t alignment, bool zero, tcache_t *tcache)
-{
-
- if (alignment == 0)
- return (arena_malloc(tsdn, arena, usize, size2index(usize),
- zero, tcache, true));
- usize = sa2u(usize, alignment);
- if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
- return (NULL);
- return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
+ size_t alignment, bool zero, tcache_t *tcache) {
+ if (alignment == 0) {
+ return arena_malloc(tsdn, arena, usize, sz_size2index(usize),
+ zero, tcache, true);
+ }
+ usize = sz_sa2u(usize, alignment);
+ if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
+ return NULL;
+ }
+ return ipalloct(tsdn, usize, alignment, zero, tcache, arena);
}
void *
-arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
- size_t alignment, bool zero, tcache_t *tcache)
-{
- void *ret;
- size_t usize;
-
- usize = s2u(size);
- if (unlikely(usize == 0 || size > HUGE_MAXCLASS))
- return (NULL);
-
- if (likely(usize <= large_maxclass)) {
- size_t copysize;
+arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
+ size_t size, size_t alignment, bool zero, tcache_t *tcache) {
+ size_t usize = sz_s2u(size);
+ if (unlikely(usize == 0 || size > LARGE_MAXCLASS)) {
+ return NULL;
+ }
+ if (likely(usize <= SMALL_MAXCLASS)) {
/* Try to avoid moving the allocation. */
- if (!arena_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, 0,
- zero))
- return (ptr);
+ if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero)) {
+ return ptr;
+ }
+ }
- /*
- * size and oldsize are different enough that we need to move
- * the object. In that case, fall back to allocating new space
- * and copying.
- */
- ret = arena_ralloc_move_helper(tsd_tsdn(tsd), arena, usize,
+ if (oldsize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS) {
+ return large_ralloc(tsdn, arena, iealloc(tsdn, ptr), usize,
alignment, zero, tcache);
- if (ret == NULL)
- return (NULL);
-
- /*
- * Junk/zero-filling were already done by
- * ipalloc()/arena_malloc().
- */
+ }
- copysize = (usize < oldsize) ? usize : oldsize;
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
- memcpy(ret, ptr, copysize);
- isqalloc(tsd, ptr, oldsize, tcache, true);
- } else {
- ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment,
- zero, tcache);
+ /*
+ * size and oldsize are different enough that we need to move the
+ * object. In that case, fall back to allocating new space and copying.
+ */
+ void *ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment,
+ zero, tcache);
+ if (ret == NULL) {
+ return NULL;
}
- return (ret);
+
+ /*
+ * Junk/zero-filling were already done by
+ * ipalloc()/arena_malloc().
+ */
+
+ size_t copysize = (usize < oldsize) ? usize : oldsize;
+ memcpy(ret, ptr, copysize);
+ isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
+ return ret;
}
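
Editorial note, not part of the patch: arena_ralloc() above follows the classic reallocation fallback of trying to keep the object in place and otherwise allocating new space, copying the overlapping prefix, and freeing the old object. A minimal, self-contained sketch of that pattern against plain libc, where try_resize_in_place() is a hypothetical stand-in for arena_ralloc_no_move():

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

static bool
try_resize_in_place(void *ptr, size_t oldsize, size_t newsize) {
	/* Assume in-place resizing only succeeds when the size is unchanged. */
	(void)ptr;
	return oldsize == newsize;
}

static void *
ralloc_sketch(void *ptr, size_t oldsize, size_t newsize) {
	if (try_resize_in_place(ptr, oldsize, newsize)) {
		return ptr;
	}
	/* Fall back to allocating new space and copying. */
	void *ret = malloc(newsize);
	if (ret == NULL) {
		return NULL;
	}
	size_t copysize = (newsize < oldsize) ? newsize : oldsize;
	memcpy(ret, ptr, copysize);
	free(ptr);
	return ret;
}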
dss_prec_t
-arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena)
-{
- dss_prec_t ret;
-
- malloc_mutex_lock(tsdn, &arena->lock);
- ret = arena->dss_prec;
- malloc_mutex_unlock(tsdn, &arena->lock);
- return (ret);
+arena_dss_prec_get(arena_t *arena) {
+ return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE);
}
bool
-arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec)
-{
-
- if (!have_dss)
+arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) {
+ if (!have_dss) {
return (dss_prec != dss_prec_disabled);
- malloc_mutex_lock(tsdn, &arena->lock);
- arena->dss_prec = dss_prec;
- malloc_mutex_unlock(tsdn, &arena->lock);
- return (false);
+ }
+ atomic_store_u(&arena->dss_prec, (unsigned)dss_prec, ATOMIC_RELEASE);
+ return false;
}
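
Editorial note, not part of the patch: the two functions above replace an arena-lock round trip with a single atomic field, publishing with release semantics and reading with acquire semantics. A minimal sketch of the same pattern using C11 atomics (the struct and enum names here are illustrative, not jemalloc's):

#include <stdatomic.h>

typedef enum { PREC_DISABLED, PREC_PRIMARY, PREC_SECONDARY } prec_t;

typedef struct {
	_Atomic unsigned prec;	/* stores a prec_t value */
} settings_t;

/* Writer: publish the new setting with release semantics. */
static void
prec_set(settings_t *s, prec_t prec) {
	atomic_store_explicit(&s->prec, (unsigned)prec, memory_order_release);
}

/* Reader: observe the most recently published setting with acquire semantics. */
static prec_t
prec_get(settings_t *s) {
	return (prec_t)atomic_load_explicit(&s->prec, memory_order_acquire);
}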
ssize_t
-arena_lg_dirty_mult_default_get(void)
-{
-
- return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default));
+arena_dirty_decay_ms_default_get(void) {
+ return atomic_load_zd(&dirty_decay_ms_default, ATOMIC_RELAXED);
}
bool
-arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult)
-{
-
- if (opt_purge != purge_mode_ratio)
- return (true);
- if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
- return (true);
- atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult);
- return (false);
+arena_dirty_decay_ms_default_set(ssize_t decay_ms) {
+ if (!arena_decay_ms_valid(decay_ms)) {
+ return true;
+ }
+ atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED);
+ return false;
}
ssize_t
-arena_decay_time_default_get(void)
-{
-
- return ((ssize_t)atomic_read_z((size_t *)&decay_time_default));
+arena_muzzy_decay_ms_default_get(void) {
+ return atomic_load_zd(&muzzy_decay_ms_default, ATOMIC_RELAXED);
}
bool
-arena_decay_time_default_set(ssize_t decay_time)
-{
-
- if (opt_purge != purge_mode_decay)
- return (true);
- if (!arena_decay_time_valid(decay_time))
- return (true);
- atomic_write_z((size_t *)&decay_time_default, (size_t)decay_time);
- return (false);
-}
-
-static void
-arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
- const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
- size_t *nactive, size_t *ndirty)
-{
-
- *nthreads += arena_nthreads_get(arena, false);
- *dss = dss_prec_names[arena->dss_prec];
- *lg_dirty_mult = arena->lg_dirty_mult;
- *decay_time = arena->decay.time;
- *nactive += arena->nactive;
- *ndirty += arena->ndirty;
-}
-
-void
-arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
- const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
- size_t *nactive, size_t *ndirty)
-{
-
- malloc_mutex_lock(tsdn, &arena->lock);
- arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
- decay_time, nactive, ndirty);
- malloc_mutex_unlock(tsdn, &arena->lock);
-}
-
-void
-arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
- const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
- size_t *nactive, size_t *ndirty, arena_stats_t *astats,
- malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
- malloc_huge_stats_t *hstats)
-{
- unsigned i;
-
- cassert(config_stats);
-
- malloc_mutex_lock(tsdn, &arena->lock);
- arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
- decay_time, nactive, ndirty);
-
- astats->mapped += arena->stats.mapped;
- astats->retained += arena->stats.retained;
- astats->npurge += arena->stats.npurge;
- astats->nmadvise += arena->stats.nmadvise;
- astats->purged += arena->stats.purged;
- astats->metadata_mapped += arena->stats.metadata_mapped;
- astats->metadata_allocated += arena_metadata_allocated_get(arena);
- astats->allocated_large += arena->stats.allocated_large;
- astats->nmalloc_large += arena->stats.nmalloc_large;
- astats->ndalloc_large += arena->stats.ndalloc_large;
- astats->nrequests_large += arena->stats.nrequests_large;
- astats->allocated_huge += arena->stats.allocated_huge;
- astats->nmalloc_huge += arena->stats.nmalloc_huge;
- astats->ndalloc_huge += arena->stats.ndalloc_huge;
-
- for (i = 0; i < nlclasses; i++) {
- lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
- lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
- lstats[i].nrequests += arena->stats.lstats[i].nrequests;
- lstats[i].curruns += arena->stats.lstats[i].curruns;
- }
-
- for (i = 0; i < nhclasses; i++) {
- hstats[i].nmalloc += arena->stats.hstats[i].nmalloc;
- hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
- hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
- }
- malloc_mutex_unlock(tsdn, &arena->lock);
-
- for (i = 0; i < NBINS; i++) {
- arena_bin_t *bin = &arena->bins[i];
-
- malloc_mutex_lock(tsdn, &bin->lock);
- bstats[i].nmalloc += bin->stats.nmalloc;
- bstats[i].ndalloc += bin->stats.ndalloc;
- bstats[i].nrequests += bin->stats.nrequests;
- bstats[i].curregs += bin->stats.curregs;
- if (config_tcache) {
- bstats[i].nfills += bin->stats.nfills;
- bstats[i].nflushes += bin->stats.nflushes;
- }
- bstats[i].nruns += bin->stats.nruns;
- bstats[i].reruns += bin->stats.reruns;
- bstats[i].curruns += bin->stats.curruns;
- malloc_mutex_unlock(tsdn, &bin->lock);
+arena_muzzy_decay_ms_default_set(ssize_t decay_ms) {
+ if (!arena_decay_ms_valid(decay_ms)) {
+ return true;
}
+ atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED);
+ return false;
}
unsigned
-arena_nthreads_get(arena_t *arena, bool internal)
-{
-
- return (atomic_read_u(&arena->nthreads[internal]));
+arena_nthreads_get(arena_t *arena, bool internal) {
+ return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXED);
}
void
-arena_nthreads_inc(arena_t *arena, bool internal)
-{
-
- atomic_add_u(&arena->nthreads[internal], 1);
+arena_nthreads_inc(arena_t *arena, bool internal) {
+ atomic_fetch_add_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
}
void
-arena_nthreads_dec(arena_t *arena, bool internal)
-{
-
- atomic_sub_u(&arena->nthreads[internal], 1);
+arena_nthreads_dec(arena_t *arena, bool internal) {
+ atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
}
size_t
-arena_extent_sn_next(arena_t *arena)
-{
-
- return (atomic_add_z(&arena->extent_sn_next, 1) - 1);
+arena_extent_sn_next(arena_t *arena) {
+ return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXED);
}
arena_t *
-arena_new(tsdn_t *tsdn, unsigned ind)
-{
+arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
arena_t *arena;
+ base_t *base;
unsigned i;
- /*
- * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
- * because there is no way to clean up if base_alloc() OOMs.
- */
- if (config_stats) {
- arena = (arena_t *)base_alloc(tsdn,
- CACHELINE_CEILING(sizeof(arena_t)) +
- QUANTUM_CEILING((nlclasses * sizeof(malloc_large_stats_t)))
- + (nhclasses * sizeof(malloc_huge_stats_t)));
- } else
- arena = (arena_t *)base_alloc(tsdn, sizeof(arena_t));
- if (arena == NULL)
- return (NULL);
-
- arena->ind = ind;
- arena->nthreads[0] = arena->nthreads[1] = 0;
- if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA))
- return (NULL);
+ if (ind == 0) {
+ base = b0get();
+ } else {
+ base = base_new(tsdn, ind, extent_hooks);
+ if (base == NULL) {
+ return NULL;
+ }
+ }
+
+ arena = (arena_t *)base_alloc(tsdn, base, sizeof(arena_t), CACHELINE);
+ if (arena == NULL) {
+ goto label_error;
+ }
+
+ atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
+ atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
+ arena->last_thd = NULL;
if (config_stats) {
- memset(&arena->stats, 0, sizeof(arena_stats_t));
- arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena
- + CACHELINE_CEILING(sizeof(arena_t)));
- memset(arena->stats.lstats, 0, nlclasses *
- sizeof(malloc_large_stats_t));
- arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena
- + CACHELINE_CEILING(sizeof(arena_t)) +
- QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)));
- memset(arena->stats.hstats, 0, nhclasses *
- sizeof(malloc_huge_stats_t));
- if (config_tcache)
- ql_new(&arena->tcache_ql);
- }
-
- if (config_prof)
- arena->prof_accumbytes = 0;
+ if (arena_stats_init(tsdn, &arena->stats)) {
+ goto label_error;
+ }
+
+ ql_new(&arena->tcache_ql);
+ if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql",
+ WITNESS_RANK_TCACHE_QL, malloc_mutex_rank_exclusive)) {
+ goto label_error;
+ }
+ }
+
+ if (config_prof) {
+ if (prof_accum_init(tsdn, &arena->prof_accum)) {
+ goto label_error;
+ }
+ }
if (config_cache_oblivious) {
/*
@@ -3610,341 +1950,201 @@ arena_new(tsdn_t *tsdn, unsigned ind)
* cost of test repeatability. For debug builds, instead use a
* deterministic seed.
*/
- arena->offset_state = config_debug ? ind :
- (size_t)(uintptr_t)arena;
+ atomic_store_zu(&arena->offset_state, config_debug ? ind :
+ (size_t)(uintptr_t)arena, ATOMIC_RELAXED);
}
- arena->dss_prec = chunk_dss_prec_get();
-
- ql_new(&arena->achunks);
-
- arena->extent_sn_next = 0;
-
- arena->spare = NULL;
+ atomic_store_zu(&arena->extent_sn_next, 0, ATOMIC_RELAXED);
- arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
- arena->purging = false;
- arena->nactive = 0;
- arena->ndirty = 0;
+ atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(),
+ ATOMIC_RELAXED);
- for (i = 0; i < NPSIZES; i++)
- arena_run_heap_new(&arena->runs_avail[i]);
+ atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
- qr_new(&arena->runs_dirty, rd_link);
- qr_new(&arena->chunks_cache, cc_link);
-
- if (opt_purge == purge_mode_decay)
- arena_decay_init(arena, arena_decay_time_default_get());
-
- ql_new(&arena->huge);
- if (malloc_mutex_init(&arena->huge_mtx, "arena_huge",
- WITNESS_RANK_ARENA_HUGE))
- return (NULL);
-
- extent_tree_szsnad_new(&arena->chunks_szsnad_cached);
- extent_tree_ad_new(&arena->chunks_ad_cached);
- extent_tree_szsnad_new(&arena->chunks_szsnad_retained);
- extent_tree_ad_new(&arena->chunks_ad_retained);
- if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks",
- WITNESS_RANK_ARENA_CHUNKS))
- return (NULL);
- ql_new(&arena->node_cache);
- if (malloc_mutex_init(&arena->node_cache_mtx, "arena_node_cache",
- WITNESS_RANK_ARENA_NODE_CACHE))
- return (NULL);
-
- arena->chunk_hooks = chunk_hooks_default;
-
- /* Initialize bins. */
- for (i = 0; i < NBINS; i++) {
- arena_bin_t *bin = &arena->bins[i];
- if (malloc_mutex_init(&bin->lock, "arena_bin",
- WITNESS_RANK_ARENA_BIN))
- return (NULL);
- bin->runcur = NULL;
- arena_run_heap_new(&bin->runs);
- if (config_stats)
- memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
+ extent_list_init(&arena->large);
+ if (malloc_mutex_init(&arena->large_mtx, "arena_large",
+ WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) {
+ goto label_error;
}
- return (arena);
-}
-
-/*
- * Calculate bin_info->run_size such that it meets the following constraints:
- *
- * *) bin_info->run_size <= arena_maxrun
- * *) bin_info->nregs <= RUN_MAXREGS
- *
- * bin_info->nregs and bin_info->reg0_offset are also calculated here, since
- * these settings are all interdependent.
- */
-static void
-bin_info_run_size_calc(arena_bin_info_t *bin_info)
-{
- size_t pad_size;
- size_t try_run_size, perfect_run_size, actual_run_size;
- uint32_t try_nregs, perfect_nregs, actual_nregs;
-
/*
- * Determine redzone size based on minimum alignment and minimum
- * redzone size. Add padding to the end of the run if it is needed to
- * align the regions. The padding allows each redzone to be half the
- * minimum alignment; without the padding, each redzone would have to
- * be twice as large in order to maintain alignment.
+ * Delay coalescing for dirty extents despite the disruptive effect on
+ * memory layout for best-fit extent allocation, since cached extents
+ * are likely to be reused soon after deallocation, and the cost of
+ * merging/splitting extents is non-trivial.
*/
- if (config_fill && unlikely(opt_redzone)) {
- size_t align_min = ZU(1) << (ffs_zu(bin_info->reg_size) - 1);
- if (align_min <= REDZONE_MINSIZE) {
- bin_info->redzone_size = REDZONE_MINSIZE;
- pad_size = 0;
- } else {
- bin_info->redzone_size = align_min >> 1;
- pad_size = bin_info->redzone_size;
- }
- } else {
- bin_info->redzone_size = 0;
- pad_size = 0;
+ if (extents_init(tsdn, &arena->extents_dirty, extent_state_dirty,
+ true)) {
+ goto label_error;
}
- bin_info->reg_interval = bin_info->reg_size +
- (bin_info->redzone_size << 1);
-
/*
- * Compute run size under ideal conditions (no redzones, no limit on run
- * size).
+ * Coalesce muzzy extents immediately, because operations on them are in
+ * the critical path much less often than for dirty extents.
*/
- try_run_size = PAGE;
- try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
- do {
- perfect_run_size = try_run_size;
- perfect_nregs = try_nregs;
-
- try_run_size += PAGE;
- try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
- } while (perfect_run_size != perfect_nregs * bin_info->reg_size);
- assert(perfect_nregs <= RUN_MAXREGS);
-
- actual_run_size = perfect_run_size;
- actual_nregs = (uint32_t)((actual_run_size - pad_size) /
- bin_info->reg_interval);
-
- /*
- * Redzones can require enough padding that not even a single region can
- * fit within the number of pages that would normally be dedicated to a
- * run for this size class. Increase the run size until at least one
- * region fits.
- */
- while (actual_nregs == 0) {
- assert(config_fill && unlikely(opt_redzone));
-
- actual_run_size += PAGE;
- actual_nregs = (uint32_t)((actual_run_size - pad_size) /
- bin_info->reg_interval);
+ if (extents_init(tsdn, &arena->extents_muzzy, extent_state_muzzy,
+ false)) {
+ goto label_error;
}
-
/*
- * Make sure that the run will fit within an arena chunk.
+ * Coalesce retained extents immediately, in part because they will
+ * never be evicted (and therefore there's no opportunity for delayed
+ * coalescing), but also because operations on retained extents are not
+ * in the critical path.
*/
- while (actual_run_size > arena_maxrun) {
- actual_run_size -= PAGE;
- actual_nregs = (uint32_t)((actual_run_size - pad_size) /
- bin_info->reg_interval);
+ if (extents_init(tsdn, &arena->extents_retained, extent_state_retained,
+ false)) {
+ goto label_error;
}
- assert(actual_nregs > 0);
- assert(actual_run_size == s2u(actual_run_size));
-
- /* Copy final settings. */
- bin_info->run_size = actual_run_size;
- bin_info->nregs = actual_nregs;
- bin_info->reg0_offset = (uint32_t)(actual_run_size - (actual_nregs *
- bin_info->reg_interval) - pad_size + bin_info->redzone_size);
-
- assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
- * bin_info->reg_interval) + pad_size == bin_info->run_size);
-}
-
-static void
-bin_info_init(void)
-{
- arena_bin_info_t *bin_info;
-
-#define BIN_INFO_INIT_bin_yes(index, size) \
- bin_info = &arena_bin_info[index]; \
- bin_info->reg_size = size; \
- bin_info_run_size_calc(bin_info); \
- bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
-#define BIN_INFO_INIT_bin_no(index, size)
-#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
- BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
- SIZE_CLASSES
-#undef BIN_INFO_INIT_bin_yes
-#undef BIN_INFO_INIT_bin_no
-#undef SC
-}
-
-static void
-init_thp_initially_huge(void) {
- int fd;
- char buf[sizeof("[always] madvise never\n")];
- ssize_t nread;
- static const char *enabled_states[] = {
- "[always] madvise never\n",
- "always [madvise] never\n",
- "always madvise [never]\n"
- };
- static const bool thp_initially_huge_states[] = {
- true,
- false,
- false
- };
- unsigned i;
- if (config_debug) {
- for (i = 0; i < sizeof(enabled_states)/sizeof(const char *);
- i++) {
- assert(sizeof(buf) > strlen(enabled_states[i]));
- }
+ if (arena_decay_init(&arena->decay_dirty, &arena->extents_dirty,
+ arena_dirty_decay_ms_default_get(), &arena->stats.decay_dirty)) {
+ goto label_error;
}
- assert(sizeof(enabled_states)/sizeof(const char *) ==
- sizeof(thp_initially_huge_states)/sizeof(bool));
-
-#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
- fd = (int)syscall(SYS_open,
- "/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
-#else
- fd = open("/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
-#endif
- if (fd == -1) {
+ if (arena_decay_init(&arena->decay_muzzy, &arena->extents_muzzy,
+ arena_muzzy_decay_ms_default_get(), &arena->stats.decay_muzzy)) {
goto label_error;
}
-#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
- nread = (ssize_t)syscall(SYS_read, fd, &buf, sizeof(buf));
-#else
- nread = read(fd, &buf, sizeof(buf));
-#endif
-
-#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
- syscall(SYS_close, fd);
-#else
- close(fd);
-#endif
+ arena->extent_grow_next = sz_psz2ind(HUGEPAGE);
+ if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow",
+ WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
+ goto label_error;
+ }
- if (nread < 1) {
+ extent_avail_new(&arena->extent_avail);
+ if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail",
+ WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) {
goto label_error;
}
- for (i = 0; i < sizeof(enabled_states)/sizeof(const char *);
- i++) {
- if (strncmp(buf, enabled_states[i], (size_t)nread) == 0) {
- thp_initially_huge = thp_initially_huge_states[i];
- return;
+
+ /* Initialize bins. */
+ for (i = 0; i < NBINS; i++) {
+ arena_bin_t *bin = &arena->bins[i];
+ if (malloc_mutex_init(&bin->lock, "arena_bin",
+ WITNESS_RANK_ARENA_BIN, malloc_mutex_rank_exclusive)) {
+ goto label_error;
+ }
+ bin->slabcur = NULL;
+ extent_heap_new(&bin->slabs_nonfull);
+ extent_list_init(&bin->slabs_full);
+ if (config_stats) {
+ memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
}
}
-label_error:
- thp_initially_huge = false;
-}
+ arena->base = base;
+ /* Set arena before creating background threads. */
+ arena_set(ind, arena);
-void
-arena_boot(void)
-{
- unsigned i;
+ nstime_init(&arena->create_time, 0);
+ nstime_update(&arena->create_time);
- if (config_thp && opt_thp) {
- init_thp_initially_huge();
+ /* We don't support reentrancy for arena 0 bootstrapping. */
+ if (ind != 0) {
+ /*
+ * If we're here, then arena 0 already exists, so bootstrapping
+ * is done enough that we should have tsd.
+ */
+ assert(!tsdn_null(tsdn));
+ pre_reentrancy(tsdn_tsd(tsdn));
+ if (hooks_arena_new_hook) {
+ hooks_arena_new_hook();
+ }
+ post_reentrancy(tsdn_tsd(tsdn));
}
- arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
- arena_decay_time_default_set(opt_decay_time);
-
- /*
- * Compute the header size such that it is large enough to contain the
- * page map. The page map is biased to omit entries for the header
- * itself, so some iteration is necessary to compute the map bias.
- *
- * 1) Compute safe header_size and map_bias values that include enough
- * space for an unbiased page map.
- * 2) Refine map_bias based on (1) to omit the header pages in the page
- * map. The resulting map_bias may be one too small.
- * 3) Refine map_bias based on (2). The result will be >= the result
- * from (2), and will always be correct.
- */
- map_bias = 0;
- for (i = 0; i < 3; i++) {
- size_t header_size = offsetof(arena_chunk_t, map_bits) +
- ((sizeof(arena_chunk_map_bits_t) +
- sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
- map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
+ return arena;
+label_error:
+ if (ind != 0) {
+ base_delete(base);
}
- assert(map_bias > 0);
-
- map_misc_offset = offsetof(arena_chunk_t, map_bits) +
- sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias);
-
- arena_maxrun = chunksize - (map_bias << LG_PAGE);
- assert(arena_maxrun > 0);
- large_maxclass = index2size(size2index(chunksize)-1);
- assert(large_maxclass > 0);
- assert(large_maxclass + large_pad <= arena_maxrun);
- nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS);
- nhclasses = NSIZES - nlclasses - NBINS;
-
- bin_info_init();
+ return NULL;
}
void
-arena_prefork0(tsdn_t *tsdn, arena_t *arena)
-{
+arena_boot(void) {
+ arena_dirty_decay_ms_default_set(opt_dirty_decay_ms);
+ arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms);
+}
- malloc_mutex_prefork(tsdn, &arena->lock);
+void
+arena_prefork0(tsdn_t *tsdn, arena_t *arena) {
+ malloc_mutex_prefork(tsdn, &arena->decay_dirty.mtx);
+ malloc_mutex_prefork(tsdn, &arena->decay_muzzy.mtx);
}
void
-arena_prefork1(tsdn_t *tsdn, arena_t *arena)
-{
+arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
+ if (config_stats) {
+ malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx);
+ }
+}
- malloc_mutex_prefork(tsdn, &arena->chunks_mtx);
+void
+arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
+ extents_prefork(tsdn, &arena->extents_dirty);
+ extents_prefork(tsdn, &arena->extents_muzzy);
+ extents_prefork(tsdn, &arena->extents_retained);
}
void
-arena_prefork2(tsdn_t *tsdn, arena_t *arena)
-{
+arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
+ malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx);
+}
- malloc_mutex_prefork(tsdn, &arena->node_cache_mtx);
+void
+arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
+ base_prefork(tsdn, arena->base);
}
void
-arena_prefork3(tsdn_t *tsdn, arena_t *arena)
-{
- unsigned i;
+arena_prefork5(tsdn_t *tsdn, arena_t *arena) {
+ malloc_mutex_prefork(tsdn, &arena->large_mtx);
+}
- for (i = 0; i < NBINS; i++)
+void
+arena_prefork6(tsdn_t *tsdn, arena_t *arena) {
+ for (unsigned i = 0; i < NBINS; i++) {
malloc_mutex_prefork(tsdn, &arena->bins[i].lock);
- malloc_mutex_prefork(tsdn, &arena->huge_mtx);
+ }
}
void
-arena_postfork_parent(tsdn_t *tsdn, arena_t *arena)
-{
+arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
unsigned i;
- malloc_mutex_postfork_parent(tsdn, &arena->huge_mtx);
- for (i = 0; i < NBINS; i++)
+ for (i = 0; i < NBINS; i++) {
malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
- malloc_mutex_postfork_parent(tsdn, &arena->node_cache_mtx);
- malloc_mutex_postfork_parent(tsdn, &arena->chunks_mtx);
- malloc_mutex_postfork_parent(tsdn, &arena->lock);
+ }
+ malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
+ base_postfork_parent(tsdn, arena->base);
+ malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx);
+ extents_postfork_parent(tsdn, &arena->extents_dirty);
+ extents_postfork_parent(tsdn, &arena->extents_muzzy);
+ extents_postfork_parent(tsdn, &arena->extents_retained);
+ malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx);
+ malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx);
+ if (config_stats) {
+ malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx);
+ }
}
void
-arena_postfork_child(tsdn_t *tsdn, arena_t *arena)
-{
+arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
unsigned i;
- malloc_mutex_postfork_child(tsdn, &arena->huge_mtx);
- for (i = 0; i < NBINS; i++)
+ for (i = 0; i < NBINS; i++) {
malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
- malloc_mutex_postfork_child(tsdn, &arena->node_cache_mtx);
- malloc_mutex_postfork_child(tsdn, &arena->chunks_mtx);
- malloc_mutex_postfork_child(tsdn, &arena->lock);
+ }
+ malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
+ base_postfork_child(tsdn, arena->base);
+ malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx);
+ extents_postfork_child(tsdn, &arena->extents_dirty);
+ extents_postfork_child(tsdn, &arena->extents_muzzy);
+ extents_postfork_child(tsdn, &arena->extents_retained);
+ malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx);
+ malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx);
+ if (config_stats) {
+ malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx);
+ }
}
diff --git a/contrib/jemalloc/src/atomic.c b/contrib/jemalloc/src/atomic.c
deleted file mode 100644
index 77ee313113be..000000000000
--- a/contrib/jemalloc/src/atomic.c
+++ /dev/null
@@ -1,2 +0,0 @@
-#define JEMALLOC_ATOMIC_C_
-#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/contrib/jemalloc/src/background_thread.c b/contrib/jemalloc/src/background_thread.c
new file mode 100644
index 000000000000..1ff594476f00
--- /dev/null
+++ b/contrib/jemalloc/src/background_thread.c
@@ -0,0 +1,846 @@
+#define JEMALLOC_BACKGROUND_THREAD_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+
+/******************************************************************************/
+/* Data. */
+
+/* This option should be opt-in only. */
+#define BACKGROUND_THREAD_DEFAULT false
+/* Read-only after initialization. */
+bool opt_background_thread = BACKGROUND_THREAD_DEFAULT;
+
+/* Used for thread creation, termination and stats. */
+malloc_mutex_t background_thread_lock;
+/* Indicates global state. Atomic because decay reads this w/o locking. */
+atomic_b_t background_thread_enabled_state;
+size_t n_background_threads;
+/* Thread info per-index. */
+background_thread_info_t *background_thread_info;
+
+/* False if the necessary runtime support is missing. */
+bool can_enable_background_thread;
+
+/******************************************************************************/
+
+#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
+#include <dlfcn.h>
+
+static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
+ void *(*)(void *), void *__restrict);
+static pthread_once_t once_control = PTHREAD_ONCE_INIT;
+
+static void
+pthread_create_wrapper_once(void) {
+#ifdef JEMALLOC_LAZY_LOCK
+ isthreaded = true;
+#endif
+}
+
+int
+pthread_create_wrapper(pthread_t *__restrict thread, const pthread_attr_t *attr,
+ void *(*start_routine)(void *), void *__restrict arg) {
+ pthread_once(&once_control, pthread_create_wrapper_once);
+
+ return pthread_create_fptr(thread, attr, start_routine, arg);
+}
+#endif /* JEMALLOC_PTHREAD_CREATE_WRAPPER */
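
Editorial note, not part of the patch: the wrapper above depends on resolving the real pthread_create via dlsym(RTLD_NEXT) (done in background_thread_boot0() further down). A stripped-down, standalone sketch of that interposition idiom, with lazy resolution and minimal error handling purely for brevity:

#define _GNU_SOURCE
#include <dlfcn.h>
#include <pthread.h>
#include <stdlib.h>

static int (*real_pthread_create)(pthread_t *, const pthread_attr_t *,
    void *(*)(void *), void *);

int
pthread_create(pthread_t *thread, const pthread_attr_t *attr,
    void *(*start_routine)(void *), void *arg) {
	if (real_pthread_create == NULL) {
		/* Look up the next definition in link order (libpthread's). */
		real_pthread_create = dlsym(RTLD_NEXT, "pthread_create");
		if (real_pthread_create == NULL) {
			abort();
		}
	}
	/* An allocator would flip its "isthreaded" flag here. */
	return real_pthread_create(thread, attr, start_routine, arg);
}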
+
+#ifndef JEMALLOC_BACKGROUND_THREAD
+#define NOT_REACHED { not_reached(); }
+bool background_thread_create(tsd_t *tsd, unsigned arena_ind) NOT_REACHED
+bool background_threads_enable(tsd_t *tsd) NOT_REACHED
+bool background_threads_disable(tsd_t *tsd) NOT_REACHED
+void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
+ arena_decay_t *decay, size_t npages_new) NOT_REACHED
+void background_thread_prefork0(tsdn_t *tsdn) NOT_REACHED
+void background_thread_prefork1(tsdn_t *tsdn) NOT_REACHED
+void background_thread_postfork_parent(tsdn_t *tsdn) NOT_REACHED
+void background_thread_postfork_child(tsdn_t *tsdn) NOT_REACHED
+bool background_thread_stats_read(tsdn_t *tsdn,
+ background_thread_stats_t *stats) NOT_REACHED
+void background_thread_ctl_init(tsdn_t *tsdn) NOT_REACHED
+#undef NOT_REACHED
+#else
+
+static bool background_thread_enabled_at_fork;
+
+static void
+background_thread_info_init(tsdn_t *tsdn, background_thread_info_t *info) {
+ background_thread_wakeup_time_set(tsdn, info, 0);
+ info->npages_to_purge_new = 0;
+ if (config_stats) {
+ info->tot_n_runs = 0;
+ nstime_init(&info->tot_sleep_time, 0);
+ }
+}
+
+static inline bool
+set_current_thread_affinity(UNUSED int cpu) {
+#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
+ cpu_set_t cpuset;
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu, &cpuset);
+ int ret = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);
+
+ return (ret != 0);
+#else
+ return false;
+#endif
+}
+
+/* Threshold for determining when to wake up the background thread. */
+#define BACKGROUND_THREAD_NPAGES_THRESHOLD UINT64_C(1024)
+#define BILLION UINT64_C(1000000000)
+/* Minimal sleep interval: 100 ms. */
+#define BACKGROUND_THREAD_MIN_INTERVAL_NS (BILLION / 10)
+
+static inline size_t
+decay_npurge_after_interval(arena_decay_t *decay, size_t interval) {
+ size_t i;
+ uint64_t sum = 0;
+ for (i = 0; i < interval; i++) {
+ sum += decay->backlog[i] * h_steps[i];
+ }
+ for (; i < SMOOTHSTEP_NSTEPS; i++) {
+ sum += decay->backlog[i] * (h_steps[i] - h_steps[i - interval]);
+ }
+
+ return (size_t)(sum >> SMOOTHSTEP_BFP);
+}
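
Editorial note, not part of the patch: the helper above boils down to a fixed-point sum over the decay backlog. With b_i the backlog entries, h_i the smoothstep coefficients, and BFP the number of fractional bits, the estimated number of pages purged within the next k epochs is

$$\mathit{npurge}(k) \;\approx\; 2^{-\mathrm{BFP}} \left( \sum_{i<k} b_i\,h_i \;+\; \sum_{i \ge k} b_i\,(h_i - h_{i-k}) \right)$$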
+
+static uint64_t
+arena_decay_compute_purge_interval_impl(tsdn_t *tsdn, arena_decay_t *decay,
+ extents_t *extents) {
+ if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
+ /* Use minimal interval if decay is contended. */
+ return BACKGROUND_THREAD_MIN_INTERVAL_NS;
+ }
+
+ uint64_t interval;
+ ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
+ if (decay_time <= 0) {
+		/* Purging is currently done eagerly or is disabled. */
+ interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
+ goto label_done;
+ }
+
+ uint64_t decay_interval_ns = nstime_ns(&decay->interval);
+ assert(decay_interval_ns > 0);
+ size_t npages = extents_npages_get(extents);
+ if (npages == 0) {
+ unsigned i;
+ for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
+ if (decay->backlog[i] > 0) {
+ break;
+ }
+ }
+ if (i == SMOOTHSTEP_NSTEPS) {
+ /* No dirty pages recorded. Sleep indefinitely. */
+ interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
+ goto label_done;
+ }
+ }
+ if (npages <= BACKGROUND_THREAD_NPAGES_THRESHOLD) {
+ /* Use max interval. */
+ interval = decay_interval_ns * SMOOTHSTEP_NSTEPS;
+ goto label_done;
+ }
+
+ size_t lb = BACKGROUND_THREAD_MIN_INTERVAL_NS / decay_interval_ns;
+ size_t ub = SMOOTHSTEP_NSTEPS;
+	/* Need at least 2 intervals to reach the next epoch deadline. */
+ lb = (lb < 2) ? 2 : lb;
+ if ((decay_interval_ns * ub <= BACKGROUND_THREAD_MIN_INTERVAL_NS) ||
+ (lb + 2 > ub)) {
+ interval = BACKGROUND_THREAD_MIN_INTERVAL_NS;
+ goto label_done;
+ }
+
+ assert(lb + 2 <= ub);
+ size_t npurge_lb, npurge_ub;
+ npurge_lb = decay_npurge_after_interval(decay, lb);
+ if (npurge_lb > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
+ interval = decay_interval_ns * lb;
+ goto label_done;
+ }
+ npurge_ub = decay_npurge_after_interval(decay, ub);
+ if (npurge_ub < BACKGROUND_THREAD_NPAGES_THRESHOLD) {
+ interval = decay_interval_ns * ub;
+ goto label_done;
+ }
+
+ unsigned n_search = 0;
+ size_t target, npurge;
+ while ((npurge_lb + BACKGROUND_THREAD_NPAGES_THRESHOLD < npurge_ub)
+ && (lb + 2 < ub)) {
+ target = (lb + ub) / 2;
+ npurge = decay_npurge_after_interval(decay, target);
+ if (npurge > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
+ ub = target;
+ npurge_ub = npurge;
+ } else {
+ lb = target;
+ npurge_lb = npurge;
+ }
+ assert(n_search++ < lg_floor(SMOOTHSTEP_NSTEPS) + 1);
+ }
+ interval = decay_interval_ns * (ub + lb) / 2;
+label_done:
+ interval = (interval < BACKGROUND_THREAD_MIN_INTERVAL_NS) ?
+ BACKGROUND_THREAD_MIN_INTERVAL_NS : interval;
+ malloc_mutex_unlock(tsdn, &decay->mtx);
+
+ return interval;
+}
+
+/* Compute purge interval for background threads. */
+static uint64_t
+arena_decay_compute_purge_interval(tsdn_t *tsdn, arena_t *arena) {
+ uint64_t i1, i2;
+ i1 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_dirty,
+ &arena->extents_dirty);
+ if (i1 == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
+ return i1;
+ }
+ i2 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_muzzy,
+ &arena->extents_muzzy);
+
+ return i1 < i2 ? i1 : i2;
+}
+
+static void
+background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
+ uint64_t interval) {
+ if (config_stats) {
+ info->tot_n_runs++;
+ }
+ info->npages_to_purge_new = 0;
+
+ struct timeval tv;
+ /* Specific clock required by timedwait. */
+ gettimeofday(&tv, NULL);
+ nstime_t before_sleep;
+ nstime_init2(&before_sleep, tv.tv_sec, tv.tv_usec * 1000);
+
+ int ret;
+ if (interval == BACKGROUND_THREAD_INDEFINITE_SLEEP) {
+ assert(background_thread_indefinite_sleep(info));
+ ret = pthread_cond_wait(&info->cond, &info->mtx.lock);
+ assert(ret == 0);
+ } else {
+ assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS &&
+ interval <= BACKGROUND_THREAD_INDEFINITE_SLEEP);
+		/* We need the malloc clock (which can differ from tv). */
+ nstime_t next_wakeup;
+ nstime_init(&next_wakeup, 0);
+ nstime_update(&next_wakeup);
+ nstime_iadd(&next_wakeup, interval);
+ assert(nstime_ns(&next_wakeup) <
+ BACKGROUND_THREAD_INDEFINITE_SLEEP);
+ background_thread_wakeup_time_set(tsdn, info,
+ nstime_ns(&next_wakeup));
+
+ nstime_t ts_wakeup;
+ nstime_copy(&ts_wakeup, &before_sleep);
+ nstime_iadd(&ts_wakeup, interval);
+ struct timespec ts;
+ ts.tv_sec = (size_t)nstime_sec(&ts_wakeup);
+ ts.tv_nsec = (size_t)nstime_nsec(&ts_wakeup);
+
+ assert(!background_thread_indefinite_sleep(info));
+ ret = pthread_cond_timedwait(&info->cond, &info->mtx.lock, &ts);
+ assert(ret == ETIMEDOUT || ret == 0);
+ background_thread_wakeup_time_set(tsdn, info,
+ BACKGROUND_THREAD_INDEFINITE_SLEEP);
+ }
+ if (config_stats) {
+ gettimeofday(&tv, NULL);
+ nstime_t after_sleep;
+ nstime_init2(&after_sleep, tv.tv_sec, tv.tv_usec * 1000);
+ if (nstime_compare(&after_sleep, &before_sleep) > 0) {
+ nstime_subtract(&after_sleep, &before_sleep);
+ nstime_add(&info->tot_sleep_time, &after_sleep);
+ }
+ }
+}
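
Editorial note, not part of the patch: background_thread_sleep() converts a relative interval into an absolute deadline because pthread_cond_timedwait() takes an absolute timespec on the condition variable's clock (CLOCK_REALTIME by default, hence the gettimeofday() call). A minimal standalone sketch of that idiom, assuming the caller already holds mtx:

#include <pthread.h>
#include <stdint.h>
#include <sys/time.h>
#include <time.h>

/* Sleep on cond for roughly interval_ns, or until signaled. */
static int
cond_sleep_ns(pthread_cond_t *cond, pthread_mutex_t *mtx,
    uint64_t interval_ns) {
	struct timeval tv;
	gettimeofday(&tv, NULL);	/* timedwait uses the realtime clock */
	uint64_t deadline_ns = (uint64_t)tv.tv_sec * 1000000000 +
	    (uint64_t)tv.tv_usec * 1000 + interval_ns;
	struct timespec ts;
	ts.tv_sec = (time_t)(deadline_ns / 1000000000);
	ts.tv_nsec = (long)(deadline_ns % 1000000000);
	/* Returns 0 if signaled, ETIMEDOUT if the deadline passed. */
	return pthread_cond_timedwait(cond, mtx, &ts);
}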
+
+static bool
+background_thread_pause_check(tsdn_t *tsdn, background_thread_info_t *info) {
+ if (unlikely(info->state == background_thread_paused)) {
+ malloc_mutex_unlock(tsdn, &info->mtx);
+ /* Wait on global lock to update status. */
+ malloc_mutex_lock(tsdn, &background_thread_lock);
+ malloc_mutex_unlock(tsdn, &background_thread_lock);
+ malloc_mutex_lock(tsdn, &info->mtx);
+ return true;
+ }
+
+ return false;
+}
+
+static inline void
+background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info,
+    unsigned ind) {
+ uint64_t min_interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
+ unsigned narenas = narenas_total_get();
+
+ for (unsigned i = ind; i < narenas; i += ncpus) {
+ arena_t *arena = arena_get(tsdn, i, false);
+ if (!arena) {
+ continue;
+ }
+ arena_decay(tsdn, arena, true, false);
+ if (min_interval == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
+ /* Min interval will be used. */
+ continue;
+ }
+ uint64_t interval = arena_decay_compute_purge_interval(tsdn,
+ arena);
+ assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS);
+ if (min_interval > interval) {
+ min_interval = interval;
+ }
+ }
+ background_thread_sleep(tsdn, info, min_interval);
+}
+
+static bool
+background_threads_disable_single(tsd_t *tsd, background_thread_info_t *info) {
+ if (info == &background_thread_info[0]) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd),
+ &background_thread_lock);
+ } else {
+ malloc_mutex_assert_not_owner(tsd_tsdn(tsd),
+ &background_thread_lock);
+ }
+
+ pre_reentrancy(tsd);
+ malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
+ bool has_thread;
+ assert(info->state != background_thread_paused);
+ if (info->state == background_thread_started) {
+ has_thread = true;
+ info->state = background_thread_stopped;
+ pthread_cond_signal(&info->cond);
+ } else {
+ has_thread = false;
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
+
+ if (!has_thread) {
+ post_reentrancy(tsd);
+ return false;
+ }
+ void *ret;
+ if (pthread_join(info->thread, &ret)) {
+ post_reentrancy(tsd);
+ return true;
+ }
+ assert(ret == NULL);
+ n_background_threads--;
+ post_reentrancy(tsd);
+
+ return false;
+}
+
+static void *background_thread_entry(void *ind_arg);
+
+static void
+check_background_thread_creation(tsd_t *tsd, unsigned *n_created,
+ bool *created_threads) {
+ if (likely(*n_created == n_background_threads)) {
+ return;
+ }
+
+ malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_info[0].mtx);
+label_restart:
+ malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
+ for (unsigned i = 1; i < ncpus; i++) {
+ if (created_threads[i]) {
+ continue;
+ }
+ background_thread_info_t *info = &background_thread_info[i];
+ malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
+ assert(info->state != background_thread_paused);
+ bool create = (info->state == background_thread_started);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
+ if (!create) {
+ continue;
+ }
+
+ /*
+		 * To avoid deadlock with prefork handlers (which wait for the
+ * mutex held here), unlock before calling pthread_create().
+ */
+ malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
+
+ pre_reentrancy(tsd);
+ int err = pthread_create_wrapper(&info->thread, NULL,
+ background_thread_entry, (void *)(uintptr_t)i);
+ post_reentrancy(tsd);
+
+ if (err == 0) {
+ (*n_created)++;
+ created_threads[i] = true;
+ } else {
+ malloc_printf("<jemalloc>: background thread "
+ "creation failed (%d)\n", err);
+ if (opt_abort) {
+ abort();
+ }
+ }
+ /* Restart since we unlocked. */
+ goto label_restart;
+ }
+ malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_info[0].mtx);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
+}
+
+static void
+background_thread0_work(tsd_t *tsd) {
+ /* Thread0 is also responsible for launching / terminating threads. */
+ VARIABLE_ARRAY(bool, created_threads, ncpus);
+ unsigned i;
+ for (i = 1; i < ncpus; i++) {
+ created_threads[i] = false;
+ }
+ /* Start working, and create more threads when asked. */
+ unsigned n_created = 1;
+ while (background_thread_info[0].state != background_thread_stopped) {
+ if (background_thread_pause_check(tsd_tsdn(tsd),
+ &background_thread_info[0])) {
+ continue;
+ }
+ check_background_thread_creation(tsd, &n_created,
+ (bool *)&created_threads);
+ background_work_sleep_once(tsd_tsdn(tsd),
+ &background_thread_info[0], 0);
+ }
+
+ /*
+	 * Shut down other threads at exit. Note that the ctl thread holds the
+	 * global background_thread mutex and is waiting for us.
+ */
+ assert(!background_thread_enabled());
+ for (i = 1; i < ncpus; i++) {
+ background_thread_info_t *info = &background_thread_info[i];
+ assert(info->state != background_thread_paused);
+ if (created_threads[i]) {
+ background_threads_disable_single(tsd, info);
+ } else {
+ malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
+ /* Clear in case the thread wasn't created. */
+ info->state = background_thread_stopped;
+ malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
+ }
+ }
+ background_thread_info[0].state = background_thread_stopped;
+ assert(n_background_threads == 1);
+}
+
+static void
+background_work(tsd_t *tsd, unsigned ind) {
+ background_thread_info_t *info = &background_thread_info[ind];
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
+ background_thread_wakeup_time_set(tsd_tsdn(tsd), info,
+ BACKGROUND_THREAD_INDEFINITE_SLEEP);
+ if (ind == 0) {
+ background_thread0_work(tsd);
+ } else {
+ while (info->state != background_thread_stopped) {
+ if (background_thread_pause_check(tsd_tsdn(tsd),
+ info)) {
+ continue;
+ }
+ background_work_sleep_once(tsd_tsdn(tsd), info, ind);
+ }
+ }
+ assert(info->state == background_thread_stopped);
+ background_thread_wakeup_time_set(tsd_tsdn(tsd), info, 0);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
+}
+
+static void *
+background_thread_entry(void *ind_arg) {
+ unsigned thread_ind = (unsigned)(uintptr_t)ind_arg;
+ assert(thread_ind < ncpus);
+
+ if (opt_percpu_arena != percpu_arena_disabled) {
+ set_current_thread_affinity((int)thread_ind);
+ }
+ /*
+ * Start periodic background work. We use internal tsd which avoids
+ * side effects, for example triggering new arena creation (which in
+ * turn triggers another background thread creation).
+ */
+ background_work(tsd_internal_fetch(), thread_ind);
+ assert(pthread_equal(pthread_self(),
+ background_thread_info[thread_ind].thread));
+
+ return NULL;
+}
+
+static void
+background_thread_init(tsd_t *tsd, background_thread_info_t *info) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
+ info->state = background_thread_started;
+ background_thread_info_init(tsd_tsdn(tsd), info);
+ n_background_threads++;
+}
+
+/* Create a new background thread if needed. */
+bool
+background_thread_create(tsd_t *tsd, unsigned arena_ind) {
+ assert(have_background_thread);
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
+
+ /* We create at most NCPUs threads. */
+ size_t thread_ind = arena_ind % ncpus;
+ background_thread_info_t *info = &background_thread_info[thread_ind];
+
+ bool need_new_thread;
+ malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
+ need_new_thread = background_thread_enabled() &&
+ (info->state == background_thread_stopped);
+ if (need_new_thread) {
+ background_thread_init(tsd, info);
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
+ if (!need_new_thread) {
+ return false;
+ }
+ if (arena_ind != 0) {
+ /* Threads are created asynchronously by Thread 0. */
+ background_thread_info_t *t0 = &background_thread_info[0];
+ malloc_mutex_lock(tsd_tsdn(tsd), &t0->mtx);
+ assert(t0->state == background_thread_started);
+ pthread_cond_signal(&t0->cond);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &t0->mtx);
+
+ return false;
+ }
+
+ pre_reentrancy(tsd);
+ /*
+ * To avoid complications (besides reentrancy), create internal
+ * background threads with the underlying pthread_create.
+ */
+ int err = pthread_create_wrapper(&info->thread, NULL,
+ background_thread_entry, (void *)thread_ind);
+ post_reentrancy(tsd);
+
+ if (err != 0) {
+ malloc_printf("<jemalloc>: arena 0 background thread creation "
+ "failed (%d)\n", err);
+ malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
+ info->state = background_thread_stopped;
+ n_background_threads--;
+ malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
+
+ return true;
+ }
+
+ return false;
+}
+
+bool
+background_threads_enable(tsd_t *tsd) {
+ assert(n_background_threads == 0);
+ assert(background_thread_enabled());
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
+
+ VARIABLE_ARRAY(bool, marked, ncpus);
+ unsigned i, nmarked;
+ for (i = 0; i < ncpus; i++) {
+ marked[i] = false;
+ }
+ nmarked = 0;
+ /* Mark the threads we need to create for thread 0. */
+ unsigned n = narenas_total_get();
+ for (i = 1; i < n; i++) {
+ if (marked[i % ncpus] ||
+ arena_get(tsd_tsdn(tsd), i, false) == NULL) {
+ continue;
+ }
+ background_thread_info_t *info = &background_thread_info[i];
+ malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
+ assert(info->state == background_thread_stopped);
+ background_thread_init(tsd, info);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
+ marked[i % ncpus] = true;
+ if (++nmarked == ncpus) {
+ break;
+ }
+ }
+
+ return background_thread_create(tsd, 0);
+}
+
+bool
+background_threads_disable(tsd_t *tsd) {
+ assert(!background_thread_enabled());
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
+
+ /* Thread 0 will be responsible for terminating other threads. */
+ if (background_threads_disable_single(tsd,
+ &background_thread_info[0])) {
+ return true;
+ }
+ assert(n_background_threads == 0);
+
+ return false;
+}
+
+/* Check if we need to signal the background thread early. */
+void
+background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
+ arena_decay_t *decay, size_t npages_new) {
+ background_thread_info_t *info = arena_background_thread_info_get(
+ arena);
+ if (malloc_mutex_trylock(tsdn, &info->mtx)) {
+ /*
+		 * The background thread may hold the mutex for a long period of
+		 * time, and we'd like to avoid imposing that variance on
+		 * application threads. Keep this non-blocking and leave the
+		 * work to a future epoch.
+ */
+ return;
+ }
+
+ if (info->state != background_thread_started) {
+ goto label_done;
+ }
+ if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
+ goto label_done;
+ }
+
+ ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
+ if (decay_time <= 0) {
+		/* Purging is currently done eagerly or is disabled. */
+ goto label_done_unlock2;
+ }
+ uint64_t decay_interval_ns = nstime_ns(&decay->interval);
+ assert(decay_interval_ns > 0);
+
+ nstime_t diff;
+ nstime_init(&diff, background_thread_wakeup_time_get(info));
+ if (nstime_compare(&diff, &decay->epoch) <= 0) {
+ goto label_done_unlock2;
+ }
+ nstime_subtract(&diff, &decay->epoch);
+ if (nstime_ns(&diff) < BACKGROUND_THREAD_MIN_INTERVAL_NS) {
+ goto label_done_unlock2;
+ }
+
+ if (npages_new > 0) {
+ size_t n_epoch = (size_t)(nstime_ns(&diff) / decay_interval_ns);
+ /*
+ * Compute how many new pages we would need to purge by the next
+ * wakeup, which is used to determine if we should signal the
+ * background thread.
+ */
+ uint64_t npurge_new;
+ if (n_epoch >= SMOOTHSTEP_NSTEPS) {
+ npurge_new = npages_new;
+ } else {
+ uint64_t h_steps_max = h_steps[SMOOTHSTEP_NSTEPS - 1];
+ assert(h_steps_max >=
+ h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
+ npurge_new = npages_new * (h_steps_max -
+ h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
+ npurge_new >>= SMOOTHSTEP_BFP;
+ }
+ info->npages_to_purge_new += npurge_new;
+ }
+
+ bool should_signal;
+ if (info->npages_to_purge_new > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
+ should_signal = true;
+ } else if (unlikely(background_thread_indefinite_sleep(info)) &&
+ (extents_npages_get(&arena->extents_dirty) > 0 ||
+ extents_npages_get(&arena->extents_muzzy) > 0 ||
+ info->npages_to_purge_new > 0)) {
+ should_signal = true;
+ } else {
+ should_signal = false;
+ }
+
+ if (should_signal) {
+ info->npages_to_purge_new = 0;
+ pthread_cond_signal(&info->cond);
+ }
+label_done_unlock2:
+ malloc_mutex_unlock(tsdn, &decay->mtx);
+label_done:
+ malloc_mutex_unlock(tsdn, &info->mtx);
+}
+
+void
+background_thread_prefork0(tsdn_t *tsdn) {
+ malloc_mutex_prefork(tsdn, &background_thread_lock);
+ background_thread_enabled_at_fork = background_thread_enabled();
+}
+
+void
+background_thread_prefork1(tsdn_t *tsdn) {
+ for (unsigned i = 0; i < ncpus; i++) {
+ malloc_mutex_prefork(tsdn, &background_thread_info[i].mtx);
+ }
+}
+
+void
+background_thread_postfork_parent(tsdn_t *tsdn) {
+ for (unsigned i = 0; i < ncpus; i++) {
+ malloc_mutex_postfork_parent(tsdn,
+ &background_thread_info[i].mtx);
+ }
+ malloc_mutex_postfork_parent(tsdn, &background_thread_lock);
+}
+
+void
+background_thread_postfork_child(tsdn_t *tsdn) {
+ for (unsigned i = 0; i < ncpus; i++) {
+ malloc_mutex_postfork_child(tsdn,
+ &background_thread_info[i].mtx);
+ }
+ malloc_mutex_postfork_child(tsdn, &background_thread_lock);
+ if (!background_thread_enabled_at_fork) {
+ return;
+ }
+
+ /* Clear background_thread state (reset to disabled for child). */
+ malloc_mutex_lock(tsdn, &background_thread_lock);
+ n_background_threads = 0;
+ background_thread_enabled_set(tsdn, false);
+ for (unsigned i = 0; i < ncpus; i++) {
+ background_thread_info_t *info = &background_thread_info[i];
+ malloc_mutex_lock(tsdn, &info->mtx);
+ info->state = background_thread_stopped;
+ int ret = pthread_cond_init(&info->cond, NULL);
+ assert(ret == 0);
+ background_thread_info_init(tsdn, info);
+ malloc_mutex_unlock(tsdn, &info->mtx);
+ }
+ malloc_mutex_unlock(tsdn, &background_thread_lock);
+}
+
+bool
+background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
+ assert(config_stats);
+ malloc_mutex_lock(tsdn, &background_thread_lock);
+ if (!background_thread_enabled()) {
+ malloc_mutex_unlock(tsdn, &background_thread_lock);
+ return true;
+ }
+
+ stats->num_threads = n_background_threads;
+ uint64_t num_runs = 0;
+ nstime_init(&stats->run_interval, 0);
+ for (unsigned i = 0; i < ncpus; i++) {
+ background_thread_info_t *info = &background_thread_info[i];
+ malloc_mutex_lock(tsdn, &info->mtx);
+ if (info->state != background_thread_stopped) {
+ num_runs += info->tot_n_runs;
+ nstime_add(&stats->run_interval, &info->tot_sleep_time);
+ }
+ malloc_mutex_unlock(tsdn, &info->mtx);
+ }
+ stats->num_runs = num_runs;
+ if (num_runs > 0) {
+ nstime_idivide(&stats->run_interval, num_runs);
+ }
+ malloc_mutex_unlock(tsdn, &background_thread_lock);
+
+ return false;
+}
+
+#undef BACKGROUND_THREAD_NPAGES_THRESHOLD
+#undef BILLION
+#undef BACKGROUND_THREAD_MIN_INTERVAL_NS
+
+/*
+ * When lazy lock is enabled, we need to make sure isthreaded is set before
+ * taking any background_thread locks. This is called early from ctl (instead
+ * of waiting for the pthread_create calls to trigger it) because the mutex is
+ * required before creating background threads.
+ */
+void
+background_thread_ctl_init(tsdn_t *tsdn) {
+ malloc_mutex_assert_not_owner(tsdn, &background_thread_lock);
+#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
+ pthread_once(&once_control, pthread_create_wrapper_once);
+#endif
+}
+
+#endif /* defined(JEMALLOC_BACKGROUND_THREAD) */
+
+bool
+background_thread_boot0(void) {
+ if (!have_background_thread && opt_background_thread) {
+ malloc_printf("<jemalloc>: option background_thread currently "
+ "supports pthread only\n");
+ return true;
+ }
+
+#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
+ pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
+ if (pthread_create_fptr == NULL) {
+ can_enable_background_thread = false;
+ if (config_lazy_lock || opt_background_thread) {
+ malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
+ "\"pthread_create\")\n");
+ abort();
+ }
+ } else {
+ can_enable_background_thread = true;
+ }
+#endif
+ return false;
+}
+
+bool
+background_thread_boot1(tsdn_t *tsdn) {
+#ifdef JEMALLOC_BACKGROUND_THREAD
+ assert(have_background_thread);
+ assert(narenas_total_get() > 0);
+
+ background_thread_enabled_set(tsdn, opt_background_thread);
+ if (malloc_mutex_init(&background_thread_lock,
+ "background_thread_global",
+ WITNESS_RANK_BACKGROUND_THREAD_GLOBAL,
+ malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ if (opt_background_thread) {
+ background_thread_ctl_init(tsdn);
+ }
+
+ background_thread_info = (background_thread_info_t *)base_alloc(tsdn,
+ b0get(), ncpus * sizeof(background_thread_info_t), CACHELINE);
+ if (background_thread_info == NULL) {
+ return true;
+ }
+
+ for (unsigned i = 0; i < ncpus; i++) {
+ background_thread_info_t *info = &background_thread_info[i];
+ /* Thread mutex is rank_inclusive because of thread0. */
+ if (malloc_mutex_init(&info->mtx, "background_thread",
+ WITNESS_RANK_BACKGROUND_THREAD,
+ malloc_mutex_address_ordered)) {
+ return true;
+ }
+ if (pthread_cond_init(&info->cond, NULL)) {
+ return true;
+ }
+ malloc_mutex_lock(tsdn, &info->mtx);
+ info->state = background_thread_stopped;
+ background_thread_info_init(tsdn, info);
+ malloc_mutex_unlock(tsdn, &info->mtx);
+ }
+#endif
+
+ return false;
+}
diff --git a/contrib/jemalloc/src/base.c b/contrib/jemalloc/src/base.c
index 5681a3f36d40..8e1544fd9ee4 100644
--- a/contrib/jemalloc/src/base.c
+++ b/contrib/jemalloc/src/base.c
@@ -1,187 +1,392 @@
-#define JEMALLOC_BASE_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define JEMALLOC_BASE_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/extent_mmap.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/sz.h"
/******************************************************************************/
/* Data. */
-static malloc_mutex_t base_mtx;
-static size_t base_extent_sn_next;
-static extent_tree_t base_avail_szsnad;
-static extent_node_t *base_nodes;
-static size_t base_allocated;
-static size_t base_resident;
-static size_t base_mapped;
+static base_t *b0;
/******************************************************************************/
-static extent_node_t *
-base_node_try_alloc(tsdn_t *tsdn)
-{
- extent_node_t *node;
+static void *
+base_map(extent_hooks_t *extent_hooks, unsigned ind, size_t size) {
+ void *addr;
+ bool zero = true;
+ bool commit = true;
+
+ assert(size == HUGEPAGE_CEILING(size));
- malloc_mutex_assert_owner(tsdn, &base_mtx);
+ if (extent_hooks == &extent_hooks_default) {
+ addr = extent_alloc_mmap(NULL, size, PAGE, &zero, &commit);
+ } else {
+ addr = extent_hooks->alloc(extent_hooks, NULL, size, PAGE,
+ &zero, &commit, ind);
+ }
- if (base_nodes == NULL)
- return (NULL);
- node = base_nodes;
- base_nodes = *(extent_node_t **)node;
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
- return (node);
+ return addr;
}
static void
-base_node_dalloc(tsdn_t *tsdn, extent_node_t *node)
-{
+base_unmap(extent_hooks_t *extent_hooks, unsigned ind, void *addr,
+ size_t size) {
+ /*
+ * Cascade through dalloc, decommit, purge_forced, and purge_lazy,
+ * stopping at first success. This cascade is performed for consistency
+ * with the cascade in extent_dalloc_wrapper() because an application's
+ * custom hooks may not support e.g. dalloc. This function is only ever
+ * called as a side effect of arena destruction, so although it might
+ * seem pointless to do anything besides dalloc here, the application
+	 * may in fact want all associated virtual memory to be left in some
+	 * consistent-but-allocated state.
+ */
+ if (extent_hooks == &extent_hooks_default) {
+ if (!extent_dalloc_mmap(addr, size)) {
+ return;
+ }
+ if (!pages_decommit(addr, size)) {
+ return;
+ }
+ if (!pages_purge_forced(addr, size)) {
+ return;
+ }
+ if (!pages_purge_lazy(addr, size)) {
+ return;
+ }
+ /* Nothing worked. This should never happen. */
+ not_reached();
+ } else {
+ if (extent_hooks->dalloc != NULL &&
+ !extent_hooks->dalloc(extent_hooks, addr, size, true,
+ ind)) {
+ return;
+ }
+ if (extent_hooks->decommit != NULL &&
+ !extent_hooks->decommit(extent_hooks, addr, size, 0, size,
+ ind)) {
+ return;
+ }
+ if (extent_hooks->purge_forced != NULL &&
+ !extent_hooks->purge_forced(extent_hooks, addr, size, 0,
+ size, ind)) {
+ return;
+ }
+ if (extent_hooks->purge_lazy != NULL &&
+ !extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size,
+ ind)) {
+ return;
+ }
+ /* Nothing worked. That's the application's problem. */
+ }
+}
- malloc_mutex_assert_owner(tsdn, &base_mtx);
+static void
+base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
+ size_t size) {
+ size_t sn;
+
+ sn = *extent_sn_next;
+ (*extent_sn_next)++;
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
- *(extent_node_t **)node = base_nodes;
- base_nodes = node;
+ extent_binit(extent, addr, size, sn);
+}
+
+static void *
+base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
+ size_t alignment) {
+ void *ret;
+
+ assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM));
+ assert(size == ALIGNMENT_CEILING(size, alignment));
+
+ *gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
+ alignment) - (uintptr_t)extent_addr_get(extent);
+ ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size);
+ assert(extent_bsize_get(extent) >= *gap_size + size);
+ extent_binit(extent, (void *)((uintptr_t)extent_addr_get(extent) +
+ *gap_size + size), extent_bsize_get(extent) - *gap_size - size,
+ extent_sn_get(extent));
+ return ret;
}
static void
-base_extent_node_init(extent_node_t *node, void *addr, size_t size)
-{
- size_t sn = atomic_add_z(&base_extent_sn_next, 1) - 1;
+base_extent_bump_alloc_post(tsdn_t *tsdn, base_t *base, extent_t *extent,
+ size_t gap_size, void *addr, size_t size) {
+ if (extent_bsize_get(extent) > 0) {
+ /*
+ * Compute the index for the largest size class that does not
+ * exceed extent's size.
+ */
+ szind_t index_floor =
+ sz_size2index(extent_bsize_get(extent) + 1) - 1;
+ extent_heap_insert(&base->avail[index_floor], extent);
+ }
- extent_node_init(node, NULL, addr, size, sn, true, true);
+ if (config_stats) {
+ base->allocated += size;
+ /*
+ * Add one PAGE to base_resident for every page boundary that is
+ * crossed by the new allocation.
+ */
+ base->resident += PAGE_CEILING((uintptr_t)addr + size) -
+ PAGE_CEILING((uintptr_t)addr - gap_size);
+ assert(base->allocated <= base->resident);
+ assert(base->resident <= base->mapped);
+ }
}
-static extent_node_t *
-base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
-{
- extent_node_t *node;
- size_t csize, nsize;
- void *addr;
+static void *
+base_extent_bump_alloc(tsdn_t *tsdn, base_t *base, extent_t *extent,
+ size_t size, size_t alignment) {
+ void *ret;
+ size_t gap_size;
- malloc_mutex_assert_owner(tsdn, &base_mtx);
- assert(minsize != 0);
- node = base_node_try_alloc(tsdn);
- /* Allocate enough space to also carve a node out if necessary. */
- nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0;
- csize = CHUNK_CEILING(minsize + nsize);
- addr = chunk_alloc_base(csize);
- if (addr == NULL) {
- if (node != NULL)
- base_node_dalloc(tsdn, node);
- return (NULL);
- }
- base_mapped += csize;
- if (node == NULL) {
- node = (extent_node_t *)addr;
- addr = (void *)((uintptr_t)addr + nsize);
- csize -= nsize;
- if (config_stats) {
- base_allocated += nsize;
- base_resident += PAGE_CEILING(nsize);
- }
+ ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment);
+ base_extent_bump_alloc_post(tsdn, base, extent, gap_size, ret, size);
+ return ret;
+}
+
+/*
+ * Allocate a block of virtual memory that is large enough to start with a
+ * base_block_t header, followed by an object of specified size and alignment.
+ * On success a pointer to the initialized base_block_t header is returned.
+ */
+static base_block_t *
+base_block_alloc(extent_hooks_t *extent_hooks, unsigned ind,
+ pszind_t *pind_last, size_t *extent_sn_next, size_t size,
+ size_t alignment) {
+ alignment = ALIGNMENT_CEILING(alignment, QUANTUM);
+ size_t usize = ALIGNMENT_CEILING(size, alignment);
+ size_t header_size = sizeof(base_block_t);
+ size_t gap_size = ALIGNMENT_CEILING(header_size, alignment) -
+ header_size;
+ /*
+ * Create increasingly larger blocks in order to limit the total number
+ * of disjoint virtual memory ranges. Choose the next size in the page
+ * size class series (skipping size classes that are not a multiple of
+ * HUGEPAGE), or a size large enough to satisfy the requested size and
+ * alignment, whichever is larger.
+ */
+ size_t min_block_size = HUGEPAGE_CEILING(sz_psz2u(header_size + gap_size
+ + usize));
+ pszind_t pind_next = (*pind_last + 1 < NPSIZES) ? *pind_last + 1 :
+ *pind_last;
+ size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next));
+ size_t block_size = (min_block_size > next_block_size) ? min_block_size
+ : next_block_size;
+ base_block_t *block = (base_block_t *)base_map(extent_hooks, ind,
+ block_size);
+ if (block == NULL) {
+ return NULL;
}
- base_extent_node_init(node, addr, csize);
- return (node);
+ *pind_last = sz_psz2ind(block_size);
+ block->size = block_size;
+ block->next = NULL;
+ assert(block_size >= header_size);
+ base_extent_init(extent_sn_next, &block->extent,
+ (void *)((uintptr_t)block + header_size), block_size - header_size);
+ return block;
}
/*
- * base_alloc() guarantees demand-zeroed memory, in order to make multi-page
- * sparse data structures such as radix tree nodes efficient with respect to
- * physical memory usage.
+ * Allocate an extent that is at least as large as specified size, with
+ * specified alignment.
*/
-void *
-base_alloc(tsdn_t *tsdn, size_t size)
-{
- void *ret;
- size_t csize, usize;
- extent_node_t *node;
- extent_node_t key;
+static extent_t *
+base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
+ malloc_mutex_assert_owner(tsdn, &base->mtx);
+ extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
/*
- * Round size up to nearest multiple of the cacheline size, so that
- * there is no chance of false cache line sharing.
+ * Drop mutex during base_block_alloc(), because an extent hook will be
+ * called.
*/
- csize = CACHELINE_CEILING(size);
-
- usize = s2u(csize);
- extent_node_init(&key, NULL, NULL, usize, 0, false, false);
- malloc_mutex_lock(tsdn, &base_mtx);
- node = extent_tree_szsnad_nsearch(&base_avail_szsnad, &key);
- if (node != NULL) {
- /* Use existing space. */
- extent_tree_szsnad_remove(&base_avail_szsnad, node);
- } else {
- /* Try to allocate more space. */
- node = base_chunk_alloc(tsdn, csize);
+ malloc_mutex_unlock(tsdn, &base->mtx);
+ base_block_t *block = base_block_alloc(extent_hooks, base_ind_get(base),
+ &base->pind_last, &base->extent_sn_next, size, alignment);
+ malloc_mutex_lock(tsdn, &base->mtx);
+ if (block == NULL) {
+ return NULL;
}
- if (node == NULL) {
- ret = NULL;
- goto label_return;
+ block->next = base->blocks;
+ base->blocks = block;
+ if (config_stats) {
+ base->allocated += sizeof(base_block_t);
+ base->resident += PAGE_CEILING(sizeof(base_block_t));
+ base->mapped += block->size;
+ assert(base->allocated <= base->resident);
+ assert(base->resident <= base->mapped);
}
+ return &block->extent;
+}
- ret = extent_node_addr_get(node);
- if (extent_node_size_get(node) > csize) {
- extent_node_addr_set(node, (void *)((uintptr_t)ret + csize));
- extent_node_size_set(node, extent_node_size_get(node) - csize);
- extent_tree_szsnad_insert(&base_avail_szsnad, node);
- } else
- base_node_dalloc(tsdn, node);
+base_t *
+b0get(void) {
+ return b0;
+}
+
+base_t *
+base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
+ pszind_t pind_last = 0;
+ size_t extent_sn_next = 0;
+ base_block_t *block = base_block_alloc(extent_hooks, ind, &pind_last,
+ &extent_sn_next, sizeof(base_t), QUANTUM);
+ if (block == NULL) {
+ return NULL;
+ }
+
+ size_t gap_size;
+ size_t base_alignment = CACHELINE;
+ size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
+ base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->extent,
+ &gap_size, base_size, base_alignment);
+ base->ind = ind;
+ atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELAXED);
+ if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE,
+ malloc_mutex_rank_exclusive)) {
+ base_unmap(extent_hooks, ind, block, block->size);
+ return NULL;
+ }
+ base->pind_last = pind_last;
+ base->extent_sn_next = extent_sn_next;
+ base->blocks = block;
+ for (szind_t i = 0; i < NSIZES; i++) {
+ extent_heap_new(&base->avail[i]);
+ }
if (config_stats) {
- base_allocated += csize;
- /*
- * Add one PAGE to base_resident for every page boundary that is
- * crossed by the new allocation.
- */
- base_resident += PAGE_CEILING((uintptr_t)ret + csize) -
- PAGE_CEILING((uintptr_t)ret);
+ base->allocated = sizeof(base_block_t);
+ base->resident = PAGE_CEILING(sizeof(base_block_t));
+ base->mapped = block->size;
+ assert(base->allocated <= base->resident);
+ assert(base->resident <= base->mapped);
}
- JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize);
-label_return:
- malloc_mutex_unlock(tsdn, &base_mtx);
- return (ret);
+ base_extent_bump_alloc_post(tsdn, base, &block->extent, gap_size, base,
+ base_size);
+
+ return base;
}
void
-base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident,
- size_t *mapped)
-{
+base_delete(base_t *base) {
+ extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
+ base_block_t *next = base->blocks;
+ do {
+ base_block_t *block = next;
+ next = block->next;
+ base_unmap(extent_hooks, base_ind_get(base), block,
+ block->size);
+ } while (next != NULL);
+}
- malloc_mutex_lock(tsdn, &base_mtx);
- assert(base_allocated <= base_resident);
- assert(base_resident <= base_mapped);
- *allocated = base_allocated;
- *resident = base_resident;
- *mapped = base_mapped;
- malloc_mutex_unlock(tsdn, &base_mtx);
+extent_hooks_t *
+base_extent_hooks_get(base_t *base) {
+ return (extent_hooks_t *)atomic_load_p(&base->extent_hooks,
+ ATOMIC_ACQUIRE);
}
-bool
-base_boot(void)
-{
+extent_hooks_t *
+base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) {
+ extent_hooks_t *old_extent_hooks = base_extent_hooks_get(base);
+ atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELEASE);
+ return old_extent_hooks;
+}
+
+static void *
+base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
+ size_t *esn) {
+ alignment = QUANTUM_CEILING(alignment);
+ size_t usize = ALIGNMENT_CEILING(size, alignment);
+ size_t asize = usize + alignment - QUANTUM;
- if (malloc_mutex_init(&base_mtx, "base", WITNESS_RANK_BASE))
- return (true);
- base_extent_sn_next = 0;
- extent_tree_szsnad_new(&base_avail_szsnad);
- base_nodes = NULL;
+ extent_t *extent = NULL;
+ malloc_mutex_lock(tsdn, &base->mtx);
+ for (szind_t i = sz_size2index(asize); i < NSIZES; i++) {
+ extent = extent_heap_remove_first(&base->avail[i]);
+ if (extent != NULL) {
+ /* Use existing space. */
+ break;
+ }
+ }
+ if (extent == NULL) {
+ /* Try to allocate more space. */
+ extent = base_extent_alloc(tsdn, base, usize, alignment);
+ }
+ void *ret;
+ if (extent == NULL) {
+ ret = NULL;
+ goto label_return;
+ }
+
+ ret = base_extent_bump_alloc(tsdn, base, extent, usize, alignment);
+ if (esn != NULL) {
+ *esn = extent_sn_get(extent);
+ }
+label_return:
+ malloc_mutex_unlock(tsdn, &base->mtx);
+ return ret;
+}
- return (false);
+/*
+ * base_alloc() returns zeroed memory, which is always demand-zeroed for the
+ * auto arenas, in order to make multi-page sparse data structures such as radix
+ * tree nodes efficient with respect to physical memory usage. Upon success a
+ * pointer to at least size bytes with specified alignment is returned. Note
+ * that size is rounded up to the nearest multiple of alignment to avoid false
+ * sharing.
+ */
+void *
+base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
+ return base_alloc_impl(tsdn, base, size, alignment, NULL);
+}
+
+extent_t *
+base_alloc_extent(tsdn_t *tsdn, base_t *base) {
+ size_t esn;
+ extent_t *extent = base_alloc_impl(tsdn, base, sizeof(extent_t),
+ CACHELINE, &esn);
+ if (extent == NULL) {
+ return NULL;
+ }
+ extent_esn_set(extent, esn);
+ return extent;
}
void
-base_prefork(tsdn_t *tsdn)
-{
+base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident,
+ size_t *mapped) {
+ cassert(config_stats);
- malloc_mutex_prefork(tsdn, &base_mtx);
+ malloc_mutex_lock(tsdn, &base->mtx);
+ assert(base->allocated <= base->resident);
+ assert(base->resident <= base->mapped);
+ *allocated = base->allocated;
+ *resident = base->resident;
+ *mapped = base->mapped;
+ malloc_mutex_unlock(tsdn, &base->mtx);
}
void
-base_postfork_parent(tsdn_t *tsdn)
-{
+base_prefork(tsdn_t *tsdn, base_t *base) {
+ malloc_mutex_prefork(tsdn, &base->mtx);
+}
- malloc_mutex_postfork_parent(tsdn, &base_mtx);
+void
+base_postfork_parent(tsdn_t *tsdn, base_t *base) {
+ malloc_mutex_postfork_parent(tsdn, &base->mtx);
}
void
-base_postfork_child(tsdn_t *tsdn)
-{
+base_postfork_child(tsdn_t *tsdn, base_t *base) {
+ malloc_mutex_postfork_child(tsdn, &base->mtx);
+}
- malloc_mutex_postfork_child(tsdn, &base_mtx);
+bool
+base_boot(tsdn_t *tsdn) {
+ b0 = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default);
+ return (b0 == NULL);
}
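
The rewritten base allocator above combines two ideas: objects are carved out of the current block with a bump pointer (base_extent_bump_alloc_helper()), and every new block is mapped at the next larger page size class so that the number of disjoint virtual memory ranges stays small (base_block_alloc()). The standalone sketch below illustrates just those two ideas. The toy_* names are invented for the example; it assumes a POSIX mmap() and an alignment no larger than the page size, and it omits the per-size-class heaps of leftover extents, the statistics, and the locking that the real code carries.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

#define TOY_QUANTUM	((size_t)16)
#define TOY_HUGEPAGE	((size_t)2 << 20)

/* Round x up to a multiple of align (align must be a power of two). */
static size_t
toy_ceil(size_t x, size_t align) {
	return (x + align - 1) & ~(align - 1);
}

typedef struct toy_block_s toy_block_t;
struct toy_block_s {
	toy_block_t	*next;	/* Older blocks, kept only for unmapping. */
	size_t		size;	/* Size of this mapping. */
	size_t		used;	/* Bump offset within this mapping. */
};

typedef struct {
	toy_block_t	*blocks;		/* Current block. */
	size_t		next_block_size;	/* Grows with every mapping. */
} toy_base_t;

static void *
toy_base_alloc(toy_base_t *base, size_t size, size_t alignment) {
	assert(alignment != 0 && (alignment & (alignment - 1)) == 0 &&
	    alignment <= (size_t)4096);
	size = toy_ceil(size, TOY_QUANTUM);
	toy_block_t *block = base->blocks;
	if (block != NULL) {
		/* Bump-allocate out of the current block if it fits. */
		uintptr_t cur = (uintptr_t)block + block->used;
		uintptr_t aligned = (cur + alignment - 1) &
		    ~((uintptr_t)alignment - 1);
		size_t gap = (size_t)(aligned - cur);
		if (block->used + gap + size <= block->size) {
			block->used += gap + size;
			return (void *)aligned;
		}
	}
	/* Map a new block; grow the size so the mapping count stays small. */
	size_t header = toy_ceil(sizeof(toy_block_t), alignment);
	size_t min_size = toy_ceil(header + size, TOY_HUGEPAGE);
	size_t block_size = (base->next_block_size > min_size) ?
	    base->next_block_size : min_size;
	void *addr = mmap(NULL, block_size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED) {
		return NULL;
	}
	block = (toy_block_t *)addr;
	block->next = base->blocks;
	block->size = block_size;
	block->used = header + size;
	base->blocks = block;
	base->next_block_size = block_size * 2;
	return (void *)((uintptr_t)addr + header);
}

Starting from a zero-initialized toy_base_t, every newly mapped block doubles next_block_size, which gives the same logarithmic bound on the mapping count that the comment in base_block_alloc() describes via the page size class series.
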
diff --git a/contrib/jemalloc/src/bitmap.c b/contrib/jemalloc/src/bitmap.c
index ac0f3b381954..468b3178ebfa 100644
--- a/contrib/jemalloc/src/bitmap.c
+++ b/contrib/jemalloc/src/bitmap.c
@@ -1,13 +1,15 @@
-#define JEMALLOC_BITMAP_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define JEMALLOC_BITMAP_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
/******************************************************************************/
-#ifdef USE_TREE
+#ifdef BITMAP_USE_TREE
void
-bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
-{
+bitmap_info_init(bitmap_info_t *binfo, size_t nbits) {
unsigned i;
size_t group_count;
@@ -35,46 +37,53 @@ bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
}
static size_t
-bitmap_info_ngroups(const bitmap_info_t *binfo)
-{
-
- return (binfo->levels[binfo->nlevels].group_offset);
+bitmap_info_ngroups(const bitmap_info_t *binfo) {
+ return binfo->levels[binfo->nlevels].group_offset;
}
void
-bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
-{
+bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) {
size_t extra;
unsigned i;
/*
* Bits are actually inverted with regard to the external bitmap
- * interface, so the bitmap starts out with all 1 bits, except for
- * trailing unused bits (if any). Note that each group uses bit 0 to
- * correspond to the first logical bit in the group, so extra bits
- * are the most significant bits of the last group.
+ * interface.
+ */
+
+ if (fill) {
+ /* The "filled" bitmap starts out with all 0 bits. */
+ memset(bitmap, 0, bitmap_size(binfo));
+ return;
+ }
+
+ /*
+ * The "empty" bitmap starts out with all 1 bits, except for trailing
+ * unused bits (if any). Note that each group uses bit 0 to correspond
+ * to the first logical bit in the group, so extra bits are the most
+ * significant bits of the last group.
*/
memset(bitmap, 0xffU, bitmap_size(binfo));
extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
& BITMAP_GROUP_NBITS_MASK;
- if (extra != 0)
+ if (extra != 0) {
bitmap[binfo->levels[1].group_offset - 1] >>= extra;
+ }
for (i = 1; i < binfo->nlevels; i++) {
size_t group_count = binfo->levels[i].group_offset -
binfo->levels[i-1].group_offset;
extra = (BITMAP_GROUP_NBITS - (group_count &
BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK;
- if (extra != 0)
+ if (extra != 0) {
bitmap[binfo->levels[i+1].group_offset - 1] >>= extra;
+ }
}
}
-#else /* USE_TREE */
+#else /* BITMAP_USE_TREE */
void
-bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
-{
-
+bitmap_info_init(bitmap_info_t *binfo, size_t nbits) {
assert(nbits > 0);
assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));
@@ -83,29 +92,30 @@ bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
}
static size_t
-bitmap_info_ngroups(const bitmap_info_t *binfo)
-{
-
- return (binfo->ngroups);
+bitmap_info_ngroups(const bitmap_info_t *binfo) {
+ return binfo->ngroups;
}
void
-bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
-{
+bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) {
size_t extra;
+ if (fill) {
+ memset(bitmap, 0, bitmap_size(binfo));
+ return;
+ }
+
memset(bitmap, 0xffU, bitmap_size(binfo));
extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
& BITMAP_GROUP_NBITS_MASK;
- if (extra != 0)
+ if (extra != 0) {
bitmap[binfo->ngroups - 1] >>= extra;
+ }
}
-#endif /* USE_TREE */
+#endif /* BITMAP_USE_TREE */
size_t
-bitmap_size(const bitmap_info_t *binfo)
-{
-
+bitmap_size(const bitmap_info_t *binfo) {
return (bitmap_info_ngroups(binfo) << LG_SIZEOF_BITMAP);
}
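
The fill parameter added to bitmap_init() above is easiest to see with jemalloc's inverted storage convention in mind: a stored 1 means the logical bit is still unset, so an "empty" bitmap is all ones (minus the unused trailing bits) and a "filled" bitmap is all zeros. The short standalone sketch below mirrors the non-tree layout (BITMAP_USE_TREE undefined); the toy_* names are invented for the example, and __builtin_ctzl assumes GCC or Clang.

#include <assert.h>
#include <limits.h>
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

#define TOY_GROUP_NBITS	(sizeof(unsigned long) * CHAR_BIT)
#define TOY_MAX_GROUPS	4

/* Inverted convention: a stored 1 bit means "logical bit still unset". */
typedef struct {
	size_t		nbits;
	size_t		ngroups;
	unsigned long	groups[TOY_MAX_GROUPS];
} toy_bitmap_t;

static void
toy_bitmap_init(toy_bitmap_t *b, size_t nbits, bool fill) {
	assert(nbits > 0 && nbits <= TOY_MAX_GROUPS * TOY_GROUP_NBITS);
	b->nbits = nbits;
	b->ngroups = (nbits + TOY_GROUP_NBITS - 1) / TOY_GROUP_NBITS;
	if (fill) {
		/* "Filled": every logical bit set -> all-zero storage. */
		memset(b->groups, 0, b->ngroups * sizeof(unsigned long));
		return;
	}
	/* "Empty": all-one storage, with the trailing unused bits cleared
	 * so they can never be handed out. */
	memset(b->groups, 0xff, b->ngroups * sizeof(unsigned long));
	size_t extra = b->ngroups * TOY_GROUP_NBITS - nbits;
	if (extra != 0) {
		b->groups[b->ngroups - 1] >>= extra;
	}
}

/* Set the first unset logical bit and return its index; nbits if full. */
static size_t
toy_bitmap_sfu(toy_bitmap_t *b) {
	for (size_t i = 0; i < b->ngroups; i++) {
		if (b->groups[i] != 0) {
			size_t bit = (size_t)__builtin_ctzl(b->groups[i]);
			b->groups[i] &= ~(1UL << bit);
			return i * TOY_GROUP_NBITS + bit;
		}
	}
	return b->nbits;
}

With this convention, toy_bitmap_sfu() is a plain find-first-set over the storage words, which is why the real code can rely on ffs-style bit tricks instead of scanning logical bits one at a time.
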
diff --git a/contrib/jemalloc/src/chunk.c b/contrib/jemalloc/src/chunk.c
deleted file mode 100644
index 94f28f2df4b1..000000000000
--- a/contrib/jemalloc/src/chunk.c
+++ /dev/null
@@ -1,799 +0,0 @@
-#define JEMALLOC_CHUNK_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-/******************************************************************************/
-/* Data. */
-
-const char *opt_dss = DSS_DEFAULT;
-size_t opt_lg_chunk = 0;
-
-/* Used exclusively for gdump triggering. */
-static size_t curchunks;
-static size_t highchunks;
-
-rtree_t chunks_rtree;
-
-/* Various chunk-related settings. */
-size_t chunksize;
-size_t chunksize_mask; /* (chunksize - 1). */
-size_t chunk_npages;
-
-static void *chunk_alloc_default(void *new_addr, size_t size,
- size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
-static bool chunk_dalloc_default(void *chunk, size_t size, bool committed,
- unsigned arena_ind);
-static bool chunk_commit_default(void *chunk, size_t size, size_t offset,
- size_t length, unsigned arena_ind);
-static bool chunk_decommit_default(void *chunk, size_t size, size_t offset,
- size_t length, unsigned arena_ind);
-static bool chunk_purge_default(void *chunk, size_t size, size_t offset,
- size_t length, unsigned arena_ind);
-static bool chunk_split_default(void *chunk, size_t size, size_t size_a,
- size_t size_b, bool committed, unsigned arena_ind);
-static bool chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b,
- size_t size_b, bool committed, unsigned arena_ind);
-
-const chunk_hooks_t chunk_hooks_default = {
- chunk_alloc_default,
- chunk_dalloc_default,
- chunk_commit_default,
- chunk_decommit_default,
- chunk_purge_default,
- chunk_split_default,
- chunk_merge_default
-};
-
-/******************************************************************************/
-/*
- * Function prototypes for static functions that are referenced prior to
- * definition.
- */
-
-static void chunk_record(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szsnad,
- extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, size_t sn,
- bool zeroed, bool committed);
-
-/******************************************************************************/
-
-static chunk_hooks_t
-chunk_hooks_get_locked(arena_t *arena)
-{
-
- return (arena->chunk_hooks);
-}
-
-chunk_hooks_t
-chunk_hooks_get(tsdn_t *tsdn, arena_t *arena)
-{
- chunk_hooks_t chunk_hooks;
-
- malloc_mutex_lock(tsdn, &arena->chunks_mtx);
- chunk_hooks = chunk_hooks_get_locked(arena);
- malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
-
- return (chunk_hooks);
-}
-
-chunk_hooks_t
-chunk_hooks_set(tsdn_t *tsdn, arena_t *arena, const chunk_hooks_t *chunk_hooks)
-{
- chunk_hooks_t old_chunk_hooks;
-
- malloc_mutex_lock(tsdn, &arena->chunks_mtx);
- old_chunk_hooks = arena->chunk_hooks;
- /*
- * Copy each field atomically so that it is impossible for readers to
- * see partially updated pointers. There are places where readers only
- * need one hook function pointer (therefore no need to copy the
- * entirety of arena->chunk_hooks), and stale reads do not affect
- * correctness, so they perform unlocked reads.
- */
-#define ATOMIC_COPY_HOOK(n) do { \
- union { \
- chunk_##n##_t **n; \
- void **v; \
- } u; \
- u.n = &arena->chunk_hooks.n; \
- atomic_write_p(u.v, chunk_hooks->n); \
-} while (0)
- ATOMIC_COPY_HOOK(alloc);
- ATOMIC_COPY_HOOK(dalloc);
- ATOMIC_COPY_HOOK(commit);
- ATOMIC_COPY_HOOK(decommit);
- ATOMIC_COPY_HOOK(purge);
- ATOMIC_COPY_HOOK(split);
- ATOMIC_COPY_HOOK(merge);
-#undef ATOMIC_COPY_HOOK
- malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
-
- return (old_chunk_hooks);
-}
-
-static void
-chunk_hooks_assure_initialized_impl(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks, bool locked)
-{
- static const chunk_hooks_t uninitialized_hooks =
- CHUNK_HOOKS_INITIALIZER;
-
- if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) ==
- 0) {
- *chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
- chunk_hooks_get(tsdn, arena);
- }
-}
-
-static void
-chunk_hooks_assure_initialized_locked(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks)
-{
-
- chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, true);
-}
-
-static void
-chunk_hooks_assure_initialized(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks)
-{
-
- chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, false);
-}
-
-bool
-chunk_register(const void *chunk, const extent_node_t *node, bool *gdump)
-{
-
- assert(extent_node_addr_get(node) == chunk);
-
- if (rtree_set(&chunks_rtree, (uintptr_t)chunk, node))
- return (true);
- if (config_prof && opt_prof) {
- size_t size = extent_node_size_get(node);
- size_t nadd = (size == 0) ? 1 : size / chunksize;
- size_t cur = atomic_add_z(&curchunks, nadd);
- size_t high = atomic_read_z(&highchunks);
- while (cur > high && atomic_cas_z(&highchunks, high, cur)) {
- /*
- * Don't refresh cur, because it may have decreased
- * since this thread lost the highchunks update race.
- */
- high = atomic_read_z(&highchunks);
- }
- *gdump = (cur > high && prof_gdump_get_unlocked());
- }
-
- return (false);
-}
-
-void
-chunk_deregister(const void *chunk, const extent_node_t *node)
-{
- bool err;
-
- err = rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL);
- assert(!err);
- if (config_prof && opt_prof) {
- size_t size = extent_node_size_get(node);
- size_t nsub = (size == 0) ? 1 : size / chunksize;
- assert(atomic_read_z(&curchunks) >= nsub);
- atomic_sub_z(&curchunks, nsub);
- }
-}
-
-/*
- * Do first-best-fit chunk selection, i.e. select the oldest/lowest chunk that
- * best fits.
- */
-static extent_node_t *
-chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szsnad, size_t size)
-{
- extent_node_t *node;
- size_t qsize;
- extent_node_t key;
-
- assert(size == CHUNK_CEILING(size));
-
- qsize = extent_size_quantize_ceil(size);
- extent_node_init(&key, arena, NULL, qsize, 0, false, false);
- node = extent_tree_szsnad_nsearch(chunks_szsnad, &key);
- assert(node == NULL || extent_node_size_get(node) >= size);
- return node;
-}
-
-static void *
-chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache,
- void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
- bool *commit, bool dalloc_node)
-{
- void *ret;
- extent_node_t *node;
- size_t alloc_size, leadsize, trailsize;
- bool zeroed, committed;
-
- assert(CHUNK_CEILING(size) == size);
- assert(alignment > 0);
- assert(new_addr == NULL || alignment == chunksize);
- assert(CHUNK_ADDR2BASE(new_addr) == new_addr);
- /*
- * Cached chunks use the node linkage embedded in their headers, in
- * which case dalloc_node is true, and new_addr is non-NULL because
- * we're operating on a specific chunk.
- */
- assert(dalloc_node || new_addr != NULL);
-
- alloc_size = size + CHUNK_CEILING(alignment) - chunksize;
- /* Beware size_t wrap-around. */
- if (alloc_size < size)
- return (NULL);
- malloc_mutex_lock(tsdn, &arena->chunks_mtx);
- chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
- if (new_addr != NULL) {
- extent_node_t key;
- extent_node_init(&key, arena, new_addr, alloc_size, 0, false,
- false);
- node = extent_tree_ad_search(chunks_ad, &key);
- } else {
- node = chunk_first_best_fit(arena, chunks_szsnad, alloc_size);
- }
- if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
- size)) {
- malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
- return (NULL);
- }
- leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
- alignment) - (uintptr_t)extent_node_addr_get(node);
- assert(new_addr == NULL || leadsize == 0);
- assert(extent_node_size_get(node) >= leadsize + size);
- trailsize = extent_node_size_get(node) - leadsize - size;
- ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
- *sn = extent_node_sn_get(node);
- zeroed = extent_node_zeroed_get(node);
- if (zeroed)
- *zero = true;
- committed = extent_node_committed_get(node);
- if (committed)
- *commit = true;
- /* Split the lead. */
- if (leadsize != 0 &&
- chunk_hooks->split(extent_node_addr_get(node),
- extent_node_size_get(node), leadsize, size, false, arena->ind)) {
- malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
- return (NULL);
- }
- /* Remove node from the tree. */
- extent_tree_szsnad_remove(chunks_szsnad, node);
- extent_tree_ad_remove(chunks_ad, node);
- arena_chunk_cache_maybe_remove(arena, node, cache);
- if (leadsize != 0) {
- /* Insert the leading space as a smaller chunk. */
- extent_node_size_set(node, leadsize);
- extent_tree_szsnad_insert(chunks_szsnad, node);
- extent_tree_ad_insert(chunks_ad, node);
- arena_chunk_cache_maybe_insert(arena, node, cache);
- node = NULL;
- }
- if (trailsize != 0) {
- /* Split the trail. */
- if (chunk_hooks->split(ret, size + trailsize, size,
- trailsize, false, arena->ind)) {
- if (dalloc_node && node != NULL)
- arena_node_dalloc(tsdn, arena, node);
- malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
- chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad,
- chunks_ad, cache, ret, size + trailsize, *sn,
- zeroed, committed);
- return (NULL);
- }
- /* Insert the trailing space as a smaller chunk. */
- if (node == NULL) {
- node = arena_node_alloc(tsdn, arena);
- if (node == NULL) {
- malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
- chunk_record(tsdn, arena, chunk_hooks,
- chunks_szsnad, chunks_ad, cache, ret, size
- + trailsize, *sn, zeroed, committed);
- return (NULL);
- }
- }
- extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
- trailsize, *sn, zeroed, committed);
- extent_tree_szsnad_insert(chunks_szsnad, node);
- extent_tree_ad_insert(chunks_ad, node);
- arena_chunk_cache_maybe_insert(arena, node, cache);
- node = NULL;
- }
- if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
- malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
- chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad, chunks_ad,
- cache, ret, size, *sn, zeroed, committed);
- return (NULL);
- }
- malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
-
- assert(dalloc_node || node != NULL);
- if (dalloc_node && node != NULL)
- arena_node_dalloc(tsdn, arena, node);
- if (*zero) {
- if (!zeroed)
- memset(ret, 0, size);
- else if (config_debug) {
- size_t i;
- size_t *p = (size_t *)(uintptr_t)ret;
-
- for (i = 0; i < size / sizeof(size_t); i++)
- assert(p[i] == 0);
- }
- if (config_valgrind)
- JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
- }
- return (ret);
-}
-
-/*
- * If the caller specifies (!*zero), it is still possible to receive zeroed
- * memory, in which case *zero is toggled to true. arena_chunk_alloc() takes
- * advantage of this to avoid demanding zeroed chunks, but taking advantage of
- * them if they are returned.
- */
-static void *
-chunk_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
- size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec)
-{
- void *ret;
-
- assert(size != 0);
- assert((size & chunksize_mask) == 0);
- assert(alignment != 0);
- assert((alignment & chunksize_mask) == 0);
-
- /* "primary" dss. */
- if (have_dss && dss_prec == dss_prec_primary && (ret =
- chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
- commit)) != NULL)
- return (ret);
- /* mmap. */
- if ((ret = chunk_alloc_mmap(new_addr, size, alignment, zero, commit)) !=
- NULL)
- return (ret);
- /* "secondary" dss. */
- if (have_dss && dss_prec == dss_prec_secondary && (ret =
- chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
- commit)) != NULL)
- return (ret);
-
- /* All strategies for allocation failed. */
- return (NULL);
-}
-
-void *
-chunk_alloc_base(size_t size)
-{
- void *ret;
- bool zero, commit;
-
- /*
- * Directly call chunk_alloc_mmap() rather than chunk_alloc_core()
- * because it's critical that chunk_alloc_base() return untouched
- * demand-zeroed virtual memory.
- */
- zero = true;
- commit = true;
- ret = chunk_alloc_mmap(NULL, size, chunksize, &zero, &commit);
- if (ret == NULL)
- return (NULL);
- if (config_valgrind)
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
-
- return (ret);
-}
-
-void *
-chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
- bool *commit, bool dalloc_node)
-{
- void *ret;
-
- assert(size != 0);
- assert((size & chunksize_mask) == 0);
- assert(alignment != 0);
- assert((alignment & chunksize_mask) == 0);
-
- ret = chunk_recycle(tsdn, arena, chunk_hooks,
- &arena->chunks_szsnad_cached, &arena->chunks_ad_cached, true,
- new_addr, size, alignment, sn, zero, commit, dalloc_node);
- if (ret == NULL)
- return (NULL);
- if (config_valgrind)
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
- return (ret);
-}
-
-static arena_t *
-chunk_arena_get(tsdn_t *tsdn, unsigned arena_ind)
-{
- arena_t *arena;
-
- arena = arena_get(tsdn, arena_ind, false);
- /*
- * The arena we're allocating on behalf of must have been initialized
- * already.
- */
- assert(arena != NULL);
- return (arena);
-}
-
-static void *
-chunk_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
- size_t size, size_t alignment, bool *zero, bool *commit)
-{
- void *ret;
-
- ret = chunk_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
- commit, arena->dss_prec);
- if (ret == NULL)
- return (NULL);
- if (config_valgrind)
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
-
- return (ret);
-}
-
-static void *
-chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
- bool *commit, unsigned arena_ind)
-{
- tsdn_t *tsdn;
- arena_t *arena;
-
- tsdn = tsdn_fetch();
- arena = chunk_arena_get(tsdn, arena_ind);
-
- return (chunk_alloc_default_impl(tsdn, arena, new_addr, size, alignment,
- zero, commit));
-}
-
-static void *
-chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
- bool *commit)
-{
- void *ret;
-
- assert(size != 0);
- assert((size & chunksize_mask) == 0);
- assert(alignment != 0);
- assert((alignment & chunksize_mask) == 0);
-
- ret = chunk_recycle(tsdn, arena, chunk_hooks,
- &arena->chunks_szsnad_retained, &arena->chunks_ad_retained, false,
- new_addr, size, alignment, sn, zero, commit, true);
-
- if (config_stats && ret != NULL)
- arena->stats.retained -= size;
-
- return (ret);
-}
-
-void *
-chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
- bool *commit)
-{
- void *ret;
-
- chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
-
- ret = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size,
- alignment, sn, zero, commit);
- if (ret == NULL) {
- if (chunk_hooks->alloc == chunk_alloc_default) {
- /* Call directly to propagate tsdn. */
- ret = chunk_alloc_default_impl(tsdn, arena, new_addr,
- size, alignment, zero, commit);
- } else {
- ret = chunk_hooks->alloc(new_addr, size, alignment,
- zero, commit, arena->ind);
- }
-
- if (ret == NULL)
- return (NULL);
-
- *sn = arena_extent_sn_next(arena);
-
- if (config_valgrind && chunk_hooks->alloc !=
- chunk_alloc_default)
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
- }
-
- return (ret);
-}
-
-static void
-chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache,
- void *chunk, size_t size, size_t sn, bool zeroed, bool committed)
-{
- bool unzeroed;
- extent_node_t *node, *prev;
- extent_node_t key;
-
- assert(!cache || !zeroed);
- unzeroed = cache || !zeroed;
- JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
-
- malloc_mutex_lock(tsdn, &arena->chunks_mtx);
- chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
- extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0, 0,
- false, false);
- node = extent_tree_ad_nsearch(chunks_ad, &key);
- /* Try to coalesce forward. */
- if (node != NULL && extent_node_addr_get(node) ==
- extent_node_addr_get(&key) && extent_node_committed_get(node) ==
- committed && !chunk_hooks->merge(chunk, size,
- extent_node_addr_get(node), extent_node_size_get(node), false,
- arena->ind)) {
- /*
- * Coalesce chunk with the following address range. This does
- * not change the position within chunks_ad, so only
- * remove/insert from/into chunks_szsnad.
- */
- extent_tree_szsnad_remove(chunks_szsnad, node);
- arena_chunk_cache_maybe_remove(arena, node, cache);
- extent_node_addr_set(node, chunk);
- extent_node_size_set(node, size + extent_node_size_get(node));
- if (sn < extent_node_sn_get(node))
- extent_node_sn_set(node, sn);
- extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
- !unzeroed);
- extent_tree_szsnad_insert(chunks_szsnad, node);
- arena_chunk_cache_maybe_insert(arena, node, cache);
- } else {
- /* Coalescing forward failed, so insert a new node. */
- node = arena_node_alloc(tsdn, arena);
- if (node == NULL) {
- /*
- * Node allocation failed, which is an exceedingly
- * unlikely failure. Leak chunk after making sure its
- * pages have already been purged, so that this is only
- * a virtual memory leak.
- */
- if (cache) {
- chunk_purge_wrapper(tsdn, arena, chunk_hooks,
- chunk, size, 0, size);
- }
- goto label_return;
- }
- extent_node_init(node, arena, chunk, size, sn, !unzeroed,
- committed);
- extent_tree_ad_insert(chunks_ad, node);
- extent_tree_szsnad_insert(chunks_szsnad, node);
- arena_chunk_cache_maybe_insert(arena, node, cache);
- }
-
- /* Try to coalesce backward. */
- prev = extent_tree_ad_prev(chunks_ad, node);
- if (prev != NULL && (void *)((uintptr_t)extent_node_addr_get(prev) +
- extent_node_size_get(prev)) == chunk &&
- extent_node_committed_get(prev) == committed &&
- !chunk_hooks->merge(extent_node_addr_get(prev),
- extent_node_size_get(prev), chunk, size, false, arena->ind)) {
- /*
- * Coalesce chunk with the previous address range. This does
- * not change the position within chunks_ad, so only
- * remove/insert node from/into chunks_szsnad.
- */
- extent_tree_szsnad_remove(chunks_szsnad, prev);
- extent_tree_ad_remove(chunks_ad, prev);
- arena_chunk_cache_maybe_remove(arena, prev, cache);
- extent_tree_szsnad_remove(chunks_szsnad, node);
- arena_chunk_cache_maybe_remove(arena, node, cache);
- extent_node_addr_set(node, extent_node_addr_get(prev));
- extent_node_size_set(node, extent_node_size_get(prev) +
- extent_node_size_get(node));
- if (extent_node_sn_get(prev) < extent_node_sn_get(node))
- extent_node_sn_set(node, extent_node_sn_get(prev));
- extent_node_zeroed_set(node, extent_node_zeroed_get(prev) &&
- extent_node_zeroed_get(node));
- extent_tree_szsnad_insert(chunks_szsnad, node);
- arena_chunk_cache_maybe_insert(arena, node, cache);
-
- arena_node_dalloc(tsdn, arena, prev);
- }
-
-label_return:
- malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
-}
-
-void
-chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- void *chunk, size_t size, size_t sn, bool committed)
-{
-
- assert(chunk != NULL);
- assert(CHUNK_ADDR2BASE(chunk) == chunk);
- assert(size != 0);
- assert((size & chunksize_mask) == 0);
-
- chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_cached,
- &arena->chunks_ad_cached, true, chunk, size, sn, false,
- committed);
- arena_maybe_purge(tsdn, arena);
-}
-
-static bool
-chunk_dalloc_default_impl(void *chunk, size_t size)
-{
-
- if (!have_dss || !chunk_in_dss(chunk))
- return (chunk_dalloc_mmap(chunk, size));
- return (true);
-}
-
-static bool
-chunk_dalloc_default(void *chunk, size_t size, bool committed,
- unsigned arena_ind)
-{
-
- return (chunk_dalloc_default_impl(chunk, size));
-}
-
-void
-chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- void *chunk, size_t size, size_t sn, bool zeroed, bool committed)
-{
- bool err;
-
- assert(chunk != NULL);
- assert(CHUNK_ADDR2BASE(chunk) == chunk);
- assert(size != 0);
- assert((size & chunksize_mask) == 0);
-
- chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
- /* Try to deallocate. */
- if (chunk_hooks->dalloc == chunk_dalloc_default) {
- /* Call directly to propagate tsdn. */
- err = chunk_dalloc_default_impl(chunk, size);
- } else
- err = chunk_hooks->dalloc(chunk, size, committed, arena->ind);
-
- if (!err)
- return;
- /* Try to decommit; purge if that fails. */
- if (committed) {
- committed = chunk_hooks->decommit(chunk, size, 0, size,
- arena->ind);
- }
- zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
- arena->ind);
- chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_retained,
- &arena->chunks_ad_retained, false, chunk, size, sn, zeroed,
- committed);
-
- if (config_stats)
- arena->stats.retained += size;
-}
-
-static bool
-chunk_commit_default(void *chunk, size_t size, size_t offset, size_t length,
- unsigned arena_ind)
-{
-
- return (pages_commit((void *)((uintptr_t)chunk + (uintptr_t)offset),
- length));
-}
-
-static bool
-chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length,
- unsigned arena_ind)
-{
-
- return (pages_decommit((void *)((uintptr_t)chunk + (uintptr_t)offset),
- length));
-}
-
-static bool
-chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
- unsigned arena_ind)
-{
-
- assert(chunk != NULL);
- assert(CHUNK_ADDR2BASE(chunk) == chunk);
- assert((offset & PAGE_MASK) == 0);
- assert(length != 0);
- assert((length & PAGE_MASK) == 0);
-
- return (pages_purge((void *)((uintptr_t)chunk + (uintptr_t)offset),
- length));
-}
-
-bool
-chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- void *chunk, size_t size, size_t offset, size_t length)
-{
-
- chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
- return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
-}
-
-static bool
-chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
- bool committed, unsigned arena_ind)
-{
-
- if (!maps_coalesce)
- return (true);
- return (false);
-}
-
-static bool
-chunk_merge_default_impl(void *chunk_a, void *chunk_b)
-{
-
- if (!maps_coalesce)
- return (true);
- if (have_dss && !chunk_dss_mergeable(chunk_a, chunk_b))
- return (true);
-
- return (false);
-}
-
-static bool
-chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
- bool committed, unsigned arena_ind)
-{
-
- return (chunk_merge_default_impl(chunk_a, chunk_b));
-}
-
-static rtree_node_elm_t *
-chunks_rtree_node_alloc(size_t nelms)
-{
-
- return ((rtree_node_elm_t *)base_alloc(TSDN_NULL, nelms *
- sizeof(rtree_node_elm_t)));
-}
-
-bool
-chunk_boot(void)
-{
-#ifdef _WIN32
- SYSTEM_INFO info;
- GetSystemInfo(&info);
-
- /*
- * Verify actual page size is equal to or an integral multiple of
- * configured page size.
- */
- if (info.dwPageSize & ((1U << LG_PAGE) - 1))
- return (true);
-
- /*
- * Configure chunksize (if not set) to match granularity (usually 64K),
- * so pages_map will always take fast path.
- */
- if (!opt_lg_chunk) {
- opt_lg_chunk = ffs_u((unsigned)info.dwAllocationGranularity)
- - 1;
- }
-#else
- if (!opt_lg_chunk)
- opt_lg_chunk = LG_CHUNK_DEFAULT;
-#endif
-
- /* Set variables according to the value of opt_lg_chunk. */
- chunksize = (ZU(1) << opt_lg_chunk);
- assert(chunksize >= PAGE);
- chunksize_mask = chunksize - 1;
- chunk_npages = (chunksize >> LG_PAGE);
-
- if (have_dss)
- chunk_dss_boot();
- if (rtree_new(&chunks_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) -
- opt_lg_chunk), chunks_rtree_node_alloc, NULL))
- return (true);
-
- return (false);
-}
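
Most of the machinery deleted above moves into extent.c, but the arithmetic at the core of chunk_recycle() — carving an aligned chunk out of a larger free extent and keeping the leading and trailing remainders — survives in the new extent code in the same shape. Below is a condensed standalone version of just that carve-out, with an invented toy_extent_t standing in for the tree-linked extent_node_t.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef struct {
	void	*addr;
	size_t	size;
} toy_extent_t;

/* Carve an alignment-aligned region of `size` bytes out of `ext`,
 * returning its address and filling in the leftover lead and trail
 * pieces (size 0 if there is no remainder on that side). */
static void *
toy_carve(const toy_extent_t *ext, size_t size, size_t alignment,
    toy_extent_t *lead, toy_extent_t *trail) {
	uintptr_t base = (uintptr_t)ext->addr;
	uintptr_t ret = (base + alignment - 1) & ~((uintptr_t)alignment - 1);
	size_t leadsize = (size_t)(ret - base);

	assert((alignment & (alignment - 1)) == 0);
	assert(ext->size >= leadsize + size);

	lead->addr = ext->addr;
	lead->size = leadsize;
	trail->addr = (void *)(ret + size);
	trail->size = ext->size - leadsize - size;
	return (void *)ret;
}

In the deleted code the leading remainder reuses the existing extent_node_t while the trailing remainder needs a freshly allocated one, which is why chunk_recycle() falls back to re-recording the whole trailing range when node allocation fails.
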
diff --git a/contrib/jemalloc/src/chunk_dss.c b/contrib/jemalloc/src/chunk_dss.c
deleted file mode 100644
index 8c6793957d95..000000000000
--- a/contrib/jemalloc/src/chunk_dss.c
+++ /dev/null
@@ -1,247 +0,0 @@
-#define JEMALLOC_CHUNK_DSS_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-/******************************************************************************/
-/* Data. */
-
-const char *dss_prec_names[] = {
- "disabled",
- "primary",
- "secondary",
- "N/A"
-};
-
-/*
- * Current dss precedence default, used when creating new arenas. NB: This is
- * stored as unsigned rather than dss_prec_t because in principle there's no
- * guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use
- * atomic operations to synchronize the setting.
- */
-static unsigned dss_prec_default = (unsigned)DSS_PREC_DEFAULT;
-
-/* Base address of the DSS. */
-static void *dss_base;
-/* Atomic boolean indicating whether the DSS is exhausted. */
-static unsigned dss_exhausted;
-/* Atomic current upper limit on DSS addresses. */
-static void *dss_max;
-
-/******************************************************************************/
-
-static void *
-chunk_dss_sbrk(intptr_t increment)
-{
-
-#ifdef JEMALLOC_DSS
- return (sbrk(increment));
-#else
- not_implemented();
- return (NULL);
-#endif
-}
-
-dss_prec_t
-chunk_dss_prec_get(void)
-{
- dss_prec_t ret;
-
- if (!have_dss)
- return (dss_prec_disabled);
- ret = (dss_prec_t)atomic_read_u(&dss_prec_default);
- return (ret);
-}
-
-bool
-chunk_dss_prec_set(dss_prec_t dss_prec)
-{
-
- if (!have_dss)
- return (dss_prec != dss_prec_disabled);
- atomic_write_u(&dss_prec_default, (unsigned)dss_prec);
- return (false);
-}
-
-static void *
-chunk_dss_max_update(void *new_addr)
-{
- void *max_cur;
- spin_t spinner;
-
- /*
- * Get the current end of the DSS as max_cur and assure that dss_max is
- * up to date.
- */
- spin_init(&spinner);
- while (true) {
- void *max_prev = atomic_read_p(&dss_max);
-
- max_cur = chunk_dss_sbrk(0);
- if ((uintptr_t)max_prev > (uintptr_t)max_cur) {
- /*
- * Another thread optimistically updated dss_max. Wait
- * for it to finish.
- */
- spin_adaptive(&spinner);
- continue;
- }
- if (!atomic_cas_p(&dss_max, max_prev, max_cur))
- break;
- }
- /* Fixed new_addr can only be supported if it is at the edge of DSS. */
- if (new_addr != NULL && max_cur != new_addr)
- return (NULL);
-
- return (max_cur);
-}
-
-void *
-chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
- size_t alignment, bool *zero, bool *commit)
-{
- cassert(have_dss);
- assert(size > 0 && (size & chunksize_mask) == 0);
- assert(alignment > 0 && (alignment & chunksize_mask) == 0);
-
- /*
- * sbrk() uses a signed increment argument, so take care not to
- * interpret a huge allocation request as a negative increment.
- */
- if ((intptr_t)size < 0)
- return (NULL);
-
- if (!atomic_read_u(&dss_exhausted)) {
- /*
- * The loop is necessary to recover from races with other
- * threads that are using the DSS for something other than
- * malloc.
- */
- while (true) {
- void *ret, *max_cur, *dss_next, *dss_prev;
- void *gap_addr_chunk, *gap_addr_subchunk;
- size_t gap_size_chunk, gap_size_subchunk;
- intptr_t incr;
-
- max_cur = chunk_dss_max_update(new_addr);
- if (max_cur == NULL)
- goto label_oom;
-
- /*
- * Compute how much chunk-aligned gap space (if any) is
- * necessary to satisfy alignment. This space can be
- * recycled for later use.
- */
- gap_addr_chunk = (void *)(CHUNK_CEILING(
- (uintptr_t)max_cur));
- ret = (void *)ALIGNMENT_CEILING(
- (uintptr_t)gap_addr_chunk, alignment);
- gap_size_chunk = (uintptr_t)ret -
- (uintptr_t)gap_addr_chunk;
- /*
- * Compute the address just past the end of the desired
- * allocation space.
- */
- dss_next = (void *)((uintptr_t)ret + size);
- if ((uintptr_t)ret < (uintptr_t)max_cur ||
- (uintptr_t)dss_next < (uintptr_t)max_cur)
- goto label_oom; /* Wrap-around. */
- /* Compute the increment, including subchunk bytes. */
- gap_addr_subchunk = max_cur;
- gap_size_subchunk = (uintptr_t)ret -
- (uintptr_t)gap_addr_subchunk;
- incr = gap_size_subchunk + size;
-
- assert((uintptr_t)max_cur + incr == (uintptr_t)ret +
- size);
-
- /*
- * Optimistically update dss_max, and roll back below if
- * sbrk() fails. No other thread will try to extend the
- * DSS while dss_max is greater than the current DSS
- * max reported by sbrk(0).
- */
- if (atomic_cas_p(&dss_max, max_cur, dss_next))
- continue;
-
- /* Try to allocate. */
- dss_prev = chunk_dss_sbrk(incr);
- if (dss_prev == max_cur) {
- /* Success. */
- if (gap_size_chunk != 0) {
- chunk_hooks_t chunk_hooks =
- CHUNK_HOOKS_INITIALIZER;
- chunk_dalloc_wrapper(tsdn, arena,
- &chunk_hooks, gap_addr_chunk,
- gap_size_chunk,
- arena_extent_sn_next(arena), false,
- true);
- }
- if (*zero) {
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
- ret, size);
- memset(ret, 0, size);
- }
- if (!*commit)
- *commit = pages_decommit(ret, size);
- return (ret);
- }
-
- /*
- * Failure, whether due to OOM or a race with a raw
- * sbrk() call from outside the allocator. Try to roll
- * back optimistic dss_max update; if rollback fails,
- * it's due to another caller of this function having
- * succeeded since this invocation started, in which
- * case rollback is not necessary.
- */
- atomic_cas_p(&dss_max, dss_next, max_cur);
- if (dss_prev == (void *)-1) {
- /* OOM. */
- atomic_write_u(&dss_exhausted, (unsigned)true);
- goto label_oom;
- }
- }
- }
-label_oom:
- return (NULL);
-}
-
-static bool
-chunk_in_dss_helper(void *chunk, void *max)
-{
-
- return ((uintptr_t)chunk >= (uintptr_t)dss_base && (uintptr_t)chunk <
- (uintptr_t)max);
-}
-
-bool
-chunk_in_dss(void *chunk)
-{
-
- cassert(have_dss);
-
- return (chunk_in_dss_helper(chunk, atomic_read_p(&dss_max)));
-}
-
-bool
-chunk_dss_mergeable(void *chunk_a, void *chunk_b)
-{
- void *max;
-
- cassert(have_dss);
-
- max = atomic_read_p(&dss_max);
- return (chunk_in_dss_helper(chunk_a, max) ==
- chunk_in_dss_helper(chunk_b, max));
-}
-
-void
-chunk_dss_boot(void)
-{
-
- cassert(have_dss);
-
- dss_base = chunk_dss_sbrk(0);
- dss_exhausted = (unsigned)(dss_base == (void *)-1);
- dss_max = dss_base;
-}
-
-/******************************************************************************/
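
The removed chunk_alloc_dss() is mostly about one constraint: sbrk() can only move the program break linearly, so an aligned allocation has to absorb whatever gap lies between the current break and the next aligned address. The sketch below shows only that increment computation, single-threaded and POSIX-only; the optimistic dss_max compare-and-swap protocol and the recycling of the gap as a separate chunk are left out.

#include <assert.h>
#include <stdint.h>
#include <unistd.h>

/* Grow the DSS far enough that an alignment-aligned region of `size`
 * bytes fits, and return its start (NULL on failure). The gap between
 * the old break and the aligned start is simply wasted here; the
 * deleted code recycled it through chunk_dalloc_wrapper() instead. */
static void *
toy_sbrk_aligned(size_t size, size_t alignment) {
	void *max_cur = sbrk(0);

	assert((alignment & (alignment - 1)) == 0);
	if (max_cur == (void *)-1) {
		return NULL;
	}
	uintptr_t ret = ((uintptr_t)max_cur + alignment - 1) &
	    ~((uintptr_t)alignment - 1);
	uintptr_t next = ret + size;
	if (next < ret) {
		return NULL;	/* Wrap-around. */
	}
	/* The increment includes the sub-alignment gap bytes. */
	intptr_t incr = (intptr_t)(next - (uintptr_t)max_cur);
	if (sbrk(incr) != max_cur) {
		return NULL;	/* OOM, or a raced sbrk() from elsewhere. */
	}
	return (void *)ret;
}

In the multi-threaded original, dss_max is advanced with a compare-and-swap before sbrk() is called, precisely so that concurrent callers, and raw sbrk() users outside the allocator, can be detected and the optimistic update rolled back.
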
diff --git a/contrib/jemalloc/src/chunk_mmap.c b/contrib/jemalloc/src/chunk_mmap.c
deleted file mode 100644
index 73fc497afbba..000000000000
--- a/contrib/jemalloc/src/chunk_mmap.c
+++ /dev/null
@@ -1,78 +0,0 @@
-#define JEMALLOC_CHUNK_MMAP_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-/******************************************************************************/
-
-static void *
-chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
-{
- void *ret;
- size_t alloc_size;
-
- alloc_size = size + alignment - PAGE;
- /* Beware size_t wrap-around. */
- if (alloc_size < size)
- return (NULL);
- do {
- void *pages;
- size_t leadsize;
- pages = pages_map(NULL, alloc_size, commit);
- if (pages == NULL)
- return (NULL);
- leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
- (uintptr_t)pages;
- ret = pages_trim(pages, alloc_size, leadsize, size, commit);
- } while (ret == NULL);
-
- assert(ret != NULL);
- *zero = true;
- return (ret);
-}
-
-void *
-chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
- bool *commit)
-{
- void *ret;
- size_t offset;
-
- /*
- * Ideally, there would be a way to specify alignment to mmap() (like
- * NetBSD has), but in the absence of such a feature, we have to work
- * hard to efficiently create aligned mappings. The reliable, but
- * slow method is to create a mapping that is over-sized, then trim the
- * excess. However, that always results in one or two calls to
- * pages_unmap().
- *
- * Optimistically try mapping precisely the right amount before falling
- * back to the slow method, with the expectation that the optimistic
- * approach works most of the time.
- */
-
- assert(alignment != 0);
- assert((alignment & chunksize_mask) == 0);
-
- ret = pages_map(new_addr, size, commit);
- if (ret == NULL || ret == new_addr)
- return (ret);
- assert(new_addr == NULL);
- offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
- if (offset != 0) {
- pages_unmap(ret, size);
- return (chunk_alloc_mmap_slow(size, alignment, zero, commit));
- }
-
- assert(ret != NULL);
- *zero = true;
- return (ret);
-}
-
-bool
-chunk_dalloc_mmap(void *chunk, size_t size)
-{
-
- if (config_munmap)
- pages_unmap(chunk, size);
-
- return (!config_munmap);
-}
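
chunk_alloc_mmap_slow() above documents the portable trick for aligned mappings when mmap() has no alignment argument: over-allocate by alignment minus one page, then unmap the misaligned head and the unused tail. The same trick lives on in extent_mmap.c. Below is a self-contained POSIX sketch; the real pages_trim() additionally copes with Windows, where partial unmapping is impossible and the whole mapping must be redone.

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

#define TOY_PAGE ((size_t)4096)

/* Map `size` bytes aligned to `alignment` (a power of two >= TOY_PAGE)
 * by over-allocating and trimming the excess. */
static void *
toy_mmap_aligned(size_t size, size_t alignment) {
	size_t alloc_size = size + alignment - TOY_PAGE;

	if (alloc_size < size) {
		return NULL;	/* size_t wrap-around. */
	}
	void *pages = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (pages == MAP_FAILED) {
		return NULL;
	}
	uintptr_t addr = (uintptr_t)pages;
	size_t leadsize = (size_t)(((addr + alignment - 1) &
	    ~((uintptr_t)alignment - 1)) - addr);
	size_t trailsize = alloc_size - leadsize - size;
	if (leadsize != 0) {
		munmap(pages, leadsize);
	}
	if (trailsize != 0) {
		munmap((void *)(addr + leadsize + size), trailsize);
	}
	return (void *)(addr + leadsize);
}

The fast path in the deleted chunk_alloc_mmap() simply tries a plain mmap() first and only falls back to this over-allocate-and-trim scheme when the returned address happens to be misaligned.
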
diff --git a/contrib/jemalloc/src/ckh.c b/contrib/jemalloc/src/ckh.c
index 159bd8ae1618..e95e0a3ed591 100644
--- a/contrib/jemalloc/src/ckh.c
+++ b/contrib/jemalloc/src/ckh.c
@@ -34,8 +34,18 @@
* respectively.
*
******************************************************************************/
-#define JEMALLOC_CKH_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define JEMALLOC_CKH_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+
+#include "jemalloc/internal/ckh.h"
+
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/hash.h"
+#include "jemalloc/internal/malloc_io.h"
+#include "jemalloc/internal/prng.h"
+#include "jemalloc/internal/util.h"
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
@@ -49,27 +59,26 @@ static void ckh_shrink(tsd_t *tsd, ckh_t *ckh);
* Search bucket for key and return the cell number if found; SIZE_T_MAX
* otherwise.
*/
-JEMALLOC_INLINE_C size_t
-ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
-{
+static size_t
+ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) {
ckhc_t *cell;
unsigned i;
for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
- if (cell->key != NULL && ckh->keycomp(key, cell->key))
- return ((bucket << LG_CKH_BUCKET_CELLS) + i);
+ if (cell->key != NULL && ckh->keycomp(key, cell->key)) {
+ return (bucket << LG_CKH_BUCKET_CELLS) + i;
+ }
}
- return (SIZE_T_MAX);
+ return SIZE_T_MAX;
}
/*
* Search table for key and return cell number if found; SIZE_T_MAX otherwise.
*/
-JEMALLOC_INLINE_C size_t
-ckh_isearch(ckh_t *ckh, const void *key)
-{
+static size_t
+ckh_isearch(ckh_t *ckh, const void *key) {
size_t hashes[2], bucket, cell;
assert(ckh != NULL);
@@ -79,19 +88,19 @@ ckh_isearch(ckh_t *ckh, const void *key)
/* Search primary bucket. */
bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
cell = ckh_bucket_search(ckh, bucket, key);
- if (cell != SIZE_T_MAX)
- return (cell);
+ if (cell != SIZE_T_MAX) {
+ return cell;
+ }
/* Search secondary bucket. */
bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
cell = ckh_bucket_search(ckh, bucket, key);
- return (cell);
+ return cell;
}
-JEMALLOC_INLINE_C bool
+static bool
ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
- const void *data)
-{
+ const void *data) {
ckhc_t *cell;
unsigned offset, i;
@@ -108,11 +117,11 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
cell->key = key;
cell->data = data;
ckh->count++;
- return (false);
+ return false;
}
}
- return (true);
+ return true;
}
/*
@@ -121,10 +130,9 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
* eviction/relocation procedure until either success or detection of an
* eviction/relocation bucket cycle.
*/
-JEMALLOC_INLINE_C bool
+static bool
ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
- void const **argdata)
-{
+ void const **argdata) {
const void *key, *data, *tkey, *tdata;
ckhc_t *cell;
size_t hashes[2], bucket, tbucket;
@@ -183,18 +191,18 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
if (tbucket == argbucket) {
*argkey = key;
*argdata = data;
- return (true);
+ return true;
}
bucket = tbucket;
- if (!ckh_try_bucket_insert(ckh, bucket, key, data))
- return (false);
+ if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
+ return false;
+ }
}
}
-JEMALLOC_INLINE_C bool
-ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
-{
+static bool
+ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) {
size_t hashes[2], bucket;
const void *key = *argkey;
const void *data = *argdata;
@@ -203,27 +211,28 @@ ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
/* Try to insert in primary bucket. */
bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
- if (!ckh_try_bucket_insert(ckh, bucket, key, data))
- return (false);
+ if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
+ return false;
+ }
/* Try to insert in secondary bucket. */
bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
- if (!ckh_try_bucket_insert(ckh, bucket, key, data))
- return (false);
+ if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
+ return false;
+ }
/*
* Try to find a place for this item via iterative eviction/relocation.
*/
- return (ckh_evict_reloc_insert(ckh, bucket, argkey, argdata));
+ return ckh_evict_reloc_insert(ckh, bucket, argkey, argdata);
}
/*
* Try to rebuild the hash table from scratch by inserting all items from the
* old table into the new.
*/
-JEMALLOC_INLINE_C bool
-ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
-{
+static bool
+ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) {
size_t count, i, nins;
const void *key, *data;
@@ -235,18 +244,17 @@ ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
data = aTab[i].data;
if (ckh_try_insert(ckh, &key, &data)) {
ckh->count = count;
- return (true);
+ return true;
}
nins++;
}
}
- return (false);
+ return false;
}
static bool
-ckh_grow(tsd_t *tsd, ckh_t *ckh)
-{
+ckh_grow(tsd_t *tsd, ckh_t *ckh) {
bool ret;
ckhc_t *tab, *ttab;
unsigned lg_prevbuckets, lg_curcells;
@@ -266,8 +274,8 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
size_t usize;
lg_curcells++;
- usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
- if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
+ usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
+ if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
ret = true;
goto label_return;
}
@@ -284,24 +292,23 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
if (!ckh_rebuild(ckh, tab)) {
- idalloctm(tsd_tsdn(tsd), tab, NULL, true, true);
+ idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true);
break;
}
/* Rebuilding failed, so back out partially rebuilt table. */
- idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true);
+ idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
}
ret = false;
label_return:
- return (ret);
+ return ret;
}
static void
-ckh_shrink(tsd_t *tsd, ckh_t *ckh)
-{
+ckh_shrink(tsd_t *tsd, ckh_t *ckh) {
ckhc_t *tab, *ttab;
size_t usize;
unsigned lg_prevbuckets, lg_curcells;
@@ -312,9 +319,10 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
*/
lg_prevbuckets = ckh->lg_curbuckets;
lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
- usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
- if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
+ usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
+ if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
return;
+ }
tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL,
true, arena_ichoose(tsd, NULL));
if (tab == NULL) {
@@ -331,7 +339,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
if (!ckh_rebuild(ckh, tab)) {
- idalloctm(tsd_tsdn(tsd), tab, NULL, true, true);
+ idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true);
#ifdef CKH_COUNT
ckh->nshrinks++;
#endif
@@ -339,7 +347,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
}
/* Rebuilding failed, so back out partially rebuilt table. */
- idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true);
+ idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
#ifdef CKH_COUNT
@@ -349,8 +357,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
bool
ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
- ckh_keycomp_t *keycomp)
-{
+ ckh_keycomp_t *keycomp) {
bool ret;
size_t mincells, usize;
unsigned lg_mincells;
@@ -380,15 +387,16 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2;
for (lg_mincells = LG_CKH_BUCKET_CELLS;
(ZU(1) << lg_mincells) < mincells;
- lg_mincells++)
- ; /* Do nothing. */
+ lg_mincells++) {
+ /* Do nothing. */
+ }
ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
ckh->hash = hash;
ckh->keycomp = keycomp;
- usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
- if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
+ usize = sz_sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
+ if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
ret = true;
goto label_return;
}
@@ -401,13 +409,11 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
ret = false;
label_return:
- return (ret);
+ return ret;
}
void
-ckh_delete(tsd_t *tsd, ckh_t *ckh)
-{
-
+ckh_delete(tsd_t *tsd, ckh_t *ckh) {
assert(ckh != NULL);
#ifdef CKH_VERBOSE
@@ -422,43 +428,42 @@ ckh_delete(tsd_t *tsd, ckh_t *ckh)
(unsigned long long)ckh->nrelocs);
#endif
- idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true);
- if (config_debug)
+ idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
+ if (config_debug) {
memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t));
+ }
}
size_t
-ckh_count(ckh_t *ckh)
-{
-
+ckh_count(ckh_t *ckh) {
assert(ckh != NULL);
- return (ckh->count);
+ return ckh->count;
}
bool
-ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data)
-{
+ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) {
size_t i, ncells;
for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets +
LG_CKH_BUCKET_CELLS)); i < ncells; i++) {
if (ckh->tab[i].key != NULL) {
- if (key != NULL)
+ if (key != NULL) {
*key = (void *)ckh->tab[i].key;
- if (data != NULL)
+ }
+ if (data != NULL) {
*data = (void *)ckh->tab[i].data;
+ }
*tabind = i + 1;
- return (false);
+ return false;
}
}
- return (true);
+ return true;
}
bool
-ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data)
-{
+ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) {
bool ret;
assert(ckh != NULL);
@@ -477,23 +482,24 @@ ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data)
ret = false;
label_return:
- return (ret);
+ return ret;
}
bool
ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
- void **data)
-{
+ void **data) {
size_t cell;
assert(ckh != NULL);
cell = ckh_isearch(ckh, searchkey);
if (cell != SIZE_T_MAX) {
- if (key != NULL)
+ if (key != NULL) {
*key = (void *)ckh->tab[cell].key;
- if (data != NULL)
+ }
+ if (data != NULL) {
*data = (void *)ckh->tab[cell].data;
+ }
ckh->tab[cell].key = NULL;
ckh->tab[cell].data = NULL; /* Not necessary. */
@@ -506,51 +512,47 @@ ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
ckh_shrink(tsd, ckh);
}
- return (false);
+ return false;
}
- return (true);
+ return true;
}
bool
-ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
-{
+ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) {
size_t cell;
assert(ckh != NULL);
cell = ckh_isearch(ckh, searchkey);
if (cell != SIZE_T_MAX) {
- if (key != NULL)
+ if (key != NULL) {
*key = (void *)ckh->tab[cell].key;
- if (data != NULL)
+ }
+ if (data != NULL) {
*data = (void *)ckh->tab[cell].data;
- return (false);
+ }
+ return false;
}
- return (true);
+ return true;
}
void
-ckh_string_hash(const void *key, size_t r_hash[2])
-{
-
+ckh_string_hash(const void *key, size_t r_hash[2]) {
hash(key, strlen((const char *)key), 0x94122f33U, r_hash);
}
bool
-ckh_string_keycomp(const void *k1, const void *k2)
-{
-
- assert(k1 != NULL);
- assert(k2 != NULL);
+ckh_string_keycomp(const void *k1, const void *k2) {
+ assert(k1 != NULL);
+ assert(k2 != NULL);
- return (strcmp((char *)k1, (char *)k2) ? false : true);
+ return !strcmp((char *)k1, (char *)k2);
}
void
-ckh_pointer_hash(const void *key, size_t r_hash[2])
-{
+ckh_pointer_hash(const void *key, size_t r_hash[2]) {
union {
const void *v;
size_t i;
@@ -562,8 +564,6 @@ ckh_pointer_hash(const void *key, size_t r_hash[2])
}
bool
-ckh_pointer_keycomp(const void *k1, const void *k2)
-{
-
- return ((k1 == k2) ? true : false);
+ckh_pointer_keycomp(const void *k1, const void *k2) {
+ return (k1 == k2);
}
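
ckh is jemalloc's internal cuckoo hash (it backs, for example, the profiler's bt2gctx table) and is not public malloc API. The hunks above are style conversion (braced single statements, `return ret;` instead of `return (ret);`), the sz_sa2u()/LARGE_MAXCLASS renames, and one extra NULL argument threaded through idalloctm(), which appears to be the new allocation-context parameter; the calling convention is otherwise unchanged. A minimal internal-usage sketch follows, with the includes and the tsd handling being illustrative assumptions rather than part of this change:

/*
 * Minimal internal-usage sketch; the includes and tsd_fetch() call are
 * assumptions for illustration, and none of this is public malloc API.
 */
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

static void
ckh_usage_sketch(void) {
        tsd_t *tsd = tsd_fetch();  /* Per-thread state, as in the callers above. */
        ckh_t ckh;
        void *k, *v;

        /* false means success/found, matching the hunks above. */
        if (ckh_new(tsd, &ckh, 16, ckh_string_hash, ckh_string_keycomp)) {
                return;
        }
        if (!ckh_insert(tsd, &ckh, "key", "datum") &&
            !ckh_search(&ckh, "key", &k, &v)) {
                /* k and v now point at the stored key and data. */
        }
        ckh_delete(tsd, &ckh);
}
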
diff --git a/contrib/jemalloc/src/ctl.c b/contrib/jemalloc/src/ctl.c
index 56bc4f4cca91..f1310cdf1dbc 100644
--- a/contrib/jemalloc/src/ctl.c
+++ b/contrib/jemalloc/src/ctl.c
@@ -1,69 +1,62 @@
-#define JEMALLOC_CTL_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define JEMALLOC_CTL_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/extent_dss.h"
+#include "jemalloc/internal/extent_mmap.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/nstime.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/util.h"
/******************************************************************************/
/* Data. */
/*
* ctl_mtx protects the following:
- * - ctl_stats.*
+ * - ctl_stats->*
*/
static malloc_mutex_t ctl_mtx;
static bool ctl_initialized;
-static uint64_t ctl_epoch;
-static ctl_stats_t ctl_stats;
+static ctl_stats_t *ctl_stats;
+static ctl_arenas_t *ctl_arenas;
/******************************************************************************/
/* Helpers for named and indexed nodes. */
-JEMALLOC_INLINE_C const ctl_named_node_t *
-ctl_named_node(const ctl_node_t *node)
-{
-
+static const ctl_named_node_t *
+ctl_named_node(const ctl_node_t *node) {
return ((node->named) ? (const ctl_named_node_t *)node : NULL);
}
-JEMALLOC_INLINE_C const ctl_named_node_t *
-ctl_named_children(const ctl_named_node_t *node, size_t index)
-{
+static const ctl_named_node_t *
+ctl_named_children(const ctl_named_node_t *node, size_t index) {
const ctl_named_node_t *children = ctl_named_node(node->children);
return (children ? &children[index] : NULL);
}
-JEMALLOC_INLINE_C const ctl_indexed_node_t *
-ctl_indexed_node(const ctl_node_t *node)
-{
-
+static const ctl_indexed_node_t *
+ctl_indexed_node(const ctl_node_t *node) {
return (!node->named ? (const ctl_indexed_node_t *)node : NULL);
}
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
-#define CTL_PROTO(n) \
+#define CTL_PROTO(n) \
static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
void *oldp, size_t *oldlenp, void *newp, size_t newlen);
-#define INDEX_PROTO(n) \
+#define INDEX_PROTO(n) \
static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \
const size_t *mib, size_t miblen, size_t i);
-static bool ctl_arena_init(ctl_arena_stats_t *astats);
-static void ctl_arena_clear(ctl_arena_stats_t *astats);
-static void ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats,
- arena_t *arena);
-static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
- ctl_arena_stats_t *astats);
-static void ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, unsigned i);
-static bool ctl_grow(tsdn_t *tsdn);
-static void ctl_refresh(tsdn_t *tsdn);
-static bool ctl_init(tsdn_t *tsdn);
-static int ctl_lookup(tsdn_t *tsdn, const char *name,
- ctl_node_t const **nodesp, size_t *mibp, size_t *depthp);
-
CTL_PROTO(version)
CTL_PROTO(epoch)
+CTL_PROTO(background_thread)
CTL_PROTO(thread_tcache_enabled)
CTL_PROTO(thread_tcache_flush)
CTL_PROTO(thread_prof_name)
@@ -78,34 +71,30 @@ CTL_PROTO(config_debug)
CTL_PROTO(config_fill)
CTL_PROTO(config_lazy_lock)
CTL_PROTO(config_malloc_conf)
-CTL_PROTO(config_munmap)
CTL_PROTO(config_prof)
CTL_PROTO(config_prof_libgcc)
CTL_PROTO(config_prof_libunwind)
CTL_PROTO(config_stats)
-CTL_PROTO(config_tcache)
CTL_PROTO(config_thp)
-CTL_PROTO(config_tls)
CTL_PROTO(config_utrace)
-CTL_PROTO(config_valgrind)
CTL_PROTO(config_xmalloc)
CTL_PROTO(opt_abort)
+CTL_PROTO(opt_abort_conf)
+CTL_PROTO(opt_retain)
CTL_PROTO(opt_dss)
-CTL_PROTO(opt_lg_chunk)
CTL_PROTO(opt_narenas)
-CTL_PROTO(opt_purge)
-CTL_PROTO(opt_lg_dirty_mult)
-CTL_PROTO(opt_decay_time)
+CTL_PROTO(opt_percpu_arena)
+CTL_PROTO(opt_background_thread)
+CTL_PROTO(opt_dirty_decay_ms)
+CTL_PROTO(opt_muzzy_decay_ms)
CTL_PROTO(opt_stats_print)
+CTL_PROTO(opt_stats_print_opts)
CTL_PROTO(opt_junk)
CTL_PROTO(opt_zero)
-CTL_PROTO(opt_quarantine)
-CTL_PROTO(opt_redzone)
CTL_PROTO(opt_utrace)
CTL_PROTO(opt_xmalloc)
CTL_PROTO(opt_tcache)
CTL_PROTO(opt_lg_tcache_max)
-CTL_PROTO(opt_thp)
CTL_PROTO(opt_prof)
CTL_PROTO(opt_prof_prefix)
CTL_PROTO(opt_prof_active)
@@ -119,35 +108,32 @@ CTL_PROTO(opt_prof_accum)
CTL_PROTO(tcache_create)
CTL_PROTO(tcache_flush)
CTL_PROTO(tcache_destroy)
-static void arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all);
-CTL_PROTO(arena_i_purge)
+CTL_PROTO(arena_i_initialized)
CTL_PROTO(arena_i_decay)
+CTL_PROTO(arena_i_purge)
CTL_PROTO(arena_i_reset)
+CTL_PROTO(arena_i_destroy)
CTL_PROTO(arena_i_dss)
-CTL_PROTO(arena_i_lg_dirty_mult)
-CTL_PROTO(arena_i_decay_time)
-CTL_PROTO(arena_i_chunk_hooks)
+CTL_PROTO(arena_i_dirty_decay_ms)
+CTL_PROTO(arena_i_muzzy_decay_ms)
+CTL_PROTO(arena_i_extent_hooks)
INDEX_PROTO(arena_i)
CTL_PROTO(arenas_bin_i_size)
CTL_PROTO(arenas_bin_i_nregs)
-CTL_PROTO(arenas_bin_i_run_size)
+CTL_PROTO(arenas_bin_i_slab_size)
INDEX_PROTO(arenas_bin_i)
-CTL_PROTO(arenas_lrun_i_size)
-INDEX_PROTO(arenas_lrun_i)
-CTL_PROTO(arenas_hchunk_i_size)
-INDEX_PROTO(arenas_hchunk_i)
+CTL_PROTO(arenas_lextent_i_size)
+INDEX_PROTO(arenas_lextent_i)
CTL_PROTO(arenas_narenas)
-CTL_PROTO(arenas_initialized)
-CTL_PROTO(arenas_lg_dirty_mult)
-CTL_PROTO(arenas_decay_time)
+CTL_PROTO(arenas_dirty_decay_ms)
+CTL_PROTO(arenas_muzzy_decay_ms)
CTL_PROTO(arenas_quantum)
CTL_PROTO(arenas_page)
CTL_PROTO(arenas_tcache_max)
CTL_PROTO(arenas_nbins)
CTL_PROTO(arenas_nhbins)
-CTL_PROTO(arenas_nlruns)
-CTL_PROTO(arenas_nhchunks)
-CTL_PROTO(arenas_extend)
+CTL_PROTO(arenas_nlextents)
+CTL_PROTO(arenas_create)
CTL_PROTO(prof_thread_active_init)
CTL_PROTO(prof_active)
CTL_PROTO(prof_dump)
@@ -163,70 +149,92 @@ CTL_PROTO(stats_arenas_i_large_allocated)
CTL_PROTO(stats_arenas_i_large_nmalloc)
CTL_PROTO(stats_arenas_i_large_ndalloc)
CTL_PROTO(stats_arenas_i_large_nrequests)
-CTL_PROTO(stats_arenas_i_huge_allocated)
-CTL_PROTO(stats_arenas_i_huge_nmalloc)
-CTL_PROTO(stats_arenas_i_huge_ndalloc)
-CTL_PROTO(stats_arenas_i_huge_nrequests)
CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
CTL_PROTO(stats_arenas_i_bins_j_nrequests)
CTL_PROTO(stats_arenas_i_bins_j_curregs)
CTL_PROTO(stats_arenas_i_bins_j_nfills)
CTL_PROTO(stats_arenas_i_bins_j_nflushes)
-CTL_PROTO(stats_arenas_i_bins_j_nruns)
-CTL_PROTO(stats_arenas_i_bins_j_nreruns)
-CTL_PROTO(stats_arenas_i_bins_j_curruns)
+CTL_PROTO(stats_arenas_i_bins_j_nslabs)
+CTL_PROTO(stats_arenas_i_bins_j_nreslabs)
+CTL_PROTO(stats_arenas_i_bins_j_curslabs)
INDEX_PROTO(stats_arenas_i_bins_j)
-CTL_PROTO(stats_arenas_i_lruns_j_nmalloc)
-CTL_PROTO(stats_arenas_i_lruns_j_ndalloc)
-CTL_PROTO(stats_arenas_i_lruns_j_nrequests)
-CTL_PROTO(stats_arenas_i_lruns_j_curruns)
-INDEX_PROTO(stats_arenas_i_lruns_j)
-CTL_PROTO(stats_arenas_i_hchunks_j_nmalloc)
-CTL_PROTO(stats_arenas_i_hchunks_j_ndalloc)
-CTL_PROTO(stats_arenas_i_hchunks_j_nrequests)
-CTL_PROTO(stats_arenas_i_hchunks_j_curhchunks)
-INDEX_PROTO(stats_arenas_i_hchunks_j)
+CTL_PROTO(stats_arenas_i_lextents_j_nmalloc)
+CTL_PROTO(stats_arenas_i_lextents_j_ndalloc)
+CTL_PROTO(stats_arenas_i_lextents_j_nrequests)
+CTL_PROTO(stats_arenas_i_lextents_j_curlextents)
+INDEX_PROTO(stats_arenas_i_lextents_j)
CTL_PROTO(stats_arenas_i_nthreads)
+CTL_PROTO(stats_arenas_i_uptime)
CTL_PROTO(stats_arenas_i_dss)
-CTL_PROTO(stats_arenas_i_lg_dirty_mult)
-CTL_PROTO(stats_arenas_i_decay_time)
+CTL_PROTO(stats_arenas_i_dirty_decay_ms)
+CTL_PROTO(stats_arenas_i_muzzy_decay_ms)
CTL_PROTO(stats_arenas_i_pactive)
CTL_PROTO(stats_arenas_i_pdirty)
+CTL_PROTO(stats_arenas_i_pmuzzy)
CTL_PROTO(stats_arenas_i_mapped)
CTL_PROTO(stats_arenas_i_retained)
-CTL_PROTO(stats_arenas_i_npurge)
-CTL_PROTO(stats_arenas_i_nmadvise)
-CTL_PROTO(stats_arenas_i_purged)
-CTL_PROTO(stats_arenas_i_metadata_mapped)
-CTL_PROTO(stats_arenas_i_metadata_allocated)
+CTL_PROTO(stats_arenas_i_dirty_npurge)
+CTL_PROTO(stats_arenas_i_dirty_nmadvise)
+CTL_PROTO(stats_arenas_i_dirty_purged)
+CTL_PROTO(stats_arenas_i_muzzy_npurge)
+CTL_PROTO(stats_arenas_i_muzzy_nmadvise)
+CTL_PROTO(stats_arenas_i_muzzy_purged)
+CTL_PROTO(stats_arenas_i_base)
+CTL_PROTO(stats_arenas_i_internal)
+CTL_PROTO(stats_arenas_i_tcache_bytes)
+CTL_PROTO(stats_arenas_i_resident)
INDEX_PROTO(stats_arenas_i)
-CTL_PROTO(stats_cactive)
CTL_PROTO(stats_allocated)
CTL_PROTO(stats_active)
+CTL_PROTO(stats_background_thread_num_threads)
+CTL_PROTO(stats_background_thread_num_runs)
+CTL_PROTO(stats_background_thread_run_interval)
CTL_PROTO(stats_metadata)
CTL_PROTO(stats_resident)
CTL_PROTO(stats_mapped)
CTL_PROTO(stats_retained)
+#define MUTEX_STATS_CTL_PROTO_GEN(n) \
+CTL_PROTO(stats_##n##_num_ops) \
+CTL_PROTO(stats_##n##_num_wait) \
+CTL_PROTO(stats_##n##_num_spin_acq) \
+CTL_PROTO(stats_##n##_num_owner_switch) \
+CTL_PROTO(stats_##n##_total_wait_time) \
+CTL_PROTO(stats_##n##_max_wait_time) \
+CTL_PROTO(stats_##n##_max_num_thds)
+
+/* Global mutexes. */
+#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(mutexes_##mtx)
+MUTEX_PROF_GLOBAL_MUTEXES
+#undef OP
+
+/* Per arena mutexes. */
+#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(arenas_i_mutexes_##mtx)
+MUTEX_PROF_ARENA_MUTEXES
+#undef OP
+
+/* Arena bin mutexes. */
+MUTEX_STATS_CTL_PROTO_GEN(arenas_i_bins_j_mutex)
+#undef MUTEX_STATS_CTL_PROTO_GEN
+
+CTL_PROTO(stats_mutexes_reset)
+
/******************************************************************************/
/* mallctl tree. */
-/* Maximum tree depth. */
-#define CTL_MAX_DEPTH 6
-
-#define NAME(n) {true}, n
-#define CHILD(t, c) \
+#define NAME(n) {true}, n
+#define CHILD(t, c) \
sizeof(c##_node) / sizeof(ctl_##t##_node_t), \
(ctl_node_t *)c##_node, \
NULL
-#define CTL(c) 0, NULL, c##_ctl
+#define CTL(c) 0, NULL, c##_ctl
/*
* Only handles internal indexed nodes, since there are currently no external
* ones.
*/
-#define INDEX(i) {false}, i##_index
+#define INDEX(i) {false}, i##_index
static const ctl_named_node_t thread_tcache_node[] = {
{NAME("enabled"), CTL(thread_tcache_enabled)},
@@ -254,37 +262,33 @@ static const ctl_named_node_t config_node[] = {
{NAME("fill"), CTL(config_fill)},
{NAME("lazy_lock"), CTL(config_lazy_lock)},
{NAME("malloc_conf"), CTL(config_malloc_conf)},
- {NAME("munmap"), CTL(config_munmap)},
{NAME("prof"), CTL(config_prof)},
{NAME("prof_libgcc"), CTL(config_prof_libgcc)},
{NAME("prof_libunwind"), CTL(config_prof_libunwind)},
{NAME("stats"), CTL(config_stats)},
- {NAME("tcache"), CTL(config_tcache)},
{NAME("thp"), CTL(config_thp)},
- {NAME("tls"), CTL(config_tls)},
{NAME("utrace"), CTL(config_utrace)},
- {NAME("valgrind"), CTL(config_valgrind)},
{NAME("xmalloc"), CTL(config_xmalloc)}
};
static const ctl_named_node_t opt_node[] = {
{NAME("abort"), CTL(opt_abort)},
+ {NAME("abort_conf"), CTL(opt_abort_conf)},
+ {NAME("retain"), CTL(opt_retain)},
{NAME("dss"), CTL(opt_dss)},
- {NAME("lg_chunk"), CTL(opt_lg_chunk)},
{NAME("narenas"), CTL(opt_narenas)},
- {NAME("purge"), CTL(opt_purge)},
- {NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)},
- {NAME("decay_time"), CTL(opt_decay_time)},
+ {NAME("percpu_arena"), CTL(opt_percpu_arena)},
+ {NAME("background_thread"), CTL(opt_background_thread)},
+ {NAME("dirty_decay_ms"), CTL(opt_dirty_decay_ms)},
+ {NAME("muzzy_decay_ms"), CTL(opt_muzzy_decay_ms)},
{NAME("stats_print"), CTL(opt_stats_print)},
+ {NAME("stats_print_opts"), CTL(opt_stats_print_opts)},
{NAME("junk"), CTL(opt_junk)},
{NAME("zero"), CTL(opt_zero)},
- {NAME("quarantine"), CTL(opt_quarantine)},
- {NAME("redzone"), CTL(opt_redzone)},
{NAME("utrace"), CTL(opt_utrace)},
{NAME("xmalloc"), CTL(opt_xmalloc)},
{NAME("tcache"), CTL(opt_tcache)},
{NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)},
- {NAME("thp"), CTL(opt_thp)},
{NAME("prof"), CTL(opt_prof)},
{NAME("prof_prefix"), CTL(opt_prof_prefix)},
{NAME("prof_active"), CTL(opt_prof_active)},
@@ -304,13 +308,15 @@ static const ctl_named_node_t tcache_node[] = {
};
static const ctl_named_node_t arena_i_node[] = {
- {NAME("purge"), CTL(arena_i_purge)},
+ {NAME("initialized"), CTL(arena_i_initialized)},
{NAME("decay"), CTL(arena_i_decay)},
+ {NAME("purge"), CTL(arena_i_purge)},
{NAME("reset"), CTL(arena_i_reset)},
+ {NAME("destroy"), CTL(arena_i_destroy)},
{NAME("dss"), CTL(arena_i_dss)},
- {NAME("lg_dirty_mult"), CTL(arena_i_lg_dirty_mult)},
- {NAME("decay_time"), CTL(arena_i_decay_time)},
- {NAME("chunk_hooks"), CTL(arena_i_chunk_hooks)}
+ {NAME("dirty_decay_ms"), CTL(arena_i_dirty_decay_ms)},
+ {NAME("muzzy_decay_ms"), CTL(arena_i_muzzy_decay_ms)},
+ {NAME("extent_hooks"), CTL(arena_i_extent_hooks)}
};
static const ctl_named_node_t super_arena_i_node[] = {
{NAME(""), CHILD(named, arena_i)}
@@ -323,7 +329,7 @@ static const ctl_indexed_node_t arena_node[] = {
static const ctl_named_node_t arenas_bin_i_node[] = {
{NAME("size"), CTL(arenas_bin_i_size)},
{NAME("nregs"), CTL(arenas_bin_i_nregs)},
- {NAME("run_size"), CTL(arenas_bin_i_run_size)}
+ {NAME("slab_size"), CTL(arenas_bin_i_slab_size)}
};
static const ctl_named_node_t super_arenas_bin_i_node[] = {
{NAME(""), CHILD(named, arenas_bin_i)}
@@ -333,44 +339,30 @@ static const ctl_indexed_node_t arenas_bin_node[] = {
{INDEX(arenas_bin_i)}
};
-static const ctl_named_node_t arenas_lrun_i_node[] = {
- {NAME("size"), CTL(arenas_lrun_i_size)}
-};
-static const ctl_named_node_t super_arenas_lrun_i_node[] = {
- {NAME(""), CHILD(named, arenas_lrun_i)}
+static const ctl_named_node_t arenas_lextent_i_node[] = {
+ {NAME("size"), CTL(arenas_lextent_i_size)}
};
-
-static const ctl_indexed_node_t arenas_lrun_node[] = {
- {INDEX(arenas_lrun_i)}
+static const ctl_named_node_t super_arenas_lextent_i_node[] = {
+ {NAME(""), CHILD(named, arenas_lextent_i)}
};
-static const ctl_named_node_t arenas_hchunk_i_node[] = {
- {NAME("size"), CTL(arenas_hchunk_i_size)}
-};
-static const ctl_named_node_t super_arenas_hchunk_i_node[] = {
- {NAME(""), CHILD(named, arenas_hchunk_i)}
-};
-
-static const ctl_indexed_node_t arenas_hchunk_node[] = {
- {INDEX(arenas_hchunk_i)}
+static const ctl_indexed_node_t arenas_lextent_node[] = {
+ {INDEX(arenas_lextent_i)}
};
static const ctl_named_node_t arenas_node[] = {
{NAME("narenas"), CTL(arenas_narenas)},
- {NAME("initialized"), CTL(arenas_initialized)},
- {NAME("lg_dirty_mult"), CTL(arenas_lg_dirty_mult)},
- {NAME("decay_time"), CTL(arenas_decay_time)},
+ {NAME("dirty_decay_ms"), CTL(arenas_dirty_decay_ms)},
+ {NAME("muzzy_decay_ms"), CTL(arenas_muzzy_decay_ms)},
{NAME("quantum"), CTL(arenas_quantum)},
{NAME("page"), CTL(arenas_page)},
{NAME("tcache_max"), CTL(arenas_tcache_max)},
{NAME("nbins"), CTL(arenas_nbins)},
{NAME("nhbins"), CTL(arenas_nhbins)},
{NAME("bin"), CHILD(indexed, arenas_bin)},
- {NAME("nlruns"), CTL(arenas_nlruns)},
- {NAME("lrun"), CHILD(indexed, arenas_lrun)},
- {NAME("nhchunks"), CTL(arenas_nhchunks)},
- {NAME("hchunk"), CHILD(indexed, arenas_hchunk)},
- {NAME("extend"), CTL(arenas_extend)}
+ {NAME("nlextents"), CTL(arenas_nlextents)},
+ {NAME("lextent"), CHILD(indexed, arenas_lextent)},
+ {NAME("create"), CTL(arenas_create)}
};
static const ctl_named_node_t prof_node[] = {
@@ -383,11 +375,6 @@ static const ctl_named_node_t prof_node[] = {
{NAME("lg_sample"), CTL(lg_prof_sample)}
};
-static const ctl_named_node_t stats_arenas_i_metadata_node[] = {
- {NAME("mapped"), CTL(stats_arenas_i_metadata_mapped)},
- {NAME("allocated"), CTL(stats_arenas_i_metadata_allocated)}
-};
-
static const ctl_named_node_t stats_arenas_i_small_node[] = {
{NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
{NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
@@ -402,13 +389,27 @@ static const ctl_named_node_t stats_arenas_i_large_node[] = {
{NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)}
};
-static const ctl_named_node_t stats_arenas_i_huge_node[] = {
- {NAME("allocated"), CTL(stats_arenas_i_huge_allocated)},
- {NAME("nmalloc"), CTL(stats_arenas_i_huge_nmalloc)},
- {NAME("ndalloc"), CTL(stats_arenas_i_huge_ndalloc)},
- {NAME("nrequests"), CTL(stats_arenas_i_huge_nrequests)}
+#define MUTEX_PROF_DATA_NODE(prefix) \
+static const ctl_named_node_t stats_##prefix##_node[] = { \
+ {NAME("num_ops"), \
+ CTL(stats_##prefix##_num_ops)}, \
+ {NAME("num_wait"), \
+ CTL(stats_##prefix##_num_wait)}, \
+ {NAME("num_spin_acq"), \
+ CTL(stats_##prefix##_num_spin_acq)}, \
+ {NAME("num_owner_switch"), \
+ CTL(stats_##prefix##_num_owner_switch)}, \
+ {NAME("total_wait_time"), \
+ CTL(stats_##prefix##_total_wait_time)}, \
+ {NAME("max_wait_time"), \
+ CTL(stats_##prefix##_max_wait_time)}, \
+ {NAME("max_num_thds"), \
+ CTL(stats_##prefix##_max_num_thds)} \
+ /* Note that # of current waiting thread not provided. */ \
};
+MUTEX_PROF_DATA_NODE(arenas_i_bins_j_mutex)
+
static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
{NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)},
{NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)},
@@ -416,10 +417,12 @@ static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
{NAME("curregs"), CTL(stats_arenas_i_bins_j_curregs)},
{NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)},
{NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)},
- {NAME("nruns"), CTL(stats_arenas_i_bins_j_nruns)},
- {NAME("nreruns"), CTL(stats_arenas_i_bins_j_nreruns)},
- {NAME("curruns"), CTL(stats_arenas_i_bins_j_curruns)}
+ {NAME("nslabs"), CTL(stats_arenas_i_bins_j_nslabs)},
+ {NAME("nreslabs"), CTL(stats_arenas_i_bins_j_nreslabs)},
+ {NAME("curslabs"), CTL(stats_arenas_i_bins_j_curslabs)},
+ {NAME("mutex"), CHILD(named, stats_arenas_i_bins_j_mutex)}
};
+
static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = {
{NAME(""), CHILD(named, stats_arenas_i_bins_j)}
};
@@ -428,53 +431,56 @@ static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
{INDEX(stats_arenas_i_bins_j)}
};
-static const ctl_named_node_t stats_arenas_i_lruns_j_node[] = {
- {NAME("nmalloc"), CTL(stats_arenas_i_lruns_j_nmalloc)},
- {NAME("ndalloc"), CTL(stats_arenas_i_lruns_j_ndalloc)},
- {NAME("nrequests"), CTL(stats_arenas_i_lruns_j_nrequests)},
- {NAME("curruns"), CTL(stats_arenas_i_lruns_j_curruns)}
+static const ctl_named_node_t stats_arenas_i_lextents_j_node[] = {
+ {NAME("nmalloc"), CTL(stats_arenas_i_lextents_j_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_lextents_j_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_lextents_j_nrequests)},
+ {NAME("curlextents"), CTL(stats_arenas_i_lextents_j_curlextents)}
};
-static const ctl_named_node_t super_stats_arenas_i_lruns_j_node[] = {
- {NAME(""), CHILD(named, stats_arenas_i_lruns_j)}
+static const ctl_named_node_t super_stats_arenas_i_lextents_j_node[] = {
+ {NAME(""), CHILD(named, stats_arenas_i_lextents_j)}
};
-static const ctl_indexed_node_t stats_arenas_i_lruns_node[] = {
- {INDEX(stats_arenas_i_lruns_j)}
+static const ctl_indexed_node_t stats_arenas_i_lextents_node[] = {
+ {INDEX(stats_arenas_i_lextents_j)}
};
-static const ctl_named_node_t stats_arenas_i_hchunks_j_node[] = {
- {NAME("nmalloc"), CTL(stats_arenas_i_hchunks_j_nmalloc)},
- {NAME("ndalloc"), CTL(stats_arenas_i_hchunks_j_ndalloc)},
- {NAME("nrequests"), CTL(stats_arenas_i_hchunks_j_nrequests)},
- {NAME("curhchunks"), CTL(stats_arenas_i_hchunks_j_curhchunks)}
-};
-static const ctl_named_node_t super_stats_arenas_i_hchunks_j_node[] = {
- {NAME(""), CHILD(named, stats_arenas_i_hchunks_j)}
-};
+#define OP(mtx) MUTEX_PROF_DATA_NODE(arenas_i_mutexes_##mtx)
+MUTEX_PROF_ARENA_MUTEXES
+#undef OP
-static const ctl_indexed_node_t stats_arenas_i_hchunks_node[] = {
- {INDEX(stats_arenas_i_hchunks_j)}
+static const ctl_named_node_t stats_arenas_i_mutexes_node[] = {
+#define OP(mtx) {NAME(#mtx), CHILD(named, stats_arenas_i_mutexes_##mtx)},
+MUTEX_PROF_ARENA_MUTEXES
+#undef OP
};
static const ctl_named_node_t stats_arenas_i_node[] = {
{NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
+ {NAME("uptime"), CTL(stats_arenas_i_uptime)},
{NAME("dss"), CTL(stats_arenas_i_dss)},
- {NAME("lg_dirty_mult"), CTL(stats_arenas_i_lg_dirty_mult)},
- {NAME("decay_time"), CTL(stats_arenas_i_decay_time)},
+ {NAME("dirty_decay_ms"), CTL(stats_arenas_i_dirty_decay_ms)},
+ {NAME("muzzy_decay_ms"), CTL(stats_arenas_i_muzzy_decay_ms)},
{NAME("pactive"), CTL(stats_arenas_i_pactive)},
{NAME("pdirty"), CTL(stats_arenas_i_pdirty)},
+ {NAME("pmuzzy"), CTL(stats_arenas_i_pmuzzy)},
{NAME("mapped"), CTL(stats_arenas_i_mapped)},
{NAME("retained"), CTL(stats_arenas_i_retained)},
- {NAME("npurge"), CTL(stats_arenas_i_npurge)},
- {NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)},
- {NAME("purged"), CTL(stats_arenas_i_purged)},
- {NAME("metadata"), CHILD(named, stats_arenas_i_metadata)},
+ {NAME("dirty_npurge"), CTL(stats_arenas_i_dirty_npurge)},
+ {NAME("dirty_nmadvise"), CTL(stats_arenas_i_dirty_nmadvise)},
+ {NAME("dirty_purged"), CTL(stats_arenas_i_dirty_purged)},
+ {NAME("muzzy_npurge"), CTL(stats_arenas_i_muzzy_npurge)},
+ {NAME("muzzy_nmadvise"), CTL(stats_arenas_i_muzzy_nmadvise)},
+ {NAME("muzzy_purged"), CTL(stats_arenas_i_muzzy_purged)},
+ {NAME("base"), CTL(stats_arenas_i_base)},
+ {NAME("internal"), CTL(stats_arenas_i_internal)},
+ {NAME("tcache_bytes"), CTL(stats_arenas_i_tcache_bytes)},
+ {NAME("resident"), CTL(stats_arenas_i_resident)},
{NAME("small"), CHILD(named, stats_arenas_i_small)},
{NAME("large"), CHILD(named, stats_arenas_i_large)},
- {NAME("huge"), CHILD(named, stats_arenas_i_huge)},
{NAME("bins"), CHILD(indexed, stats_arenas_i_bins)},
- {NAME("lruns"), CHILD(indexed, stats_arenas_i_lruns)},
- {NAME("hchunks"), CHILD(indexed, stats_arenas_i_hchunks)}
+ {NAME("lextents"), CHILD(indexed, stats_arenas_i_lextents)},
+ {NAME("mutexes"), CHILD(named, stats_arenas_i_mutexes)}
};
static const ctl_named_node_t super_stats_arenas_i_node[] = {
{NAME(""), CHILD(named, stats_arenas_i)}
@@ -484,20 +490,41 @@ static const ctl_indexed_node_t stats_arenas_node[] = {
{INDEX(stats_arenas_i)}
};
+static const ctl_named_node_t stats_background_thread_node[] = {
+ {NAME("num_threads"), CTL(stats_background_thread_num_threads)},
+ {NAME("num_runs"), CTL(stats_background_thread_num_runs)},
+ {NAME("run_interval"), CTL(stats_background_thread_run_interval)}
+};
+
+#define OP(mtx) MUTEX_PROF_DATA_NODE(mutexes_##mtx)
+MUTEX_PROF_GLOBAL_MUTEXES
+#undef OP
+
+static const ctl_named_node_t stats_mutexes_node[] = {
+#define OP(mtx) {NAME(#mtx), CHILD(named, stats_mutexes_##mtx)},
+MUTEX_PROF_GLOBAL_MUTEXES
+#undef OP
+ {NAME("reset"), CTL(stats_mutexes_reset)}
+};
+#undef MUTEX_PROF_DATA_NODE
+
static const ctl_named_node_t stats_node[] = {
- {NAME("cactive"), CTL(stats_cactive)},
{NAME("allocated"), CTL(stats_allocated)},
{NAME("active"), CTL(stats_active)},
{NAME("metadata"), CTL(stats_metadata)},
{NAME("resident"), CTL(stats_resident)},
{NAME("mapped"), CTL(stats_mapped)},
{NAME("retained"), CTL(stats_retained)},
+ {NAME("background_thread"),
+ CHILD(named, stats_background_thread)},
+ {NAME("mutexes"), CHILD(named, stats_mutexes)},
{NAME("arenas"), CHILD(indexed, stats_arenas)}
};
static const ctl_named_node_t root_node[] = {
{NAME("version"), CTL(version)},
{NAME("epoch"), CTL(epoch)},
+ {NAME("background_thread"), CTL(background_thread)},
{NAME("thread"), CHILD(named, thread)},
{NAME("config"), CHILD(named, config)},
{NAME("opt"), CHILD(named, opt)},
@@ -518,310 +545,513 @@ static const ctl_named_node_t super_root_node[] = {
/******************************************************************************/
-static bool
-ctl_arena_init(ctl_arena_stats_t *astats)
-{
+/*
+ * Sets *dst + *src non-atomically. This is safe, since everything is
+ * synchronized by the ctl mutex.
+ */
+static void
+accum_arena_stats_u64(arena_stats_u64_t *dst, arena_stats_u64_t *src) {
+#ifdef JEMALLOC_ATOMIC_U64
+ uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
+ uint64_t cur_src = atomic_load_u64(src, ATOMIC_RELAXED);
+ atomic_store_u64(dst, cur_dst + cur_src, ATOMIC_RELAXED);
+#else
+ *dst += *src;
+#endif
+}
- if (astats->lstats == NULL) {
- astats->lstats = (malloc_large_stats_t *)a0malloc(nlclasses *
- sizeof(malloc_large_stats_t));
- if (astats->lstats == NULL)
- return (true);
+/* Likewise: with ctl mutex synchronization, reading is simple. */
+static uint64_t
+arena_stats_read_u64(arena_stats_u64_t *p) {
+#ifdef JEMALLOC_ATOMIC_U64
+ return atomic_load_u64(p, ATOMIC_RELAXED);
+#else
+ return *p;
+#endif
+}
+
+static void accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) {
+ size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
+ size_t cur_src = atomic_load_zu(src, ATOMIC_RELAXED);
+ atomic_store_zu(dst, cur_dst + cur_src, ATOMIC_RELAXED);
+}
+
+/******************************************************************************/
+
+static unsigned
+arenas_i2a_impl(size_t i, bool compat, bool validate) {
+ unsigned a;
+
+ switch (i) {
+ case MALLCTL_ARENAS_ALL:
+ a = 0;
+ break;
+ case MALLCTL_ARENAS_DESTROYED:
+ a = 1;
+ break;
+ default:
+ if (compat && i == ctl_arenas->narenas) {
+ /*
+ * Provide deprecated backward compatibility for
+ * accessing the merged stats at index narenas rather
+ * than via MALLCTL_ARENAS_ALL. This is scheduled for
+ * removal in 6.0.0.
+ */
+ a = 0;
+ } else if (validate && i >= ctl_arenas->narenas) {
+ a = UINT_MAX;
+ } else {
+ /*
+ * This function should never be called for an index
+ * more than one past the range of indices that have
+ * initialized ctl data.
+ */
+ assert(i < ctl_arenas->narenas || (!validate && i ==
+ ctl_arenas->narenas));
+ a = (unsigned)i + 2;
+ }
+ break;
}
- if (astats->hstats == NULL) {
- astats->hstats = (malloc_huge_stats_t *)a0malloc(nhclasses *
- sizeof(malloc_huge_stats_t));
- if (astats->hstats == NULL)
- return (true);
+ return a;
+}
+
+static unsigned
+arenas_i2a(size_t i) {
+ return arenas_i2a_impl(i, true, false);
+}
+
+static ctl_arena_t *
+arenas_i_impl(tsdn_t *tsdn, size_t i, bool compat, bool init) {
+ ctl_arena_t *ret;
+
+ assert(!compat || !init);
+
+ ret = ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)];
+ if (init && ret == NULL) {
+ if (config_stats) {
+ struct container_s {
+ ctl_arena_t ctl_arena;
+ ctl_arena_stats_t astats;
+ };
+ struct container_s *cont =
+ (struct container_s *)base_alloc(tsdn, b0get(),
+ sizeof(struct container_s), QUANTUM);
+ if (cont == NULL) {
+ return NULL;
+ }
+ ret = &cont->ctl_arena;
+ ret->astats = &cont->astats;
+ } else {
+ ret = (ctl_arena_t *)base_alloc(tsdn, b0get(),
+ sizeof(ctl_arena_t), QUANTUM);
+ if (ret == NULL) {
+ return NULL;
+ }
+ }
+ ret->arena_ind = (unsigned)i;
+ ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)] = ret;
}
- return (false);
+ assert(ret == NULL || arenas_i2a(ret->arena_ind) == arenas_i2a(i));
+ return ret;
+}
+
+static ctl_arena_t *
+arenas_i(size_t i) {
+ ctl_arena_t *ret = arenas_i_impl(TSDN_NULL, i, true, false);
+ assert(ret != NULL);
+ return ret;
}
static void
-ctl_arena_clear(ctl_arena_stats_t *astats)
-{
-
- astats->nthreads = 0;
- astats->dss = dss_prec_names[dss_prec_limit];
- astats->lg_dirty_mult = -1;
- astats->decay_time = -1;
- astats->pactive = 0;
- astats->pdirty = 0;
+ctl_arena_clear(ctl_arena_t *ctl_arena) {
+ ctl_arena->nthreads = 0;
+ ctl_arena->dss = dss_prec_names[dss_prec_limit];
+ ctl_arena->dirty_decay_ms = -1;
+ ctl_arena->muzzy_decay_ms = -1;
+ ctl_arena->pactive = 0;
+ ctl_arena->pdirty = 0;
+ ctl_arena->pmuzzy = 0;
if (config_stats) {
- memset(&astats->astats, 0, sizeof(arena_stats_t));
- astats->allocated_small = 0;
- astats->nmalloc_small = 0;
- astats->ndalloc_small = 0;
- astats->nrequests_small = 0;
- memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t));
- memset(astats->lstats, 0, nlclasses *
+ memset(&ctl_arena->astats->astats, 0, sizeof(arena_stats_t));
+ ctl_arena->astats->allocated_small = 0;
+ ctl_arena->astats->nmalloc_small = 0;
+ ctl_arena->astats->ndalloc_small = 0;
+ ctl_arena->astats->nrequests_small = 0;
+ memset(ctl_arena->astats->bstats, 0, NBINS *
+ sizeof(malloc_bin_stats_t));
+ memset(ctl_arena->astats->lstats, 0, (NSIZES - NBINS) *
sizeof(malloc_large_stats_t));
- memset(astats->hstats, 0, nhclasses *
- sizeof(malloc_huge_stats_t));
}
}
static void
-ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats, arena_t *arena)
-{
+ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) {
unsigned i;
if (config_stats) {
- arena_stats_merge(tsdn, arena, &cstats->nthreads, &cstats->dss,
- &cstats->lg_dirty_mult, &cstats->decay_time,
- &cstats->pactive, &cstats->pdirty, &cstats->astats,
- cstats->bstats, cstats->lstats, cstats->hstats);
+ arena_stats_merge(tsdn, arena, &ctl_arena->nthreads,
+ &ctl_arena->dss, &ctl_arena->dirty_decay_ms,
+ &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive,
+ &ctl_arena->pdirty, &ctl_arena->pmuzzy,
+ &ctl_arena->astats->astats, ctl_arena->astats->bstats,
+ ctl_arena->astats->lstats);
for (i = 0; i < NBINS; i++) {
- cstats->allocated_small += cstats->bstats[i].curregs *
- index2size(i);
- cstats->nmalloc_small += cstats->bstats[i].nmalloc;
- cstats->ndalloc_small += cstats->bstats[i].ndalloc;
- cstats->nrequests_small += cstats->bstats[i].nrequests;
+ ctl_arena->astats->allocated_small +=
+ ctl_arena->astats->bstats[i].curregs *
+ sz_index2size(i);
+ ctl_arena->astats->nmalloc_small +=
+ ctl_arena->astats->bstats[i].nmalloc;
+ ctl_arena->astats->ndalloc_small +=
+ ctl_arena->astats->bstats[i].ndalloc;
+ ctl_arena->astats->nrequests_small +=
+ ctl_arena->astats->bstats[i].nrequests;
}
} else {
- arena_basic_stats_merge(tsdn, arena, &cstats->nthreads,
- &cstats->dss, &cstats->lg_dirty_mult, &cstats->decay_time,
- &cstats->pactive, &cstats->pdirty);
+ arena_basic_stats_merge(tsdn, arena, &ctl_arena->nthreads,
+ &ctl_arena->dss, &ctl_arena->dirty_decay_ms,
+ &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive,
+ &ctl_arena->pdirty, &ctl_arena->pmuzzy);
}
}
static void
-ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
-{
+ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
+ bool destroyed) {
unsigned i;
- sstats->nthreads += astats->nthreads;
- sstats->pactive += astats->pactive;
- sstats->pdirty += astats->pdirty;
+ if (!destroyed) {
+ ctl_sdarena->nthreads += ctl_arena->nthreads;
+ ctl_sdarena->pactive += ctl_arena->pactive;
+ ctl_sdarena->pdirty += ctl_arena->pdirty;
+ ctl_sdarena->pmuzzy += ctl_arena->pmuzzy;
+ } else {
+ assert(ctl_arena->nthreads == 0);
+ assert(ctl_arena->pactive == 0);
+ assert(ctl_arena->pdirty == 0);
+ assert(ctl_arena->pmuzzy == 0);
+ }
if (config_stats) {
- sstats->astats.mapped += astats->astats.mapped;
- sstats->astats.retained += astats->astats.retained;
- sstats->astats.npurge += astats->astats.npurge;
- sstats->astats.nmadvise += astats->astats.nmadvise;
- sstats->astats.purged += astats->astats.purged;
-
- sstats->astats.metadata_mapped +=
- astats->astats.metadata_mapped;
- sstats->astats.metadata_allocated +=
- astats->astats.metadata_allocated;
-
- sstats->allocated_small += astats->allocated_small;
- sstats->nmalloc_small += astats->nmalloc_small;
- sstats->ndalloc_small += astats->ndalloc_small;
- sstats->nrequests_small += astats->nrequests_small;
-
- sstats->astats.allocated_large +=
- astats->astats.allocated_large;
- sstats->astats.nmalloc_large += astats->astats.nmalloc_large;
- sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
- sstats->astats.nrequests_large +=
- astats->astats.nrequests_large;
-
- sstats->astats.allocated_huge += astats->astats.allocated_huge;
- sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge;
- sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge;
+ ctl_arena_stats_t *sdstats = ctl_sdarena->astats;
+ ctl_arena_stats_t *astats = ctl_arena->astats;
+
+ if (!destroyed) {
+ accum_atomic_zu(&sdstats->astats.mapped,
+ &astats->astats.mapped);
+ accum_atomic_zu(&sdstats->astats.retained,
+ &astats->astats.retained);
+ }
+
+ accum_arena_stats_u64(&sdstats->astats.decay_dirty.npurge,
+ &astats->astats.decay_dirty.npurge);
+ accum_arena_stats_u64(&sdstats->astats.decay_dirty.nmadvise,
+ &astats->astats.decay_dirty.nmadvise);
+ accum_arena_stats_u64(&sdstats->astats.decay_dirty.purged,
+ &astats->astats.decay_dirty.purged);
+
+ accum_arena_stats_u64(&sdstats->astats.decay_muzzy.npurge,
+ &astats->astats.decay_muzzy.npurge);
+ accum_arena_stats_u64(&sdstats->astats.decay_muzzy.nmadvise,
+ &astats->astats.decay_muzzy.nmadvise);
+ accum_arena_stats_u64(&sdstats->astats.decay_muzzy.purged,
+ &astats->astats.decay_muzzy.purged);
+
+#define OP(mtx) malloc_mutex_prof_merge( \
+ &(sdstats->astats.mutex_prof_data[ \
+ arena_prof_mutex_##mtx]), \
+ &(astats->astats.mutex_prof_data[ \
+ arena_prof_mutex_##mtx]));
+MUTEX_PROF_ARENA_MUTEXES
+#undef OP
+ if (!destroyed) {
+ accum_atomic_zu(&sdstats->astats.base,
+ &astats->astats.base);
+ accum_atomic_zu(&sdstats->astats.internal,
+ &astats->astats.internal);
+ accum_atomic_zu(&sdstats->astats.resident,
+ &astats->astats.resident);
+ } else {
+ assert(atomic_load_zu(
+ &astats->astats.internal, ATOMIC_RELAXED) == 0);
+ }
+
+ if (!destroyed) {
+ sdstats->allocated_small += astats->allocated_small;
+ } else {
+ assert(astats->allocated_small == 0);
+ }
+ sdstats->nmalloc_small += astats->nmalloc_small;
+ sdstats->ndalloc_small += astats->ndalloc_small;
+ sdstats->nrequests_small += astats->nrequests_small;
+
+ if (!destroyed) {
+ accum_atomic_zu(&sdstats->astats.allocated_large,
+ &astats->astats.allocated_large);
+ } else {
+ assert(atomic_load_zu(&astats->astats.allocated_large,
+ ATOMIC_RELAXED) == 0);
+ }
+ accum_arena_stats_u64(&sdstats->astats.nmalloc_large,
+ &astats->astats.nmalloc_large);
+ accum_arena_stats_u64(&sdstats->astats.ndalloc_large,
+ &astats->astats.ndalloc_large);
+ accum_arena_stats_u64(&sdstats->astats.nrequests_large,
+ &astats->astats.nrequests_large);
+
+ accum_atomic_zu(&sdstats->astats.tcache_bytes,
+ &astats->astats.tcache_bytes);
+
+ if (ctl_arena->arena_ind == 0) {
+ sdstats->astats.uptime = astats->astats.uptime;
+ }
for (i = 0; i < NBINS; i++) {
- sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
- sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
- sstats->bstats[i].nrequests +=
+ sdstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
+ sdstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
+ sdstats->bstats[i].nrequests +=
astats->bstats[i].nrequests;
- sstats->bstats[i].curregs += astats->bstats[i].curregs;
- if (config_tcache) {
- sstats->bstats[i].nfills +=
- astats->bstats[i].nfills;
- sstats->bstats[i].nflushes +=
- astats->bstats[i].nflushes;
+ if (!destroyed) {
+ sdstats->bstats[i].curregs +=
+ astats->bstats[i].curregs;
+ } else {
+ assert(astats->bstats[i].curregs == 0);
}
- sstats->bstats[i].nruns += astats->bstats[i].nruns;
- sstats->bstats[i].reruns += astats->bstats[i].reruns;
- sstats->bstats[i].curruns += astats->bstats[i].curruns;
- }
-
- for (i = 0; i < nlclasses; i++) {
- sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
- sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
- sstats->lstats[i].nrequests +=
- astats->lstats[i].nrequests;
- sstats->lstats[i].curruns += astats->lstats[i].curruns;
+ sdstats->bstats[i].nfills += astats->bstats[i].nfills;
+ sdstats->bstats[i].nflushes +=
+ astats->bstats[i].nflushes;
+ sdstats->bstats[i].nslabs += astats->bstats[i].nslabs;
+ sdstats->bstats[i].reslabs += astats->bstats[i].reslabs;
+ if (!destroyed) {
+ sdstats->bstats[i].curslabs +=
+ astats->bstats[i].curslabs;
+ } else {
+ assert(astats->bstats[i].curslabs == 0);
+ }
+ malloc_mutex_prof_merge(&sdstats->bstats[i].mutex_data,
+ &astats->bstats[i].mutex_data);
}
- for (i = 0; i < nhclasses; i++) {
- sstats->hstats[i].nmalloc += astats->hstats[i].nmalloc;
- sstats->hstats[i].ndalloc += astats->hstats[i].ndalloc;
- sstats->hstats[i].curhchunks +=
- astats->hstats[i].curhchunks;
+ for (i = 0; i < NSIZES - NBINS; i++) {
+ accum_arena_stats_u64(&sdstats->lstats[i].nmalloc,
+ &astats->lstats[i].nmalloc);
+ accum_arena_stats_u64(&sdstats->lstats[i].ndalloc,
+ &astats->lstats[i].ndalloc);
+ accum_arena_stats_u64(&sdstats->lstats[i].nrequests,
+ &astats->lstats[i].nrequests);
+ if (!destroyed) {
+ sdstats->lstats[i].curlextents +=
+ astats->lstats[i].curlextents;
+ } else {
+ assert(astats->lstats[i].curlextents == 0);
+ }
}
}
}
static void
-ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, unsigned i)
-{
- ctl_arena_stats_t *astats = &ctl_stats.arenas[i];
- ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas];
+ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena,
+ unsigned i, bool destroyed) {
+ ctl_arena_t *ctl_arena = arenas_i(i);
- ctl_arena_clear(astats);
- ctl_arena_stats_amerge(tsdn, astats, arena);
+ ctl_arena_clear(ctl_arena);
+ ctl_arena_stats_amerge(tsdn, ctl_arena, arena);
/* Merge into sum stats as well. */
- ctl_arena_stats_smerge(sstats, astats);
+ ctl_arena_stats_sdmerge(ctl_sdarena, ctl_arena, destroyed);
}
-static bool
-ctl_grow(tsdn_t *tsdn)
-{
- ctl_arena_stats_t *astats;
+static unsigned
+ctl_arena_init(tsdn_t *tsdn, extent_hooks_t *extent_hooks) {
+ unsigned arena_ind;
+ ctl_arena_t *ctl_arena;
+
+ if ((ctl_arena = ql_last(&ctl_arenas->destroyed, destroyed_link)) !=
+ NULL) {
+ ql_remove(&ctl_arenas->destroyed, ctl_arena, destroyed_link);
+ arena_ind = ctl_arena->arena_ind;
+ } else {
+ arena_ind = ctl_arenas->narenas;
+ }
+
+ /* Trigger stats allocation. */
+ if (arenas_i_impl(tsdn, arena_ind, false, true) == NULL) {
+ return UINT_MAX;
+ }
/* Initialize new arena. */
- if (arena_init(tsdn, ctl_stats.narenas) == NULL)
- return (true);
-
- /* Allocate extended arena stats. */
- astats = (ctl_arena_stats_t *)a0malloc((ctl_stats.narenas + 2) *
- sizeof(ctl_arena_stats_t));
- if (astats == NULL)
- return (true);
-
- /* Initialize the new astats element. */
- memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
- sizeof(ctl_arena_stats_t));
- memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t));
- if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) {
- a0dalloc(astats);
- return (true);
- }
- /* Swap merged stats to their new location. */
- {
- ctl_arena_stats_t tstats;
- memcpy(&tstats, &astats[ctl_stats.narenas],
- sizeof(ctl_arena_stats_t));
- memcpy(&astats[ctl_stats.narenas],
- &astats[ctl_stats.narenas + 1], sizeof(ctl_arena_stats_t));
- memcpy(&astats[ctl_stats.narenas + 1], &tstats,
- sizeof(ctl_arena_stats_t));
+ if (arena_init(tsdn, arena_ind, extent_hooks) == NULL) {
+ return UINT_MAX;
+ }
+
+ if (arena_ind == ctl_arenas->narenas) {
+ ctl_arenas->narenas++;
}
- a0dalloc(ctl_stats.arenas);
- ctl_stats.arenas = astats;
- ctl_stats.narenas++;
- return (false);
+ return arena_ind;
+}
+
+static void
+ctl_background_thread_stats_read(tsdn_t *tsdn) {
+ background_thread_stats_t *stats = &ctl_stats->background_thread;
+ if (!have_background_thread ||
+ background_thread_stats_read(tsdn, stats)) {
+ memset(stats, 0, sizeof(background_thread_stats_t));
+ nstime_init(&stats->run_interval, 0);
+ }
}
static void
-ctl_refresh(tsdn_t *tsdn)
-{
+ctl_refresh(tsdn_t *tsdn) {
unsigned i;
- VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
+ ctl_arena_t *ctl_sarena = arenas_i(MALLCTL_ARENAS_ALL);
+ VARIABLE_ARRAY(arena_t *, tarenas, ctl_arenas->narenas);
/*
* Clear sum stats, since they will be merged into by
* ctl_arena_refresh().
*/
- ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]);
+ ctl_arena_clear(ctl_sarena);
- for (i = 0; i < ctl_stats.narenas; i++)
+ for (i = 0; i < ctl_arenas->narenas; i++) {
tarenas[i] = arena_get(tsdn, i, false);
+ }
- for (i = 0; i < ctl_stats.narenas; i++) {
+ for (i = 0; i < ctl_arenas->narenas; i++) {
+ ctl_arena_t *ctl_arena = arenas_i(i);
bool initialized = (tarenas[i] != NULL);
- ctl_stats.arenas[i].initialized = initialized;
- if (initialized)
- ctl_arena_refresh(tsdn, tarenas[i], i);
+ ctl_arena->initialized = initialized;
+ if (initialized) {
+ ctl_arena_refresh(tsdn, tarenas[i], ctl_sarena, i,
+ false);
+ }
}
if (config_stats) {
- size_t base_allocated, base_resident, base_mapped;
- base_stats_get(tsdn, &base_allocated, &base_resident,
- &base_mapped);
- ctl_stats.allocated =
- ctl_stats.arenas[ctl_stats.narenas].allocated_small +
- ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large +
- ctl_stats.arenas[ctl_stats.narenas].astats.allocated_huge;
- ctl_stats.active =
- (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE);
- ctl_stats.metadata = base_allocated +
- ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped +
- ctl_stats.arenas[ctl_stats.narenas].astats
- .metadata_allocated;
- ctl_stats.resident = base_resident +
- ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped +
- ((ctl_stats.arenas[ctl_stats.narenas].pactive +
- ctl_stats.arenas[ctl_stats.narenas].pdirty) << LG_PAGE);
- ctl_stats.mapped = base_mapped +
- ctl_stats.arenas[ctl_stats.narenas].astats.mapped;
- ctl_stats.retained =
- ctl_stats.arenas[ctl_stats.narenas].astats.retained;
- }
-
- ctl_epoch++;
+ ctl_stats->allocated = ctl_sarena->astats->allocated_small +
+ atomic_load_zu(&ctl_sarena->astats->astats.allocated_large,
+ ATOMIC_RELAXED);
+ ctl_stats->active = (ctl_sarena->pactive << LG_PAGE);
+ ctl_stats->metadata = atomic_load_zu(
+ &ctl_sarena->astats->astats.base, ATOMIC_RELAXED) +
+ atomic_load_zu(&ctl_sarena->astats->astats.internal,
+ ATOMIC_RELAXED);
+ ctl_stats->resident = atomic_load_zu(
+ &ctl_sarena->astats->astats.resident, ATOMIC_RELAXED);
+ ctl_stats->mapped = atomic_load_zu(
+ &ctl_sarena->astats->astats.mapped, ATOMIC_RELAXED);
+ ctl_stats->retained = atomic_load_zu(
+ &ctl_sarena->astats->astats.retained, ATOMIC_RELAXED);
+
+ ctl_background_thread_stats_read(tsdn);
+
+#define READ_GLOBAL_MUTEX_PROF_DATA(i, mtx) \
+ malloc_mutex_lock(tsdn, &mtx); \
+ malloc_mutex_prof_read(tsdn, &ctl_stats->mutex_prof_data[i], &mtx); \
+ malloc_mutex_unlock(tsdn, &mtx);
+
+ if (config_prof && opt_prof) {
+ READ_GLOBAL_MUTEX_PROF_DATA(global_prof_mutex_prof,
+ bt2gctx_mtx);
+ }
+ if (have_background_thread) {
+ READ_GLOBAL_MUTEX_PROF_DATA(
+ global_prof_mutex_background_thread,
+ background_thread_lock);
+ } else {
+ memset(&ctl_stats->mutex_prof_data[
+ global_prof_mutex_background_thread], 0,
+ sizeof(mutex_prof_data_t));
+ }
+ /* We own ctl mutex already. */
+ malloc_mutex_prof_read(tsdn,
+ &ctl_stats->mutex_prof_data[global_prof_mutex_ctl],
+ &ctl_mtx);
+#undef READ_GLOBAL_MUTEX_PROF_DATA
+ }
+ ctl_arenas->epoch++;
}
static bool
-ctl_init(tsdn_t *tsdn)
-{
+ctl_init(tsdn_t *tsdn) {
bool ret;
malloc_mutex_lock(tsdn, &ctl_mtx);
if (!ctl_initialized) {
+ ctl_arena_t *ctl_sarena, *ctl_darena;
+ unsigned i;
+
+ /*
+ * Allocate demand-zeroed space for pointers to the full
+ * range of supported arena indices.
+ */
+ if (ctl_arenas == NULL) {
+ ctl_arenas = (ctl_arenas_t *)base_alloc(tsdn,
+ b0get(), sizeof(ctl_arenas_t), QUANTUM);
+ if (ctl_arenas == NULL) {
+ ret = true;
+ goto label_return;
+ }
+ }
+
+ if (config_stats && ctl_stats == NULL) {
+ ctl_stats = (ctl_stats_t *)base_alloc(tsdn, b0get(),
+ sizeof(ctl_stats_t), QUANTUM);
+ if (ctl_stats == NULL) {
+ ret = true;
+ goto label_return;
+ }
+ }
+
/*
- * Allocate space for one extra arena stats element, which
- * contains summed stats across all arenas.
+ * Allocate space for the current full range of arenas
+ * here rather than doing it lazily elsewhere, in order
+ * to limit when OOM-caused errors can occur.
*/
- ctl_stats.narenas = narenas_total_get();
- ctl_stats.arenas = (ctl_arena_stats_t *)a0malloc(
- (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t));
- if (ctl_stats.arenas == NULL) {
+ if ((ctl_sarena = arenas_i_impl(tsdn, MALLCTL_ARENAS_ALL, false,
+ true)) == NULL) {
ret = true;
goto label_return;
}
- memset(ctl_stats.arenas, 0, (ctl_stats.narenas + 1) *
- sizeof(ctl_arena_stats_t));
+ ctl_sarena->initialized = true;
+ if ((ctl_darena = arenas_i_impl(tsdn, MALLCTL_ARENAS_DESTROYED,
+ false, true)) == NULL) {
+ ret = true;
+ goto label_return;
+ }
+ ctl_arena_clear(ctl_darena);
/*
- * Initialize all stats structures, regardless of whether they
- * ever get used. Lazy initialization would allow errors to
- * cause inconsistent state to be viewable by the application.
+ * Don't toggle ctl_darena to initialized until an arena is
+ * actually destroyed, so that arena.<i>.initialized can be used
+ * to query whether the stats are relevant.
*/
- if (config_stats) {
- unsigned i;
- for (i = 0; i <= ctl_stats.narenas; i++) {
- if (ctl_arena_init(&ctl_stats.arenas[i])) {
- unsigned j;
- for (j = 0; j < i; j++) {
- a0dalloc(
- ctl_stats.arenas[j].lstats);
- a0dalloc(
- ctl_stats.arenas[j].hstats);
- }
- a0dalloc(ctl_stats.arenas);
- ctl_stats.arenas = NULL;
- ret = true;
- goto label_return;
- }
+
+ ctl_arenas->narenas = narenas_total_get();
+ for (i = 0; i < ctl_arenas->narenas; i++) {
+ if (arenas_i_impl(tsdn, i, false, true) == NULL) {
+ ret = true;
+ goto label_return;
}
}
- ctl_stats.arenas[ctl_stats.narenas].initialized = true;
- ctl_epoch = 0;
+ ql_new(&ctl_arenas->destroyed);
ctl_refresh(tsdn);
+
ctl_initialized = true;
}
ret = false;
label_return:
malloc_mutex_unlock(tsdn, &ctl_mtx);
- return (ret);
+ return ret;
}
static int
ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
- size_t *mibp, size_t *depthp)
-{
+ size_t *mibp, size_t *depthp) {
int ret;
const char *elm, *tdot, *dot;
size_t elen, i, j;
@@ -849,9 +1079,10 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
if (strlen(child->name) == elen &&
strncmp(elm, child->name, elen) == 0) {
node = child;
- if (nodesp != NULL)
+ if (nodesp != NULL) {
nodesp[i] =
(const ctl_node_t *)node;
+ }
mibp[i] = j;
break;
}
@@ -878,8 +1109,9 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
goto label_return;
}
- if (nodesp != NULL)
+ if (nodesp != NULL) {
nodesp[i] = (const ctl_node_t *)node;
+ }
mibp[i] = (size_t)index;
}
@@ -912,13 +1144,12 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
ret = 0;
label_return:
- return (ret);
+ return ret;
}
int
ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
-{
+ void *newp, size_t newlen) {
int ret;
size_t depth;
ctl_node_t const *nodes[CTL_MAX_DEPTH];
@@ -932,13 +1163,14 @@ ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
depth = CTL_MAX_DEPTH;
ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth);
- if (ret != 0)
+ if (ret != 0) {
goto label_return;
+ }
node = ctl_named_node(nodes[depth-1]);
- if (node != NULL && node->ctl)
+ if (node != NULL && node->ctl) {
ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen);
- else {
+ } else {
/* The name refers to a partial path through the ctl tree. */
ret = ENOENT;
}
@@ -948,8 +1180,7 @@ label_return:
}
int
-ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp, size_t *miblenp)
-{
+ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp, size_t *miblenp) {
int ret;
if (!ctl_initialized && ctl_init(tsdn)) {
@@ -964,8 +1195,7 @@ label_return:
int
ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
const ctl_named_node_t *node;
size_t i;
@@ -982,7 +1212,7 @@ ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
assert(node->nchildren > 0);
if (ctl_named_node(node->children) != NULL) {
/* Children are named. */
- if (node->nchildren <= (unsigned)mib[i]) {
+ if (node->nchildren <= mib[i]) {
ret = ENOENT;
goto label_return;
}
@@ -1001,9 +1231,9 @@ ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
}
/* Call the ctl function. */
- if (node && node->ctl)
+ if (node && node->ctl) {
ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
- else {
+ } else {
/* Partial MIB. */
ret = ENOENT;
}
@@ -1013,56 +1243,50 @@ label_return:
}
bool
-ctl_boot(void)
-{
-
- if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL))
- return (true);
+ctl_boot(void) {
+ if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL,
+ malloc_mutex_rank_exclusive)) {
+ return true;
+ }
ctl_initialized = false;
- return (false);
+ return false;
}
void
-ctl_prefork(tsdn_t *tsdn)
-{
-
+ctl_prefork(tsdn_t *tsdn) {
malloc_mutex_prefork(tsdn, &ctl_mtx);
}
void
-ctl_postfork_parent(tsdn_t *tsdn)
-{
-
+ctl_postfork_parent(tsdn_t *tsdn) {
malloc_mutex_postfork_parent(tsdn, &ctl_mtx);
}
void
-ctl_postfork_child(tsdn_t *tsdn)
-{
-
+ctl_postfork_child(tsdn_t *tsdn) {
malloc_mutex_postfork_child(tsdn, &ctl_mtx);
}
/******************************************************************************/
/* *_ctl() functions. */
-#define READONLY() do { \
+#define READONLY() do { \
if (newp != NULL || newlen != 0) { \
ret = EPERM; \
goto label_return; \
} \
} while (0)
-#define WRITEONLY() do { \
+#define WRITEONLY() do { \
if (oldp != NULL || oldlenp != NULL) { \
ret = EPERM; \
goto label_return; \
} \
} while (0)
-#define READ_XOR_WRITE() do { \
+#define READ_XOR_WRITE() do { \
if ((oldp != NULL && oldlenp != NULL) && (newp != NULL || \
newlen != 0)) { \
ret = EPERM; \
@@ -1070,7 +1294,7 @@ ctl_postfork_child(tsdn_t *tsdn)
} \
} while (0)
-#define READ(v, t) do { \
+#define READ(v, t) do { \
if (oldp != NULL && oldlenp != NULL) { \
if (*oldlenp != sizeof(t)) { \
size_t copylen = (sizeof(t) <= *oldlenp) \
@@ -1083,7 +1307,7 @@ ctl_postfork_child(tsdn_t *tsdn)
} \
} while (0)
-#define WRITE(v, t) do { \
+#define WRITE(v, t) do { \
if (newp != NULL) { \
if (newlen != sizeof(t)) { \
ret = EINVAL; \
@@ -1093,43 +1317,53 @@ ctl_postfork_child(tsdn_t *tsdn)
} \
} while (0)
+#define MIB_UNSIGNED(v, i) do { \
+ if (mib[i] > UINT_MAX) { \
+ ret = EFAULT; \
+ goto label_return; \
+ } \
+ v = (unsigned)mib[i]; \
+} while (0)
+
/*
* There's a lot of code duplication in the following macros due to limitations
* in how nested cpp macros are expanded.
*/
-#define CTL_RO_CLGEN(c, l, n, v, t) \
+#define CTL_RO_CLGEN(c, l, n, v, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
- size_t *oldlenp, void *newp, size_t newlen) \
-{ \
+ size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
- if (!(c)) \
- return (ENOENT); \
- if (l) \
+ if (!(c)) { \
+ return ENOENT; \
+ } \
+ if (l) { \
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
+ } \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
- if (l) \
+ if (l) { \
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
- return (ret); \
+ } \
+ return ret; \
}
-#define CTL_RO_CGEN(c, n, v, t) \
+#define CTL_RO_CGEN(c, n, v, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
- size_t *oldlenp, void *newp, size_t newlen) \
-{ \
+ size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
- if (!(c)) \
- return (ENOENT); \
+ if (!(c)) { \
+ return ENOENT; \
+ } \
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
READONLY(); \
oldval = (v); \
@@ -1138,14 +1372,13 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
ret = 0; \
label_return: \
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
- return (ret); \
+ return ret; \
}
-#define CTL_RO_GEN(n, v, t) \
+#define CTL_RO_GEN(n, v, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
- size_t *oldlenp, void *newp, size_t newlen) \
-{ \
+ size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
@@ -1157,37 +1390,36 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
ret = 0; \
label_return: \
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
- return (ret); \
+ return ret; \
}
/*
* ctl_mtx is not acquired, under the assumption that no pertinent data will
* mutate during the call.
*/
-#define CTL_RO_NL_CGEN(c, n, v, t) \
+#define CTL_RO_NL_CGEN(c, n, v, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
- size_t *oldlenp, void *newp, size_t newlen) \
-{ \
+ size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
- if (!(c)) \
- return (ENOENT); \
+ if (!(c)) { \
+ return ENOENT; \
+ } \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
- return (ret); \
+ return ret; \
}
-#define CTL_RO_NL_GEN(n, v, t) \
+#define CTL_RO_NL_GEN(n, v, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
- size_t *oldlenp, void *newp, size_t newlen) \
-{ \
+ size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
@@ -1197,33 +1429,32 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
\
ret = 0; \
label_return: \
- return (ret); \
+ return ret; \
}
-#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \
+#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
- size_t *oldlenp, void *newp, size_t newlen) \
-{ \
+ size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
- if (!(c)) \
- return (ENOENT); \
+ if (!(c)) { \
+ return ENOENT; \
+ } \
READONLY(); \
oldval = (m(tsd)); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
- return (ret); \
+ return ret; \
}
-#define CTL_RO_CONFIG_GEN(n, t) \
+#define CTL_RO_CONFIG_GEN(n, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
- size_t *oldlenp, void *newp, size_t newlen) \
-{ \
+ size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
@@ -1233,7 +1464,7 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
\
ret = 0; \
label_return: \
- return (ret); \
+ return ret; \
}
/******************************************************************************/
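
As a reading aid: the CTL_*GEN macros above stamp out one <name>_ctl() handler per leaf, and this revision only changes their brace and return style. CTL_RO_NL_GEN(opt_abort, opt_abort, bool), for example, expands to roughly the following (READONLY() and READ() are the helper macros defined a few hunks above):

static int
opt_abort_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
        int ret;
        bool oldval;

        READONLY();             /* Reject any attempt to write this leaf. */
        oldval = (opt_abort);
        READ(oldval, bool);     /* Copy oldval out if oldp/oldlenp were given. */

        ret = 0;
label_return:
        return ret;
}
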
@@ -1242,21 +1473,79 @@ CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)
static int
epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
UNUSED uint64_t newval;
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
WRITE(newval, uint64_t);
- if (newp != NULL)
+ if (newp != NULL) {
ctl_refresh(tsd_tsdn(tsd));
- READ(ctl_epoch, uint64_t);
+ }
+ READ(ctl_arenas->epoch, uint64_t);
+
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ return ret;
+}
+
+static int
+background_thread_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ bool oldval;
+
+ if (!have_background_thread) {
+ return ENOENT;
+ }
+ background_thread_ctl_init(tsd_tsdn(tsd));
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
+ if (newp == NULL) {
+ oldval = background_thread_enabled();
+ READ(oldval, bool);
+ } else {
+ if (newlen != sizeof(bool)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ oldval = background_thread_enabled();
+ READ(oldval, bool);
+ bool newval = *(bool *)newp;
+ if (newval == oldval) {
+ ret = 0;
+ goto label_return;
+ }
+
+ background_thread_enabled_set(tsd_tsdn(tsd), newval);
+ if (newval) {
+ if (!can_enable_background_thread) {
+ malloc_printf("<jemalloc>: Error in dlsym("
+ "RTLD_NEXT, \"pthread_create\"). Cannot "
+ "enable background_thread\n");
+ ret = EFAULT;
+ goto label_return;
+ }
+ if (background_threads_enable(tsd)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ } else {
+ if (background_threads_disable(tsd)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ }
+ }
ret = 0;
label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
- return (ret);
+
+ return ret;
}
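/*
 * A minimal sketch of toggling this control from application code (assuming
 * the public mallctl() API; the helper name is illustrative). Writing a bool
 * enables or disables background threads, subject to the ENOENT/EFAULT cases
 * handled above.
 */
#include <stdbool.h>
#include <jemalloc/jemalloc.h>

static int
example_set_background_thread(bool enable) {
	bool old;
	size_t sz = sizeof(old);
	return mallctl("background_thread", &old, &sz, &enable, sizeof(enable));
}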
/******************************************************************************/
@@ -1266,37 +1555,34 @@ CTL_RO_CONFIG_GEN(config_debug, bool)
CTL_RO_CONFIG_GEN(config_fill, bool)
CTL_RO_CONFIG_GEN(config_lazy_lock, bool)
CTL_RO_CONFIG_GEN(config_malloc_conf, const char *)
-CTL_RO_CONFIG_GEN(config_munmap, bool)
CTL_RO_CONFIG_GEN(config_prof, bool)
CTL_RO_CONFIG_GEN(config_prof_libgcc, bool)
CTL_RO_CONFIG_GEN(config_prof_libunwind, bool)
CTL_RO_CONFIG_GEN(config_stats, bool)
-CTL_RO_CONFIG_GEN(config_tcache, bool)
CTL_RO_CONFIG_GEN(config_thp, bool)
-CTL_RO_CONFIG_GEN(config_tls, bool)
CTL_RO_CONFIG_GEN(config_utrace, bool)
-CTL_RO_CONFIG_GEN(config_valgrind, bool)
CTL_RO_CONFIG_GEN(config_xmalloc, bool)
/******************************************************************************/
CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
+CTL_RO_NL_GEN(opt_abort_conf, opt_abort_conf, bool)
+CTL_RO_NL_GEN(opt_retain, opt_retain, bool)
CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
-CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
-CTL_RO_NL_GEN(opt_purge, purge_mode_names[opt_purge], const char *)
-CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
-CTL_RO_NL_GEN(opt_decay_time, opt_decay_time, ssize_t)
+CTL_RO_NL_GEN(opt_percpu_arena, percpu_arena_mode_names[opt_percpu_arena],
+ const char *)
+CTL_RO_NL_GEN(opt_background_thread, opt_background_thread, bool)
+CTL_RO_NL_GEN(opt_dirty_decay_ms, opt_dirty_decay_ms, ssize_t)
+CTL_RO_NL_GEN(opt_muzzy_decay_ms, opt_muzzy_decay_ms, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
+CTL_RO_NL_GEN(opt_stats_print_opts, opt_stats_print_opts, const char *)
CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
-CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
-CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool)
CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
-CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
-CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
-CTL_RO_NL_CGEN(config_thp, opt_thp, opt_thp, bool)
+CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool)
+CTL_RO_NL_GEN(opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool)
@@ -1313,29 +1599,41 @@ CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
static int
thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
arena_t *oldarena;
unsigned newind, oldind;
oldarena = arena_choose(tsd, NULL);
- if (oldarena == NULL)
- return (EAGAIN);
-
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
- newind = oldind = oldarena->ind;
+ if (oldarena == NULL) {
+ return EAGAIN;
+ }
+ newind = oldind = arena_ind_get(oldarena);
WRITE(newind, unsigned);
READ(oldind, unsigned);
+
if (newind != oldind) {
arena_t *newarena;
- if (newind >= ctl_stats.narenas) {
+ if (newind >= narenas_total_get()) {
/* New arena index is out of range. */
ret = EFAULT;
goto label_return;
}
+ if (have_percpu_arena &&
+ PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
+ if (newind < percpu_arena_ind_limit(opt_percpu_arena)) {
+ /*
+ * If perCPU arena is enabled, thread_arena
+ * control is not allowed for the auto arena
+ * range.
+ */
+ ret = EPERM;
+ goto label_return;
+ }
+ }
+
/* Initialize arena if necessary. */
newarena = arena_get(tsd_tsdn(tsd), newind, true);
if (newarena == NULL) {
@@ -1344,19 +1642,15 @@ thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
}
/* Set new arena/tcache associations. */
arena_migrate(tsd, oldind, newind);
- if (config_tcache) {
- tcache_t *tcache = tsd_tcache_get(tsd);
- if (tcache != NULL) {
- tcache_arena_reassociate(tsd_tsdn(tsd), tcache,
- oldarena, newarena);
- }
+ if (tcache_available(tsd)) {
+ tcache_arena_reassociate(tsd_tsdn(tsd),
+ tsd_tcachep_get(tsd), newarena);
}
}
ret = 0;
label_return:
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
- return (ret);
+ return ret;
}
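/*
 * A usage sketch (public mallctl() API; helper name illustrative): reading
 * "thread.arena" yields the calling thread's arena index, and writing a new
 * index migrates the thread, failing with EPERM inside the automatic per-CPU
 * range as implemented above.
 */
#include <jemalloc/jemalloc.h>

static int
example_migrate_thread(unsigned new_arena_ind) {
	unsigned old_ind;
	size_t sz = sizeof(old_ind);
	return mallctl("thread.arena", &old_ind, &sz, &new_arena_ind,
	    sizeof(new_arena_ind));
}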
CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get,
@@ -1370,37 +1664,34 @@ CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp,
static int
thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen)
-{
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
bool oldval;
- if (!config_tcache)
- return (ENOENT);
-
- oldval = tcache_enabled_get();
+ oldval = tcache_enabled_get(tsd);
if (newp != NULL) {
if (newlen != sizeof(bool)) {
ret = EINVAL;
goto label_return;
}
- tcache_enabled_set(*(bool *)newp);
+ tcache_enabled_set(tsd, *(bool *)newp);
}
READ(oldval, bool);
ret = 0;
label_return:
- return (ret);
+ return ret;
}
static int
thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen)
-{
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
- if (!config_tcache)
- return (ENOENT);
+ if (!tcache_available(tsd)) {
+ ret = EFAULT;
+ goto label_return;
+ }
READONLY();
WRITEONLY();
@@ -1409,17 +1700,17 @@ thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
ret = 0;
label_return:
- return (ret);
+ return ret;
}
static int
thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
- if (!config_prof)
- return (ENOENT);
+ if (!config_prof) {
+ return ENOENT;
+ }
READ_XOR_WRITE();
@@ -1430,8 +1721,9 @@ thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
}
if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) !=
- 0)
+ 0) {
goto label_return;
+ }
} else {
const char *oldname = prof_thread_name_get(tsd);
READ(oldname, const char *);
@@ -1439,18 +1731,18 @@ thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
- return (ret);
+ return ret;
}
static int
thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
bool oldval;
- if (!config_prof)
- return (ENOENT);
+ if (!config_prof) {
+ return ENOENT;
+ }
oldval = prof_thread_active_get(tsd);
if (newp != NULL) {
@@ -1467,21 +1759,17 @@ thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
- return (ret);
+ return ret;
}
/******************************************************************************/
static int
tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
unsigned tcache_ind;
- if (!config_tcache)
- return (ENOENT);
-
READONLY();
if (tcaches_create(tsd, &tcache_ind)) {
ret = EFAULT;
@@ -1496,14 +1784,10 @@ label_return:
static int
tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
unsigned tcache_ind;
- if (!config_tcache)
- return (ENOENT);
-
WRITEONLY();
tcache_ind = UINT_MAX;
WRITE(tcache_ind, unsigned);
@@ -1515,19 +1799,15 @@ tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
- return (ret);
+ return ret;
}
static int
tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
unsigned tcache_ind;
- if (!config_tcache)
- return (ENOENT);
-
WRITEONLY();
tcache_ind = UINT_MAX;
WRITE(tcache_ind, unsigned);
@@ -1539,25 +1819,50 @@ tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
- return (ret);
+ return ret;
}
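/*
 * A sketch of the explicit tcache workflow (public mallctl()/mallocx() API;
 * helper name illustrative): "tcache.create" yields an index usable with
 * MALLOCX_TCACHE(), and "tcache.flush"/"tcache.destroy" feed that index back
 * through the controls above.
 */
#include <stddef.h>
#include <jemalloc/jemalloc.h>

static void *
example_alloc_with_explicit_tcache(size_t size, unsigned *tcache_ind) {
	size_t sz = sizeof(*tcache_ind);
	if (mallctl("tcache.create", tcache_ind, &sz, NULL, 0) != 0) {
		return NULL;
	}
	return mallocx(size, MALLOCX_TCACHE(*tcache_ind));
}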
/******************************************************************************/
-static void
-arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all)
-{
+static int
+arena_i_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ tsdn_t *tsdn = tsd_tsdn(tsd);
+ unsigned arena_ind;
+ bool initialized;
+
+ READONLY();
+ MIB_UNSIGNED(arena_ind, 1);
malloc_mutex_lock(tsdn, &ctl_mtx);
+ initialized = arenas_i(arena_ind)->initialized;
+ malloc_mutex_unlock(tsdn, &ctl_mtx);
+
+ READ(initialized, bool);
+
+ ret = 0;
+label_return:
+ return ret;
+}
+
+static void
+arena_i_decay(tsdn_t *tsdn, unsigned arena_ind, bool all) {
+ malloc_mutex_lock(tsdn, &ctl_mtx);
{
- unsigned narenas = ctl_stats.narenas;
+ unsigned narenas = ctl_arenas->narenas;
- if (arena_ind == narenas) {
+ /*
+ * Access via index narenas is deprecated, and scheduled for
+ * removal in 6.0.0.
+ */
+ if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind == narenas) {
unsigned i;
VARIABLE_ARRAY(arena_t *, tarenas, narenas);
- for (i = 0; i < narenas; i++)
+ for (i = 0; i < narenas; i++) {
tarenas[i] = arena_get(tsdn, i, false);
+ }
/*
* No further need to hold ctl_mtx, since narenas and
@@ -1566,8 +1871,10 @@ arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all)
malloc_mutex_unlock(tsdn, &ctl_mtx);
for (i = 0; i < narenas; i++) {
- if (tarenas[i] != NULL)
- arena_purge(tsdn, tarenas[i], all);
+ if (tarenas[i] != NULL) {
+ arena_decay(tsdn, tarenas[i], false,
+ all);
+ }
}
} else {
arena_t *tarena;
@@ -1579,88 +1886,172 @@ arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all)
/* No further need to hold ctl_mtx. */
malloc_mutex_unlock(tsdn, &ctl_mtx);
- if (tarena != NULL)
- arena_purge(tsdn, tarena, all);
+ if (tarena != NULL) {
+ arena_decay(tsdn, tarena, false, all);
+ }
}
}
}
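/*
 * A caller-side sketch (public mallctl() API; MALLCTL_ARENAS_ALL is the
 * special "all arenas" index exported by <jemalloc/jemalloc.h>; helper name
 * illustrative): both "arena.<i>.purge" and "arena.<i>.decay" funnel into
 * arena_i_decay() above.
 */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static int
example_purge_all_arenas(void) {
	char cmd[64];
	/* Builds e.g. "arena.<MALLCTL_ARENAS_ALL>.purge". */
	snprintf(cmd, sizeof(cmd), "arena.%u.purge",
	    (unsigned)MALLCTL_ARENAS_ALL);
	return mallctl(cmd, NULL, NULL, NULL, 0);
}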
static int
-arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
+ unsigned arena_ind;
READONLY();
WRITEONLY();
- arena_i_purge(tsd_tsdn(tsd), (unsigned)mib[1], true);
+ MIB_UNSIGNED(arena_ind, 1);
+ arena_i_decay(tsd_tsdn(tsd), arena_ind, false);
ret = 0;
label_return:
- return (ret);
+ return ret;
}
static int
-arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
+ unsigned arena_ind;
READONLY();
WRITEONLY();
- arena_i_purge(tsd_tsdn(tsd), (unsigned)mib[1], false);
+ MIB_UNSIGNED(arena_ind, 1);
+ arena_i_decay(tsd_tsdn(tsd), arena_ind, true);
ret = 0;
label_return:
- return (ret);
+ return ret;
}
static int
-arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+arena_i_reset_destroy_helper(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen, unsigned *arena_ind,
+ arena_t **arena) {
int ret;
- unsigned arena_ind;
- arena_t *arena;
READONLY();
WRITEONLY();
+ MIB_UNSIGNED(*arena_ind, 1);
- if ((config_valgrind && unlikely(in_valgrind)) || (config_fill &&
- unlikely(opt_quarantine))) {
+ *arena = arena_get(tsd_tsdn(tsd), *arena_ind, false);
+ if (*arena == NULL || arena_is_auto(*arena)) {
ret = EFAULT;
goto label_return;
}
- arena_ind = (unsigned)mib[1];
- if (config_debug) {
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
- assert(arena_ind < ctl_stats.narenas);
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ ret = 0;
+label_return:
+ return ret;
+}
+
+static void
+arena_reset_prepare_background_thread(tsd_t *tsd, unsigned arena_ind) {
+ /* Temporarily disable the background thread during arena reset. */
+ if (have_background_thread) {
+ malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
+ if (background_thread_enabled()) {
+ unsigned ind = arena_ind % ncpus;
+ background_thread_info_t *info =
+ &background_thread_info[ind];
+ assert(info->state == background_thread_started);
+ malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
+ info->state = background_thread_paused;
+ malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
+ }
+ }
+}
+
+static void
+arena_reset_finish_background_thread(tsd_t *tsd, unsigned arena_ind) {
+ if (have_background_thread) {
+ if (background_thread_enabled()) {
+ unsigned ind = arena_ind % ncpus;
+ background_thread_info_t *info =
+ &background_thread_info[ind];
+			assert(info->state == background_thread_paused);
+ malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
+ info->state = background_thread_started;
+ malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
}
- assert(arena_ind >= opt_narenas);
+}
- arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
+static int
+arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ unsigned arena_ind;
+ arena_t *arena;
+ ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
+ newp, newlen, &arena_ind, &arena);
+ if (ret != 0) {
+ return ret;
+ }
+
+ arena_reset_prepare_background_thread(tsd, arena_ind);
arena_reset(tsd, arena);
+ arena_reset_finish_background_thread(tsd, arena_ind);
- ret = 0;
+ return ret;
+}
+
+static int
+arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ unsigned arena_ind;
+ arena_t *arena;
+ ctl_arena_t *ctl_darena, *ctl_arena;
+
+ ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
+ newp, newlen, &arena_ind, &arena);
+ if (ret != 0) {
+ goto label_return;
+ }
+
+ if (arena_nthreads_get(arena, false) != 0 || arena_nthreads_get(arena,
+ true) != 0) {
+ ret = EFAULT;
+ goto label_return;
+ }
+
+ arena_reset_prepare_background_thread(tsd, arena_ind);
+ /* Merge stats after resetting and purging arena. */
+ arena_reset(tsd, arena);
+ arena_decay(tsd_tsdn(tsd), arena, false, true);
+ ctl_darena = arenas_i(MALLCTL_ARENAS_DESTROYED);
+ ctl_darena->initialized = true;
+ ctl_arena_refresh(tsd_tsdn(tsd), arena, ctl_darena, arena_ind, true);
+ /* Destroy arena. */
+ arena_destroy(tsd, arena);
+ ctl_arena = arenas_i(arena_ind);
+ ctl_arena->initialized = false;
+ /* Record arena index for later recycling via arenas.create. */
+ ql_elm_new(ctl_arena, destroyed_link);
+ ql_tail_insert(&ctl_arenas->destroyed, ctl_arena, destroyed_link);
+ arena_reset_finish_background_thread(tsd, arena_ind);
+
+ assert(ret == 0);
label_return:
- return (ret);
+ return ret;
}
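/*
 * A lifecycle sketch for manually managed arenas (public mallctl() API;
 * helper name illustrative): "arenas.create" hands back a fresh index, and
 * "arena.<ind>.destroy" retires it through arena_i_destroy_ctl() above, after
 * which the index may be recycled by a later "arenas.create".
 */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static int
example_arena_lifecycle(void) {
	unsigned ind;
	size_t sz = sizeof(ind);
	char cmd[64];

	if (mallctl("arenas.create", &ind, &sz, NULL, 0) != 0) {
		return -1;
	}
	/* ... allocate from it via mallocx(size, MALLOCX_ARENA(ind)) ... */
	snprintf(cmd, sizeof(cmd), "arena.%u.destroy", ind);
	return mallctl(cmd, NULL, NULL, NULL, 0);
}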
static int
arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
const char *dss = NULL;
- unsigned arena_ind = (unsigned)mib[1];
+ unsigned arena_ind;
dss_prec_t dss_prec_old = dss_prec_limit;
dss_prec_t dss_prec = dss_prec_limit;
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
WRITE(dss, const char *);
+ MIB_UNSIGNED(arena_ind, 1);
if (dss != NULL) {
int i;
bool match = false;
@@ -1679,21 +2070,26 @@ arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
}
}
- if (arena_ind < ctl_stats.narenas) {
- arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
- if (arena == NULL || (dss_prec != dss_prec_limit &&
- arena_dss_prec_set(tsd_tsdn(tsd), arena, dss_prec))) {
+ /*
+ * Access via index narenas is deprecated, and scheduled for removal in
+ * 6.0.0.
+ */
+ if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind ==
+ ctl_arenas->narenas) {
+ if (dss_prec != dss_prec_limit &&
+ extent_dss_prec_set(dss_prec)) {
ret = EFAULT;
goto label_return;
}
- dss_prec_old = arena_dss_prec_get(tsd_tsdn(tsd), arena);
+ dss_prec_old = extent_dss_prec_get();
} else {
- if (dss_prec != dss_prec_limit &&
- chunk_dss_prec_set(dss_prec)) {
+ arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
+ if (arena == NULL || (dss_prec != dss_prec_limit &&
+ arena_dss_prec_set(arena, dss_prec))) {
ret = EFAULT;
goto label_return;
}
- dss_prec_old = chunk_dss_prec_get();
+ dss_prec_old = arena_dss_prec_get(arena);
}
dss = dss_prec_names[dss_prec_old];
@@ -1702,17 +2098,17 @@ arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
- return (ret);
+ return ret;
}
static int
-arena_i_lg_dirty_mult_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen)
-{
+arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) {
int ret;
- unsigned arena_ind = (unsigned)mib[1];
+ unsigned arena_ind;
arena_t *arena;
+ MIB_UNSIGNED(arena_ind, 1);
arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
if (arena == NULL) {
ret = EFAULT;
@@ -1720,7 +2116,8 @@ arena_i_lg_dirty_mult_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
}
if (oldp != NULL && oldlenp != NULL) {
- size_t oldval = arena_lg_dirty_mult_get(tsd_tsdn(tsd), arena);
+ size_t oldval = dirty ? arena_dirty_decay_ms_get(arena) :
+ arena_muzzy_decay_ms_get(arena);
READ(oldval, ssize_t);
}
if (newp != NULL) {
@@ -1728,8 +2125,9 @@ arena_i_lg_dirty_mult_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
ret = EINVAL;
goto label_return;
}
- if (arena_lg_dirty_mult_set(tsd_tsdn(tsd), arena,
- *(ssize_t *)newp)) {
+ if (dirty ? arena_dirty_decay_ms_set(tsd_tsdn(tsd), arena,
+ *(ssize_t *)newp) : arena_muzzy_decay_ms_set(tsd_tsdn(tsd),
+ arena, *(ssize_t *)newp)) {
ret = EFAULT;
goto label_return;
}
@@ -1737,65 +2135,46 @@ arena_i_lg_dirty_mult_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
ret = 0;
label_return:
- return (ret);
+ return ret;
}
static int
-arena_i_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
- int ret;
- unsigned arena_ind = (unsigned)mib[1];
- arena_t *arena;
-
- arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
- if (arena == NULL) {
- ret = EFAULT;
- goto label_return;
- }
-
- if (oldp != NULL && oldlenp != NULL) {
- size_t oldval = arena_decay_time_get(tsd_tsdn(tsd), arena);
- READ(oldval, ssize_t);
- }
- if (newp != NULL) {
- if (newlen != sizeof(ssize_t)) {
- ret = EINVAL;
- goto label_return;
- }
- if (arena_decay_time_set(tsd_tsdn(tsd), arena,
- *(ssize_t *)newp)) {
- ret = EFAULT;
- goto label_return;
- }
- }
+arena_i_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
+ newlen, true);
+}
- ret = 0;
-label_return:
- return (ret);
+static int
+arena_i_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
+ newlen, false);
}
static int
-arena_i_chunk_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen)
-{
+arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
- unsigned arena_ind = (unsigned)mib[1];
+ unsigned arena_ind;
arena_t *arena;
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ MIB_UNSIGNED(arena_ind, 1);
if (arena_ind < narenas_total_get() && (arena =
arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) {
if (newp != NULL) {
- chunk_hooks_t old_chunk_hooks, new_chunk_hooks;
- WRITE(new_chunk_hooks, chunk_hooks_t);
- old_chunk_hooks = chunk_hooks_set(tsd_tsdn(tsd), arena,
- &new_chunk_hooks);
- READ(old_chunk_hooks, chunk_hooks_t);
+ extent_hooks_t *old_extent_hooks;
+ extent_hooks_t *new_extent_hooks
+ JEMALLOC_CC_SILENCE_INIT(NULL);
+ WRITE(new_extent_hooks, extent_hooks_t *);
+ old_extent_hooks = extent_hooks_set(tsd, arena,
+ new_extent_hooks);
+ READ(old_extent_hooks, extent_hooks_t *);
} else {
- chunk_hooks_t old_chunk_hooks =
- chunk_hooks_get(tsd_tsdn(tsd), arena);
- READ(old_chunk_hooks, chunk_hooks_t);
+ extent_hooks_t *old_extent_hooks =
+ extent_hooks_get(arena);
+ READ(old_extent_hooks, extent_hooks_t *);
}
} else {
ret = EFAULT;
@@ -1804,32 +2183,37 @@ arena_i_chunk_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
ret = 0;
label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
- return (ret);
+ return ret;
}
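/*
 * A read-side sketch (public mallctl() API; extent_hooks_t comes from
 * <jemalloc/jemalloc.h>; helper name illustrative): querying
 * "arena.<i>.extent_hooks" without a new value takes the else branch above
 * and returns the arena's current hook table.
 */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static extent_hooks_t *
example_get_extent_hooks(unsigned arena_ind) {
	extent_hooks_t *hooks;
	size_t sz = sizeof(hooks);
	char cmd[64];

	snprintf(cmd, sizeof(cmd), "arena.%u.extent_hooks", arena_ind);
	if (mallctl(cmd, &hooks, &sz, NULL, 0) != 0) {
		return NULL;
	}
	return hooks;
}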
static const ctl_named_node_t *
-arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
-{
+arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) {
const ctl_named_node_t *ret;
malloc_mutex_lock(tsdn, &ctl_mtx);
- if (i > ctl_stats.narenas) {
- ret = NULL;
- goto label_return;
+ switch (i) {
+ case MALLCTL_ARENAS_ALL:
+ case MALLCTL_ARENAS_DESTROYED:
+ break;
+ default:
+ if (i > ctl_arenas->narenas) {
+ ret = NULL;
+ goto label_return;
+ }
+ break;
}
ret = super_arena_i_node;
label_return:
malloc_mutex_unlock(tsdn, &ctl_mtx);
- return (ret);
+ return ret;
}
/******************************************************************************/
static int
arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
unsigned narenas;
@@ -1839,49 +2223,23 @@ arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = EINVAL;
goto label_return;
}
- narenas = ctl_stats.narenas;
+ narenas = ctl_arenas->narenas;
READ(narenas, unsigned);
ret = 0;
label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
- return (ret);
-}
-
-static int
-arenas_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
- int ret;
- unsigned nread, i;
-
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
- READONLY();
- if (*oldlenp != ctl_stats.narenas * sizeof(bool)) {
- ret = EINVAL;
- nread = (*oldlenp < ctl_stats.narenas * sizeof(bool))
- ? (unsigned)(*oldlenp / sizeof(bool)) : ctl_stats.narenas;
- } else {
- ret = 0;
- nread = ctl_stats.narenas;
- }
-
- for (i = 0; i < nread; i++)
- ((bool *)oldp)[i] = ctl_stats.arenas[i].initialized;
-
-label_return:
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
- return (ret);
+ return ret;
}
static int
-arenas_lg_dirty_mult_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen)
-{
+arenas_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) {
int ret;
if (oldp != NULL && oldlenp != NULL) {
- size_t oldval = arena_lg_dirty_mult_default_get();
+ size_t oldval = (dirty ? arena_dirty_decay_ms_default_get() :
+ arena_muzzy_decay_ms_default_get());
READ(oldval, ssize_t);
}
if (newp != NULL) {
@@ -1889,7 +2247,8 @@ arenas_lg_dirty_mult_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
ret = EINVAL;
goto label_return;
}
- if (arena_lg_dirty_mult_default_set(*(ssize_t *)newp)) {
+ if (dirty ? arena_dirty_decay_ms_default_set(*(ssize_t *)newp)
+ : arena_muzzy_decay_ms_default_set(*(ssize_t *)newp)) {
ret = EFAULT;
goto label_return;
}
@@ -1897,108 +2256,86 @@ arenas_lg_dirty_mult_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
ret = 0;
label_return:
- return (ret);
+ return ret;
}
static int
-arenas_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
- int ret;
-
- if (oldp != NULL && oldlenp != NULL) {
- size_t oldval = arena_decay_time_default_get();
- READ(oldval, ssize_t);
- }
- if (newp != NULL) {
- if (newlen != sizeof(ssize_t)) {
- ret = EINVAL;
- goto label_return;
- }
- if (arena_decay_time_default_set(*(ssize_t *)newp)) {
- ret = EFAULT;
- goto label_return;
- }
- }
+arenas_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
+ newlen, true);
+}
- ret = 0;
-label_return:
- return (ret);
+static int
+arenas_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
+ newlen, false);
}
CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
-CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
+CTL_RO_NL_GEN(arenas_tcache_max, tcache_maxclass, size_t)
CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned)
-CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned)
+CTL_RO_NL_GEN(arenas_nhbins, nhbins, unsigned)
CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
-CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
+CTL_RO_NL_GEN(arenas_bin_i_slab_size, arena_bin_info[mib[2]].slab_size, size_t)
static const ctl_named_node_t *
-arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
-{
-
- if (i > NBINS)
- return (NULL);
- return (super_arenas_bin_i_node);
-}
-
-CTL_RO_NL_GEN(arenas_nlruns, nlclasses, unsigned)
-CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+(szind_t)mib[2]), size_t)
-static const ctl_named_node_t *
-arenas_lrun_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
-{
-
- if (i > nlclasses)
- return (NULL);
- return (super_arenas_lrun_i_node);
+arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) {
+ if (i > NBINS) {
+ return NULL;
+ }
+ return super_arenas_bin_i_node;
}
-CTL_RO_NL_GEN(arenas_nhchunks, nhclasses, unsigned)
-CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+(szind_t)mib[2]),
+CTL_RO_NL_GEN(arenas_nlextents, NSIZES - NBINS, unsigned)
+CTL_RO_NL_GEN(arenas_lextent_i_size, sz_index2size(NBINS+(szind_t)mib[2]),
size_t)
static const ctl_named_node_t *
-arenas_hchunk_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
-{
-
- if (i > nhclasses)
- return (NULL);
- return (super_arenas_hchunk_i_node);
+arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
+ size_t i) {
+ if (i > NSIZES - NBINS) {
+ return NULL;
+ }
+ return super_arenas_lextent_i_node;
}
static int
-arenas_extend_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
- unsigned narenas;
+ extent_hooks_t *extent_hooks;
+ unsigned arena_ind;
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
- READONLY();
- if (ctl_grow(tsd_tsdn(tsd))) {
+
+ extent_hooks = (extent_hooks_t *)&extent_hooks_default;
+ WRITE(extent_hooks, extent_hooks_t *);
+ if ((arena_ind = ctl_arena_init(tsd_tsdn(tsd), extent_hooks)) ==
+ UINT_MAX) {
ret = EAGAIN;
goto label_return;
}
- narenas = ctl_stats.narenas - 1;
- READ(narenas, unsigned);
+ READ(arena_ind, unsigned);
ret = 0;
label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
- return (ret);
+ return ret;
}
/******************************************************************************/
static int
prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen)
-{
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
bool oldval;
- if (!config_prof)
- return (ENOENT);
+ if (!config_prof) {
+ return ENOENT;
+ }
if (newp != NULL) {
if (newlen != sizeof(bool)) {
@@ -2007,24 +2344,25 @@ prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
}
oldval = prof_thread_active_init_set(tsd_tsdn(tsd),
*(bool *)newp);
- } else
+ } else {
oldval = prof_thread_active_init_get(tsd_tsdn(tsd));
+ }
READ(oldval, bool);
ret = 0;
label_return:
- return (ret);
+ return ret;
}
static int
prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
bool oldval;
- if (!config_prof)
- return (ENOENT);
+ if (!config_prof) {
+ return ENOENT;
+ }
if (newp != NULL) {
if (newlen != sizeof(bool)) {
@@ -2032,24 +2370,25 @@ prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
goto label_return;
}
oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp);
- } else
+ } else {
oldval = prof_active_get(tsd_tsdn(tsd));
+ }
READ(oldval, bool);
ret = 0;
label_return:
- return (ret);
+ return ret;
}
static int
prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
const char *filename = NULL;
- if (!config_prof)
- return (ENOENT);
+ if (!config_prof) {
+ return ENOENT;
+ }
WRITEONLY();
WRITE(filename, const char *);
@@ -2061,18 +2400,18 @@ prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
- return (ret);
+ return ret;
}
static int
prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
bool oldval;
- if (!config_prof)
- return (ENOENT);
+ if (!config_prof) {
+ return ENOENT;
+ }
if (newp != NULL) {
if (newlen != sizeof(bool)) {
@@ -2080,35 +2419,37 @@ prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
goto label_return;
}
oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp);
- } else
+ } else {
oldval = prof_gdump_get(tsd_tsdn(tsd));
+ }
READ(oldval, bool);
ret = 0;
label_return:
- return (ret);
+ return ret;
}
static int
prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
size_t lg_sample = lg_prof_sample;
- if (!config_prof)
- return (ENOENT);
+ if (!config_prof) {
+ return ENOENT;
+ }
WRITEONLY();
WRITE(lg_sample, size_t);
- if (lg_sample >= (sizeof(uint64_t) << 3))
+ if (lg_sample >= (sizeof(uint64_t) << 3)) {
lg_sample = (sizeof(uint64_t) << 3) - 1;
+ }
prof_reset(tsd, lg_sample);
ret = 0;
label_return:
- return (ret);
+ return ret;
}
CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
@@ -2116,137 +2457,236 @@ CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t)
/******************************************************************************/
-CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
-CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
-CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
-CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats.metadata, size_t)
-CTL_RO_CGEN(config_stats, stats_resident, ctl_stats.resident, size_t)
-CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
-CTL_RO_CGEN(config_stats, stats_retained, ctl_stats.retained, size_t)
-
-CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
-CTL_RO_GEN(stats_arenas_i_lg_dirty_mult, ctl_stats.arenas[mib[2]].lg_dirty_mult,
+CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats->allocated, size_t)
+CTL_RO_CGEN(config_stats, stats_active, ctl_stats->active, size_t)
+CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats->metadata, size_t)
+CTL_RO_CGEN(config_stats, stats_resident, ctl_stats->resident, size_t)
+CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats->mapped, size_t)
+CTL_RO_CGEN(config_stats, stats_retained, ctl_stats->retained, size_t)
+
+CTL_RO_CGEN(config_stats, stats_background_thread_num_threads,
+ ctl_stats->background_thread.num_threads, size_t)
+CTL_RO_CGEN(config_stats, stats_background_thread_num_runs,
+ ctl_stats->background_thread.num_runs, uint64_t)
+CTL_RO_CGEN(config_stats, stats_background_thread_run_interval,
+ nstime_ns(&ctl_stats->background_thread.run_interval), uint64_t)
+
+CTL_RO_GEN(stats_arenas_i_dss, arenas_i(mib[2])->dss, const char *)
+CTL_RO_GEN(stats_arenas_i_dirty_decay_ms, arenas_i(mib[2])->dirty_decay_ms,
ssize_t)
-CTL_RO_GEN(stats_arenas_i_decay_time, ctl_stats.arenas[mib[2]].decay_time,
+CTL_RO_GEN(stats_arenas_i_muzzy_decay_ms, arenas_i(mib[2])->muzzy_decay_ms,
ssize_t)
-CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
-CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
-CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
+CTL_RO_GEN(stats_arenas_i_nthreads, arenas_i(mib[2])->nthreads, unsigned)
+CTL_RO_GEN(stats_arenas_i_uptime,
+ nstime_ns(&arenas_i(mib[2])->astats->astats.uptime), uint64_t)
+CTL_RO_GEN(stats_arenas_i_pactive, arenas_i(mib[2])->pactive, size_t)
+CTL_RO_GEN(stats_arenas_i_pdirty, arenas_i(mib[2])->pdirty, size_t)
+CTL_RO_GEN(stats_arenas_i_pmuzzy, arenas_i(mib[2])->pmuzzy, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
- ctl_stats.arenas[mib[2]].astats.mapped, size_t)
+ atomic_load_zu(&arenas_i(mib[2])->astats->astats.mapped, ATOMIC_RELAXED),
+ size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
- ctl_stats.arenas[mib[2]].astats.retained, size_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
- ctl_stats.arenas[mib[2]].astats.npurge, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
- ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
- ctl_stats.arenas[mib[2]].astats.purged, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_mapped,
- ctl_stats.arenas[mib[2]].astats.metadata_mapped, size_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_allocated,
- ctl_stats.arenas[mib[2]].astats.metadata_allocated, size_t)
+ atomic_load_zu(&arenas_i(mib[2])->astats->astats.retained, ATOMIC_RELAXED),
+ size_t)
+
+CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_npurge,
+ arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.decay_dirty.npurge),
+ uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_nmadvise,
+ arena_stats_read_u64(
+ &arenas_i(mib[2])->astats->astats.decay_dirty.nmadvise), uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_purged,
+ arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.decay_dirty.purged),
+ uint64_t)
+
+CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_npurge,
+ arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.decay_muzzy.npurge),
+ uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_nmadvise,
+ arena_stats_read_u64(
+ &arenas_i(mib[2])->astats->astats.decay_muzzy.nmadvise), uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_purged,
+ arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.decay_muzzy.purged),
+ uint64_t)
+
+CTL_RO_CGEN(config_stats, stats_arenas_i_base,
+ atomic_load_zu(&arenas_i(mib[2])->astats->astats.base, ATOMIC_RELAXED),
+ size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_internal,
+ atomic_load_zu(&arenas_i(mib[2])->astats->astats.internal, ATOMIC_RELAXED),
+ size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_bytes,
+ atomic_load_zu(&arenas_i(mib[2])->astats->astats.tcache_bytes,
+ ATOMIC_RELAXED), size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_resident,
+ atomic_load_zu(&arenas_i(mib[2])->astats->astats.resident, ATOMIC_RELAXED),
+ size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
- ctl_stats.arenas[mib[2]].allocated_small, size_t)
+ arenas_i(mib[2])->astats->allocated_small, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
- ctl_stats.arenas[mib[2]].nmalloc_small, uint64_t)
+ arenas_i(mib[2])->astats->nmalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
- ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t)
+ arenas_i(mib[2])->astats->ndalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
- ctl_stats.arenas[mib[2]].nrequests_small, uint64_t)
+ arenas_i(mib[2])->astats->nrequests_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
- ctl_stats.arenas[mib[2]].astats.allocated_large, size_t)
+ atomic_load_zu(&arenas_i(mib[2])->astats->astats.allocated_large,
+ ATOMIC_RELAXED), size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
- ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t)
+ arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.nmalloc_large),
+ uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
- ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t)
+ arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.ndalloc_large),
+ uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
- ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_huge_allocated,
- ctl_stats.arenas[mib[2]].astats.allocated_huge, size_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nmalloc,
- ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_huge_ndalloc,
- ctl_stats.arenas[mib[2]].astats.ndalloc_huge, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nrequests,
- ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t) /* Intentional. */
+ arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.nmalloc_large),
+ uint64_t) /* Intentional. */
+
+/* Lock profiling related APIs below. */
+#define RO_MUTEX_CTL_GEN(n, l) \
+CTL_RO_CGEN(config_stats, stats_##n##_num_ops, \
+ l.n_lock_ops, uint64_t) \
+CTL_RO_CGEN(config_stats, stats_##n##_num_wait, \
+ l.n_wait_times, uint64_t) \
+CTL_RO_CGEN(config_stats, stats_##n##_num_spin_acq, \
+ l.n_spin_acquired, uint64_t) \
+CTL_RO_CGEN(config_stats, stats_##n##_num_owner_switch, \
+ l.n_owner_switches, uint64_t) \
+CTL_RO_CGEN(config_stats, stats_##n##_total_wait_time, \
+ nstime_ns(&l.tot_wait_time), uint64_t) \
+CTL_RO_CGEN(config_stats, stats_##n##_max_wait_time, \
+ nstime_ns(&l.max_wait_time), uint64_t) \
+CTL_RO_CGEN(config_stats, stats_##n##_max_num_thds, \
+ l.max_n_thds, uint32_t)
+
+/* Global mutexes. */
+#define OP(mtx) \
+ RO_MUTEX_CTL_GEN(mutexes_##mtx, \
+ ctl_stats->mutex_prof_data[global_prof_mutex_##mtx])
+MUTEX_PROF_GLOBAL_MUTEXES
+#undef OP
+
+/* Per arena mutexes */
+#define OP(mtx) RO_MUTEX_CTL_GEN(arenas_i_mutexes_##mtx, \
+ arenas_i(mib[2])->astats->astats.mutex_prof_data[arena_prof_mutex_##mtx])
+MUTEX_PROF_ARENA_MUTEXES
+#undef OP
+
+/* tcache bin mutex */
+RO_MUTEX_CTL_GEN(arenas_i_bins_j_mutex,
+ arenas_i(mib[2])->astats->bstats[mib[4]].mutex_data)
+#undef RO_MUTEX_CTL_GEN
+
+/* Resets all mutex stats, including global, arena and bin mutexes. */
+static int
+stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ if (!config_stats) {
+ return ENOENT;
+ }
+
+ tsdn_t *tsdn = tsd_tsdn(tsd);
+
+#define MUTEX_PROF_RESET(mtx) \
+ malloc_mutex_lock(tsdn, &mtx); \
+ malloc_mutex_prof_data_reset(tsdn, &mtx); \
+ malloc_mutex_unlock(tsdn, &mtx);
+
+ /* Global mutexes: ctl and prof. */
+ MUTEX_PROF_RESET(ctl_mtx);
+ if (have_background_thread) {
+ MUTEX_PROF_RESET(background_thread_lock);
+ }
+ if (config_prof && opt_prof) {
+ MUTEX_PROF_RESET(bt2gctx_mtx);
+ }
+
+ /* Per arena mutexes. */
+ unsigned n = narenas_total_get();
+
+ for (unsigned i = 0; i < n; i++) {
+ arena_t *arena = arena_get(tsdn, i, false);
+ if (!arena) {
+ continue;
+ }
+ MUTEX_PROF_RESET(arena->large_mtx);
+ MUTEX_PROF_RESET(arena->extent_avail_mtx);
+ MUTEX_PROF_RESET(arena->extents_dirty.mtx);
+ MUTEX_PROF_RESET(arena->extents_muzzy.mtx);
+ MUTEX_PROF_RESET(arena->extents_retained.mtx);
+ MUTEX_PROF_RESET(arena->decay_dirty.mtx);
+ MUTEX_PROF_RESET(arena->decay_muzzy.mtx);
+ MUTEX_PROF_RESET(arena->tcache_ql_mtx);
+ MUTEX_PROF_RESET(arena->base->mtx);
+
+ for (szind_t i = 0; i < NBINS; i++) {
+ arena_bin_t *bin = &arena->bins[i];
+ MUTEX_PROF_RESET(bin->lock);
+ }
+ }
+#undef MUTEX_PROF_RESET
+ return 0;
+}
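/*
 * A caller-side sketch (public mallctl() API; helper name illustrative): the
 * reset takes no old or new value, so it is invoked with NULL/zero arguments
 * to clear the mutex profiling counters handled above.
 */
#include <jemalloc/jemalloc.h>

static int
example_reset_mutex_stats(void) {
	return mallctl("stats.mutexes.reset", NULL, NULL, NULL, 0);
}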
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
- ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
- ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
- ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].nrequests, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs,
- ctl_stats.arenas[mib[2]].bstats[mib[4]].curregs, size_t)
-CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills,
- ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t)
-CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes,
- ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nruns,
- ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns,
- ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns,
- ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].curregs, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nfills,
+ arenas_i(mib[2])->astats->bstats[mib[4]].nfills, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nflushes,
+ arenas_i(mib[2])->astats->bstats[mib[4]].nflushes, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nslabs,
+ arenas_i(mib[2])->astats->bstats[mib[4]].nslabs, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreslabs,
+ arenas_i(mib[2])->astats->bstats[mib[4]].reslabs, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs,
+ arenas_i(mib[2])->astats->bstats[mib[4]].curslabs, size_t)
static const ctl_named_node_t *
stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
- size_t j)
-{
-
- if (j > NBINS)
- return (NULL);
- return (super_stats_arenas_i_bins_j_node);
-}
-
-CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nmalloc,
- ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_ndalloc,
- ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests,
- ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
- ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)
-
-static const ctl_named_node_t *
-stats_arenas_i_lruns_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
- size_t j)
-{
-
- if (j > nlclasses)
- return (NULL);
- return (super_stats_arenas_i_lruns_j_node);
+ size_t j) {
+ if (j > NBINS) {
+ return NULL;
+ }
+ return super_stats_arenas_i_bins_j_node;
}
-CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nmalloc,
- ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_ndalloc,
- ctl_stats.arenas[mib[2]].hstats[mib[4]].ndalloc, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nrequests,
- ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, /* Intentional. */
+CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc,
+ arena_stats_read_u64(&arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc),
uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_curhchunks,
- ctl_stats.arenas[mib[2]].hstats[mib[4]].curhchunks, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_ndalloc,
+ arena_stats_read_u64(&arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc),
+ uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nrequests,
+ arena_stats_read_u64(&arenas_i(mib[2])->astats->lstats[mib[4]].nrequests),
+ uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents,
+ arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t)
static const ctl_named_node_t *
-stats_arenas_i_hchunks_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
- size_t j)
-{
-
- if (j > nhclasses)
- return (NULL);
- return (super_stats_arenas_i_hchunks_j_node);
+stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
+ size_t j) {
+ if (j > NSIZES - NBINS) {
+ return NULL;
+ }
+ return super_stats_arenas_i_lextents_j_node;
}
static const ctl_named_node_t *
-stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
-{
- const ctl_named_node_t * ret;
+stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) {
+ const ctl_named_node_t *ret;
+ size_t a;
malloc_mutex_lock(tsdn, &ctl_mtx);
- if (i > ctl_stats.narenas || !ctl_stats.arenas[i].initialized) {
+ a = arenas_i2a_impl(i, true, true);
+ if (a == UINT_MAX || !ctl_arenas->arenas[a]->initialized) {
ret = NULL;
goto label_return;
}
@@ -2254,5 +2694,5 @@ stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
ret = super_stats_arenas_i_node;
label_return:
malloc_mutex_unlock(tsdn, &ctl_mtx);
- return (ret);
+ return ret;
}
diff --git a/contrib/jemalloc/src/extent.c b/contrib/jemalloc/src/extent.c
index ff8de2fe916c..f31ed32ebb9a 100644
--- a/contrib/jemalloc/src/extent.c
+++ b/contrib/jemalloc/src/extent.c
@@ -1,33 +1,260 @@
-#define JEMALLOC_EXTENT_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define JEMALLOC_EXTENT_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/extent_dss.h"
+#include "jemalloc/internal/extent_mmap.h"
+#include "jemalloc/internal/ph.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/mutex_pool.h"
+
+/******************************************************************************/
+/* Data. */
+
+rtree_t extents_rtree;
+/* Keyed by the address of the extent_t being protected. */
+mutex_pool_t extent_mutex_pool;
+
+static const bitmap_info_t extents_bitmap_info =
+ BITMAP_INFO_INITIALIZER(NPSIZES+1);
+
+static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr,
+ size_t size, size_t alignment, bool *zero, bool *commit,
+ unsigned arena_ind);
+static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr,
+ size_t size, bool committed, unsigned arena_ind);
+static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr,
+ size_t size, bool committed, unsigned arena_ind);
+static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr,
+ size_t size, size_t offset, size_t length, unsigned arena_ind);
+static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length, bool growing_retained);
+static bool extent_decommit_default(extent_hooks_t *extent_hooks,
+ void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
+#ifdef PAGES_CAN_PURGE_LAZY
+static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr,
+ size_t size, size_t offset, size_t length, unsigned arena_ind);
+#endif
+static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length, bool growing_retained);
+#ifdef PAGES_CAN_PURGE_FORCED
+static bool extent_purge_forced_default(extent_hooks_t *extent_hooks,
+ void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
+#endif
+static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length, bool growing_retained);
+#ifdef JEMALLOC_MAPS_COALESCE
+static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr,
+ size_t size, size_t size_a, size_t size_b, bool committed,
+ unsigned arena_ind);
+#endif
+static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
+ szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
+ bool growing_retained);
+#ifdef JEMALLOC_MAPS_COALESCE
+static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a,
+ size_t size_a, void *addr_b, size_t size_b, bool committed,
+ unsigned arena_ind);
+#endif
+static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
+ bool growing_retained);
+
+const extent_hooks_t extent_hooks_default = {
+ extent_alloc_default,
+ extent_dalloc_default,
+ extent_destroy_default,
+ extent_commit_default,
+ extent_decommit_default
+#ifdef PAGES_CAN_PURGE_LAZY
+ ,
+ extent_purge_lazy_default
+#else
+ ,
+ NULL
+#endif
+#ifdef PAGES_CAN_PURGE_FORCED
+ ,
+ extent_purge_forced_default
+#else
+ ,
+ NULL
+#endif
+#ifdef JEMALLOC_MAPS_COALESCE
+ ,
+ extent_split_default,
+ extent_merge_default
+#endif
+};
+
+/* Used exclusively for gdump triggering. */
+static atomic_zu_t curpages;
+static atomic_zu_t highpages;
+
+/******************************************************************************/
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
+
+static void extent_deregister(tsdn_t *tsdn, extent_t *extent);
+static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
+ size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind,
+ bool *zero, bool *commit, bool growing_retained);
+static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
+ extent_t *extent, bool *coalesced, bool growing_retained);
+static void extent_record(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent,
+ bool growing_retained);
/******************************************************************************/
+rb_gen(UNUSED, extent_avail_, extent_tree_t, extent_t, rb_link,
+ extent_esnead_comp)
+
+typedef enum {
+ lock_result_success,
+ lock_result_failure,
+ lock_result_no_extent
+} lock_result_t;
+
+static lock_result_t
+extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm,
+ extent_t **result) {
+ extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree,
+ elm, true);
+
+ if (extent1 == NULL) {
+ return lock_result_no_extent;
+ }
+ /*
+ * It's possible that the extent changed out from under us, and with it
+ * the leaf->extent mapping. We have to recheck while holding the lock.
+ */
+ extent_lock(tsdn, extent1);
+ extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn,
+ &extents_rtree, elm, true);
+
+ if (extent1 == extent2) {
+ *result = extent1;
+ return lock_result_success;
+ } else {
+ extent_unlock(tsdn, extent1);
+ return lock_result_failure;
+ }
+}
+
+/*
+ * Returns a pool-locked extent_t * if there's one associated with the given
+ * address, and NULL otherwise.
+ */
+static extent_t *
+extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr) {
+ extent_t *ret = NULL;
+ rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree,
+ rtree_ctx, (uintptr_t)addr, false, false);
+ if (elm == NULL) {
+ return NULL;
+ }
+ lock_result_t lock_result;
+ do {
+ lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret);
+ } while (lock_result == lock_result_failure);
+ return ret;
+}
+
+extent_t *
+extent_alloc(tsdn_t *tsdn, arena_t *arena) {
+ malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
+ extent_t *extent = extent_avail_first(&arena->extent_avail);
+ if (extent == NULL) {
+ malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
+ return base_alloc_extent(tsdn, arena->base);
+ }
+ extent_avail_remove(&arena->extent_avail, extent);
+ malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
+ return extent;
+}
+
+void
+extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
+ malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
+ extent_avail_insert(&arena->extent_avail, extent);
+ malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
+}
+
+extent_hooks_t *
+extent_hooks_get(arena_t *arena) {
+ return base_extent_hooks_get(arena->base);
+}
+
+extent_hooks_t *
+extent_hooks_set(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks) {
+ background_thread_info_t *info;
+ if (have_background_thread) {
+ info = arena_background_thread_info_get(arena);
+ malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
+ }
+ extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks);
+ if (have_background_thread) {
+ malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
+ }
+
+ return ret;
+}
+
+static void
+extent_hooks_assure_initialized(arena_t *arena,
+ extent_hooks_t **r_extent_hooks) {
+ if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) {
+ *r_extent_hooks = extent_hooks_get(arena);
+ }
+}
+
#ifndef JEMALLOC_JET
static
#endif
size_t
extent_size_quantize_floor(size_t size) {
size_t ret;
- szind_t ind;
+ pszind_t pind;
assert(size > 0);
+ assert((size & PAGE_MASK) == 0);
- ind = size2index(size + 1);
- if (ind == 0) {
- /* Avoid underflow. */
- return (index2size(0));
+ pind = sz_psz2ind(size - sz_large_pad + 1);
+ if (pind == 0) {
+ /*
+ * Avoid underflow. This short-circuit would also do the right
+ * thing for all sizes in the range for which there are
+ * PAGE-spaced size classes, but it's simplest to just handle
+ * the one case that would cause erroneous results.
+ */
+ return size;
}
- ret = index2size(ind - 1);
+ ret = sz_pind2sz(pind - 1) + sz_large_pad;
assert(ret <= size);
- return (ret);
+ return ret;
}
+#ifndef JEMALLOC_JET
+static
+#endif
size_t
extent_size_quantize_ceil(size_t size) {
size_t ret;
assert(size > 0);
+ assert(size - sz_large_pad <= LARGE_MAXCLASS);
+ assert((size & PAGE_MASK) == 0);
ret = extent_size_quantize_floor(size);
if (ret < size) {
@@ -39,58 +266,1654 @@ extent_size_quantize_ceil(size_t size) {
* search would potentially find sufficiently aligned available
* memory somewhere lower.
*/
- ret = index2size(size2index(ret + 1));
+ ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
+ sz_large_pad;
+ }
+ return ret;
+}
+
+/* Generate pairing heap functions. */
+ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)
+
+bool
+extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
+ bool delay_coalesce) {
+ if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS,
+ malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ for (unsigned i = 0; i < NPSIZES+1; i++) {
+ extent_heap_new(&extents->heaps[i]);
+ }
+ bitmap_init(extents->bitmap, &extents_bitmap_info, true);
+ extent_list_init(&extents->lru);
+ atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED);
+ extents->state = state;
+ extents->delay_coalesce = delay_coalesce;
+ return false;
+}
+
+extent_state_t
+extents_state_get(const extents_t *extents) {
+ return extents->state;
+}
+
+size_t
+extents_npages_get(extents_t *extents) {
+ return atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
+}
+
+static void
+extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent,
+ bool preserve_lru) {
+ malloc_mutex_assert_owner(tsdn, &extents->mtx);
+ assert(extent_state_get(extent) == extents->state);
+
+ size_t size = extent_size_get(extent);
+ size_t psz = extent_size_quantize_floor(size);
+ pszind_t pind = sz_psz2ind(psz);
+ if (extent_heap_empty(&extents->heaps[pind])) {
+ bitmap_unset(extents->bitmap, &extents_bitmap_info,
+ (size_t)pind);
+ }
+ extent_heap_insert(&extents->heaps[pind], extent);
+ if (!preserve_lru) {
+ extent_list_append(&extents->lru, extent);
+ }
+ size_t npages = size >> LG_PAGE;
+ /*
+ * All modifications to npages hold the mutex (as asserted above), so we
+ * don't need an atomic fetch-add; we can get by with a load followed by
+ * a store.
+ */
+ size_t cur_extents_npages =
+ atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
+ atomic_store_zu(&extents->npages, cur_extents_npages + npages,
+ ATOMIC_RELAXED);
+}
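
The npages bookkeeping pattern used above can be reduced to the compile-ready sketch below (pthreads plus C11 atomics; the type and function names are invented). Because every writer holds the mutex, a relaxed load followed by a relaxed store is enough; the atomic type exists only so that readers such as extents_npages_get() can skip the lock.

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

typedef struct {
    pthread_mutex_t mtx;
    atomic_size_t npages; /* Read without the mutex by readers. */
} toy_extents_t;

/* Writer: serialized by mtx, so no atomic fetch-add is needed. */
static void
toy_extents_add_pages(toy_extents_t *e, size_t npages) {
    pthread_mutex_lock(&e->mtx);
    size_t cur = atomic_load_explicit(&e->npages, memory_order_relaxed);
    atomic_store_explicit(&e->npages, cur + npages, memory_order_relaxed);
    pthread_mutex_unlock(&e->mtx);
}

/* Reader: lock-free, mirroring extents_npages_get(). */
static size_t
toy_extents_npages_get(toy_extents_t *e) {
    return atomic_load_explicit(&e->npages, memory_order_relaxed);
}

int
main(void) {
    toy_extents_t e = {PTHREAD_MUTEX_INITIALIZER, 0};
    toy_extents_add_pages(&e, 8);
    return toy_extents_npages_get(&e) == 8 ? 0 : 1;
}
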
+
+static void
+extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent,
+ bool preserve_lru) {
+ malloc_mutex_assert_owner(tsdn, &extents->mtx);
+ assert(extent_state_get(extent) == extents->state);
+
+ size_t size = extent_size_get(extent);
+ size_t psz = extent_size_quantize_floor(size);
+ pszind_t pind = sz_psz2ind(psz);
+ extent_heap_remove(&extents->heaps[pind], extent);
+ if (extent_heap_empty(&extents->heaps[pind])) {
+ bitmap_set(extents->bitmap, &extents_bitmap_info,
+ (size_t)pind);
+ }
+ if (!preserve_lru) {
+ extent_list_remove(&extents->lru, extent);
+ }
+ size_t npages = size >> LG_PAGE;
+ /*
+ * As in extents_insert_locked, we hold extents->mtx and so don't need
+ * atomic operations for updating extents->npages.
+ */
+ size_t cur_extents_npages =
+ atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
+ assert(cur_extents_npages >= npages);
+ atomic_store_zu(&extents->npages,
+ cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED);
+}
+
+/* Do any-best-fit extent selection, i.e. select any extent that best fits. */
+static extent_t *
+extents_best_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
+ size_t size) {
+ pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
+ pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
+ (size_t)pind);
+ if (i < NPSIZES+1) {
+ assert(!extent_heap_empty(&extents->heaps[i]));
+ extent_t *extent = extent_heap_any(&extents->heaps[i]);
+ assert(extent_size_get(extent) >= size);
+ return extent;
}
+
+ return NULL;
+}
+
+/*
+ * Do first-fit extent selection, i.e. select the oldest/lowest extent that is
+ * large enough.
+ */
+static extent_t *
+extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
+ size_t size) {
+ extent_t *ret = NULL;
+
+ pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
+ for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
+ &extents_bitmap_info, (size_t)pind); i < NPSIZES+1; i =
+ (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
+ (size_t)i+1)) {
+ assert(!extent_heap_empty(&extents->heaps[i]));
+ extent_t *extent = extent_heap_first(&extents->heaps[i]);
+ assert(extent_size_get(extent) >= size);
+ if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
+ ret = extent;
+ }
+ if (i == NPSIZES) {
+ break;
+ }
+ assert(i < NPSIZES);
+ }
+
return ret;
}
-JEMALLOC_INLINE_C int
-extent_sz_comp(const extent_node_t *a, const extent_node_t *b)
-{
- size_t a_qsize = extent_size_quantize_floor(extent_node_size_get(a));
- size_t b_qsize = extent_size_quantize_floor(extent_node_size_get(b));
+/*
+ * Do {best,first}-fit extent selection, where the selection policy choice is
+ * based on extents->delay_coalesce. Best-fit selection requires less
+ * searching, but its layout policy is less stable and may cause higher virtual
+ * memory fragmentation as a side effect.
+ */
+static extent_t *
+extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
+ size_t size) {
+ malloc_mutex_assert_owner(tsdn, &extents->mtx);
- return ((a_qsize > b_qsize) - (a_qsize < b_qsize));
+ return extents->delay_coalesce ? extents_best_fit_locked(tsdn, arena,
+ extents, size) : extents_first_fit_locked(tsdn, arena, extents,
+ size);
}
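
The policy difference chosen by delay_coalesce above can be shown with a flat array of free-run sizes standing in for the per-class pairing heaps and bitmap; everything below is invented for the sketch. In the real code the bitmap lets best fit stop at the first non-empty size class, which is why it searches less than first fit.

#include <stddef.h>
#include <stdio.h>

/* First fit: the first (oldest/lowest-indexed) run that is large enough. */
static size_t
toy_first_fit(const size_t *runs, size_t nruns, size_t want) {
    for (size_t i = 0; i < nruns; i++) {
        if (runs[i] >= want) {
            return i;
        }
    }
    return nruns; /* No fit. */
}

/* Best fit: the smallest run that is still large enough. */
static size_t
toy_best_fit(const size_t *runs, size_t nruns, size_t want) {
    size_t best = nruns;
    for (size_t i = 0; i < nruns; i++) {
        if (runs[i] >= want && (best == nruns || runs[i] < runs[best])) {
            best = i;
        }
    }
    return best;
}

int
main(void) {
    const size_t runs[] = {64, 16, 32, 16};
    /* First fit returns index 0 (64); best fit returns index 1 (16). */
    printf("first=%zu best=%zu\n", toy_first_fit(runs, 4, 16),
        toy_best_fit(runs, 4, 16));
    return 0;
}
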
-JEMALLOC_INLINE_C int
-extent_sn_comp(const extent_node_t *a, const extent_node_t *b)
-{
- size_t a_sn = extent_node_sn_get(a);
- size_t b_sn = extent_node_sn_get(b);
+static bool
+extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
+ extent_t *extent) {
+ extent_state_set(extent, extent_state_active);
+ bool coalesced;
+ extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx,
+ extents, extent, &coalesced, false);
+ extent_state_set(extent, extents_state_get(extents));
- return ((a_sn > b_sn) - (a_sn < b_sn));
+ if (!coalesced) {
+ return true;
+ }
+ extents_insert_locked(tsdn, extents, extent, true);
+ return false;
}
-JEMALLOC_INLINE_C int
-extent_ad_comp(const extent_node_t *a, const extent_node_t *b)
-{
- uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
- uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
+extent_t *
+extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
+ extents_t *extents, void *new_addr, size_t size, size_t pad,
+ size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
+ assert(size + pad != 0);
+ assert(alignment != 0);
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
- return ((a_addr > b_addr) - (a_addr < b_addr));
+ return extent_recycle(tsdn, arena, r_extent_hooks, extents, new_addr,
+ size, pad, alignment, slab, szind, zero, commit, false);
}
-JEMALLOC_INLINE_C int
-extent_szsnad_comp(const extent_node_t *a, const extent_node_t *b)
-{
- int ret;
+void
+extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
+ extents_t *extents, extent_t *extent) {
+ assert(extent_base_get(extent) != NULL);
+ assert(extent_size_get(extent) != 0);
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
- ret = extent_sz_comp(a, b);
- if (ret != 0)
- return (ret);
+ extent_addr_set(extent, extent_base_get(extent));
+ extent_zeroed_set(extent, false);
- ret = extent_sn_comp(a, b);
- if (ret != 0)
- return (ret);
+ extent_record(tsdn, arena, r_extent_hooks, extents, extent, false);
+}
+
+extent_t *
+extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
+ extents_t *extents, size_t npages_min) {
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+
+ malloc_mutex_lock(tsdn, &extents->mtx);
+
+ /*
+ * Get the LRU coalesced extent, if any. If coalescing was delayed,
+ * the loop will iterate until the LRU extent is fully coalesced.
+ */
+ extent_t *extent;
+ while (true) {
+ /* Get the LRU extent, if any. */
+ extent = extent_list_first(&extents->lru);
+ if (extent == NULL) {
+ goto label_return;
+ }
+ /* Check the eviction limit. */
+ size_t npages = extent_size_get(extent) >> LG_PAGE;
+ size_t extents_npages = atomic_load_zu(&extents->npages,
+ ATOMIC_RELAXED);
+ if (extents_npages - npages < npages_min) {
+ extent = NULL;
+ goto label_return;
+ }
+ extents_remove_locked(tsdn, extents, extent, false);
+ if (!extents->delay_coalesce) {
+ break;
+ }
+ /* Try to coalesce. */
+ if (extent_try_delayed_coalesce(tsdn, arena, r_extent_hooks,
+ rtree_ctx, extents, extent)) {
+ break;
+ }
+ /*
+ * The LRU extent was just coalesced and the result placed in
+ * the LRU at its neighbor's position. Start over.
+ */
+ }
+
+ /*
+ * Either mark the extent active or deregister it to protect against
+ * concurrent operations.
+ */
+ switch (extents_state_get(extents)) {
+ case extent_state_active:
+ not_reached();
+ case extent_state_dirty:
+ case extent_state_muzzy:
+ extent_state_set(extent, extent_state_active);
+ break;
+ case extent_state_retained:
+ extent_deregister(tsdn, extent);
+ break;
+ default:
+ not_reached();
+ }
+
+label_return:
+ malloc_mutex_unlock(tsdn, &extents->mtx);
+ return extent;
+}
+
+static void
+extents_leak(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
+ extents_t *extents, extent_t *extent, bool growing_retained) {
+ /*
+ * Leak extent after making sure its pages have already been purged, so
+ * that this is only a virtual memory leak.
+ */
+ if (extents_state_get(extents) == extent_state_dirty) {
+ if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks,
+ extent, 0, extent_size_get(extent), growing_retained)) {
+ extent_purge_forced_impl(tsdn, arena, r_extent_hooks,
+ extent, 0, extent_size_get(extent),
+ growing_retained);
+ }
+ }
+ extent_dalloc(tsdn, arena, extent);
+}
- ret = extent_ad_comp(a, b);
- return (ret);
+void
+extents_prefork(tsdn_t *tsdn, extents_t *extents) {
+ malloc_mutex_prefork(tsdn, &extents->mtx);
}
-/* Generate red-black tree functions. */
-rb_gen(, extent_tree_szsnad_, extent_tree_t, extent_node_t, szsnad_link,
- extent_szsnad_comp)
+void
+extents_postfork_parent(tsdn_t *tsdn, extents_t *extents) {
+ malloc_mutex_postfork_parent(tsdn, &extents->mtx);
+}
+
+void
+extents_postfork_child(tsdn_t *tsdn, extents_t *extents) {
+ malloc_mutex_postfork_child(tsdn, &extents->mtx);
+}
+
+static void
+extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
+ extent_t *extent, bool preserve_lru) {
+ assert(extent_arena_get(extent) == arena);
+ assert(extent_state_get(extent) == extent_state_active);
+
+ extent_state_set(extent, extents_state_get(extents));
+ extents_insert_locked(tsdn, extents, extent, preserve_lru);
+}
+
+static void
+extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
+ extent_t *extent, bool preserve_lru) {
+ malloc_mutex_lock(tsdn, &extents->mtx);
+ extent_deactivate_locked(tsdn, arena, extents, extent, preserve_lru);
+ malloc_mutex_unlock(tsdn, &extents->mtx);
+}
+
+static void
+extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
+ extent_t *extent, bool preserve_lru) {
+ assert(extent_arena_get(extent) == arena);
+ assert(extent_state_get(extent) == extents_state_get(extents));
+
+ extents_remove_locked(tsdn, extents, extent, preserve_lru);
+ extent_state_set(extent, extent_state_active);
+}
+
+static bool
+extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
+ const extent_t *extent, bool dependent, bool init_missing,
+ rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
+ *r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)extent_base_get(extent), dependent, init_missing);
+ if (!dependent && *r_elm_a == NULL) {
+ return true;
+ }
+ assert(*r_elm_a != NULL);
+
+ *r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)extent_last_get(extent), dependent, init_missing);
+ if (!dependent && *r_elm_b == NULL) {
+ return true;
+ }
+ assert(*r_elm_b != NULL);
+
+ return false;
+}
+
+static void
+extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a,
+ rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) {
+ rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab);
+ if (elm_b != NULL) {
+ rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind,
+ slab);
+ }
+}
+
+static void
+extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent,
+ szind_t szind) {
+ assert(extent_slab_get(extent));
+
+ /* Register interior. */
+ for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
+ rtree_write(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
+ LG_PAGE), extent, szind, true);
+ }
+}
+
+static void
+extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) {
+ cassert(config_prof);
+ /* prof_gdump() requirement. */
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ if (opt_prof && extent_state_get(extent) == extent_state_active) {
+ size_t nadd = extent_size_get(extent) >> LG_PAGE;
+ size_t cur = atomic_fetch_add_zu(&curpages, nadd,
+ ATOMIC_RELAXED) + nadd;
+ size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED);
+ while (cur > high && !atomic_compare_exchange_weak_zu(
+ &highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) {
+ /*
+ * Don't refresh cur, because it may have decreased
+ * since this thread lost the highpages update race.
+ * Note that high is updated in case of CAS failure.
+ */
+ }
+ if (cur > high && prof_gdump_get_unlocked()) {
+ prof_gdump(tsdn);
+ }
+ }
+}
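
The highpages watermark update above follows a common lock-free pattern: a relaxed fetch-add of the running count, then a weak compare-exchange loop that only ever raises the recorded maximum. A self-contained C11 sketch with invented variable names:

#include <stdatomic.h>
#include <stddef.h>

static atomic_size_t toy_curpages;
static atomic_size_t toy_highpages;

static void
toy_pages_add(size_t nadd) {
    size_t cur = atomic_fetch_add_explicit(&toy_curpages, nadd,
        memory_order_relaxed) + nadd;
    size_t high = atomic_load_explicit(&toy_highpages,
        memory_order_relaxed);
    /*
     * A failed CAS reloads high, so the loop exits as soon as another
     * thread has published an equal or larger maximum.
     */
    while (cur > high && !atomic_compare_exchange_weak_explicit(
        &toy_highpages, &high, cur, memory_order_relaxed,
        memory_order_relaxed)) {
    }
}

int
main(void) {
    toy_pages_add(3);
    toy_pages_add(2);
    return atomic_load_explicit(&toy_highpages, memory_order_relaxed) == 5
        ? 0 : 1;
}
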
+
+static void
+extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) {
+ cassert(config_prof);
+
+ if (opt_prof && extent_state_get(extent) == extent_state_active) {
+ size_t nsub = extent_size_get(extent) >> LG_PAGE;
+ assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub);
+ atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED);
+ }
+}
+
+static bool
+extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) {
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+ rtree_leaf_elm_t *elm_a, *elm_b;
+
+ /*
+ * We need to hold the lock to protect against a concurrent coalesce
+ * operation that sees us in a partial state.
+ */
+ extent_lock(tsdn, extent);
+
+ if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true,
+ &elm_a, &elm_b)) {
+ return true;
+ }
+
+ szind_t szind = extent_szind_get_maybe_invalid(extent);
+ bool slab = extent_slab_get(extent);
+ extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab);
+ if (slab) {
+ extent_interior_register(tsdn, rtree_ctx, extent, szind);
+ }
+
+ extent_unlock(tsdn, extent);
+
+ if (config_prof && gdump_add) {
+ extent_gdump_add(tsdn, extent);
+ }
+
+ return false;
+}
+
+static bool
+extent_register(tsdn_t *tsdn, extent_t *extent) {
+ return extent_register_impl(tsdn, extent, true);
+}
+
+static bool
+extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) {
+ return extent_register_impl(tsdn, extent, false);
+}
+
+static void
+extent_reregister(tsdn_t *tsdn, extent_t *extent) {
+ bool err = extent_register(tsdn, extent);
+ assert(!err);
+}
+
+static void
+extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
+ extent_t *extent) {
+ size_t i;
+
+ assert(extent_slab_get(extent));
+
+ for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
+ rtree_clear(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
+ LG_PAGE));
+ }
+}
+
+static void
+extent_deregister(tsdn_t *tsdn, extent_t *extent) {
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+ rtree_leaf_elm_t *elm_a, *elm_b;
+ extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false,
+ &elm_a, &elm_b);
+
+ extent_lock(tsdn, extent);
+
+ extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, NSIZES, false);
+ if (extent_slab_get(extent)) {
+ extent_interior_deregister(tsdn, rtree_ctx, extent);
+ extent_slab_set(extent, false);
+ }
+
+ extent_unlock(tsdn, extent);
+
+ if (config_prof) {
+ extent_gdump_sub(tsdn, extent);
+ }
+}
+
+static extent_t *
+extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
+ void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
+ bool *zero, bool *commit, bool growing_retained) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, growing_retained ? 1 : 0);
+ assert(alignment > 0);
+ if (config_debug && new_addr != NULL) {
+ /*
+ * Non-NULL new_addr has two use cases:
+ *
+ * 1) Recycle a known-extant extent, e.g. during purging.
+ * 2) Perform in-place expanding reallocation.
+ *
+ * Regardless of use case, new_addr must either refer to a
+ * non-existing extent, or to the base of an extant extent,
+ * since only active slabs support interior lookups (which of
+ * course cannot be recycled).
+ */
+ assert(PAGE_ADDR2BASE(new_addr) == new_addr);
+ assert(pad == 0);
+ assert(alignment <= PAGE);
+ }
+
+ size_t esize = size + pad;
+ size_t alloc_size = esize + PAGE_CEILING(alignment) - PAGE;
+ /* Beware size_t wrap-around. */
+ if (alloc_size < esize) {
+ return NULL;
+ }
+ malloc_mutex_lock(tsdn, &extents->mtx);
+ extent_hooks_assure_initialized(arena, r_extent_hooks);
+ extent_t *extent;
+ if (new_addr != NULL) {
+ extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr);
+ if (extent != NULL) {
+ /*
+ * We might null-out extent to report an error, but we
+ * still need to unlock the associated mutex after.
+ */
+ extent_t *unlock_extent = extent;
+ assert(extent_base_get(extent) == new_addr);
+ if (extent_arena_get(extent) != arena ||
+ extent_size_get(extent) < esize ||
+ extent_state_get(extent) !=
+ extents_state_get(extents)) {
+ extent = NULL;
+ }
+ extent_unlock(tsdn, unlock_extent);
+ }
+ } else {
+ extent = extents_fit_locked(tsdn, arena, extents, alloc_size);
+ }
+ if (extent == NULL) {
+ malloc_mutex_unlock(tsdn, &extents->mtx);
+ return NULL;
+ }
+
+ extent_activate_locked(tsdn, arena, extents, extent, false);
+ malloc_mutex_unlock(tsdn, &extents->mtx);
+
+ if (extent_zeroed_get(extent)) {
+ *zero = true;
+ }
+ if (extent_committed_get(extent)) {
+ *commit = true;
+ }
+
+ return extent;
+}
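
The alloc_size arithmetic above (esize + PAGE_CEILING(alignment) - PAGE, with a wrap-around check) can be isolated as a small sketch; the toy page size and names below are assumptions for illustration, and alignment is taken to already be a page multiple, as it is after PAGE_CEILING().

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define TOY_PAGE ((size_t)4096)

/*
 * Worst-case request: size + alignment - PAGE guarantees that some
 * suitably aligned address lies inside the returned run.
 */
static bool
toy_worst_case_size(size_t esize, size_t alignment, size_t *alloc_size) {
    *alloc_size = esize + alignment - TOY_PAGE;
    /* Beware size_t wrap-around, as in extent_recycle_extract(). */
    return *alloc_size >= esize;
}

int
main(void) {
    size_t alloc_size;
    if (toy_worst_case_size(65536, 16384, &alloc_size)) {
        printf("ask for %zu bytes\n", alloc_size); /* 77824 */
    }
    return 0;
}
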
+
+static extent_t *
+extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
+ void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
+ szind_t szind, extent_t *extent, bool growing_retained) {
+ size_t esize = size + pad;
+ size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(extent),
+ PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(extent);
+ assert(new_addr == NULL || leadsize == 0);
+ assert(extent_size_get(extent) >= leadsize + esize);
+ size_t trailsize = extent_size_get(extent) - leadsize - esize;
+
+ /* Split the lead. */
+ if (leadsize != 0) {
+ extent_t *lead = extent;
+ extent = extent_split_impl(tsdn, arena, r_extent_hooks,
+ lead, leadsize, NSIZES, false, esize + trailsize, szind,
+ slab, growing_retained);
+ if (extent == NULL) {
+ extent_deregister(tsdn, lead);
+ extents_leak(tsdn, arena, r_extent_hooks, extents,
+ lead, growing_retained);
+ return NULL;
+ }
+ extent_deactivate(tsdn, arena, extents, lead, false);
+ }
+
+ /* Split the trail. */
+ if (trailsize != 0) {
+ extent_t *trail = extent_split_impl(tsdn, arena,
+ r_extent_hooks, extent, esize, szind, slab, trailsize,
+ NSIZES, false, growing_retained);
+ if (trail == NULL) {
+ extent_deregister(tsdn, extent);
+ extents_leak(tsdn, arena, r_extent_hooks, extents,
+ extent, growing_retained);
+ return NULL;
+ }
+ extent_deactivate(tsdn, arena, extents, trail, false);
+ } else if (leadsize == 0) {
+ /*
+ * Splitting causes szind to be set as a side effect, but no
+ * splitting occurred.
+ */
+ extent_szind_set(extent, szind);
+ if (szind != NSIZES) {
+ rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)extent_addr_get(extent), szind, slab);
+ if (slab && extent_size_get(extent) > PAGE) {
+ rtree_szind_slab_update(tsdn, &extents_rtree,
+ rtree_ctx,
+ (uintptr_t)extent_past_get(extent) -
+ (uintptr_t)PAGE, szind, slab);
+ }
+ }
+ }
+
+ return extent;
+}
+
+static extent_t *
+extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
+ extents_t *extents, void *new_addr, size_t size, size_t pad,
+ size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit,
+ bool growing_retained) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, growing_retained ? 1 : 0);
+ assert(new_addr == NULL || !slab);
+ assert(pad == 0 || !slab);
+ assert(!*zero || !slab);
+
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+
+ bool committed = false;
+ extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks,
+ rtree_ctx, extents, new_addr, size, pad, alignment, slab, zero,
+ &committed, growing_retained);
+ if (extent == NULL) {
+ return NULL;
+ }
+ if (committed) {
+ *commit = true;
+ }
+
+ extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx,
+ extents, new_addr, size, pad, alignment, slab, szind, extent,
+ growing_retained);
+ if (extent == NULL) {
+ return NULL;
+ }
+
+ if (*commit && !extent_committed_get(extent)) {
+ if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent,
+ 0, extent_size_get(extent), growing_retained)) {
+ extent_record(tsdn, arena, r_extent_hooks, extents,
+ extent, growing_retained);
+ return NULL;
+ }
+ extent_zeroed_set(extent, true);
+ }
+
+ if (pad != 0) {
+ extent_addr_randomize(tsdn, extent, alignment);
+ }
+ assert(extent_state_get(extent) == extent_state_active);
+ if (slab) {
+ extent_slab_set(extent, slab);
+ extent_interior_register(tsdn, rtree_ctx, extent, szind);
+ }
+
+ if (*zero) {
+ void *addr = extent_base_get(extent);
+ size_t size = extent_size_get(extent);
+ if (!extent_zeroed_get(extent)) {
+ if (pages_purge_forced(addr, size)) {
+ memset(addr, 0, size);
+ }
+ } else if (config_debug) {
+ size_t *p = (size_t *)(uintptr_t)addr;
+ for (size_t i = 0; i < size / sizeof(size_t); i++) {
+ assert(p[i] == 0);
+ }
+ }
+ }
+ return extent;
+}
+
+/*
+ * If the caller specifies (!*zero), it is still possible to receive zeroed
+ * memory, in which case *zero is toggled to true. arena_extent_alloc() takes
+ * advantage of this to avoid demanding zeroed extents, but taking advantage of
+ * them if they are returned.
+ */
+static void *
+extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
+ size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) {
+ void *ret;
+
+ assert(size != 0);
+ assert(alignment != 0);
+
+ /* "primary" dss. */
+ if (have_dss && dss_prec == dss_prec_primary && (ret =
+ extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
+ commit)) != NULL) {
+ return ret;
+ }
+ /* mmap. */
+ if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
+ != NULL) {
+ return ret;
+ }
+ /* "secondary" dss. */
+ if (have_dss && dss_prec == dss_prec_secondary && (ret =
+ extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
+ commit)) != NULL) {
+ return ret;
+ }
+
+ /* All strategies for allocation failed. */
+ return NULL;
+}
+
+static void *
+extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
+ size_t size, size_t alignment, bool *zero, bool *commit) {
+ void *ret;
+
+ ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
+ commit, (dss_prec_t)atomic_load_u(&arena->dss_prec,
+ ATOMIC_RELAXED));
+ return ret;
+}
+
+static void *
+extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
+ size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
+ tsdn_t *tsdn;
+ arena_t *arena;
+
+ tsdn = tsdn_fetch();
+ arena = arena_get(tsdn, arena_ind, false);
+ /*
+ * The arena we're allocating on behalf of must have been initialized
+ * already.
+ */
+ assert(arena != NULL);
+
+ return extent_alloc_default_impl(tsdn, arena, new_addr, size,
+ alignment, zero, commit);
+}
+
+/*
+ * If virtual memory is retained, create increasingly larger extents from which
+ * to split requested extents in order to limit the total number of disjoint
+ * virtual memory ranges retained by each arena.
+ */
+static extent_t *
+extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, size_t size, size_t pad, size_t alignment,
+ bool slab, szind_t szind, bool *zero, bool *commit) {
+ malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx);
+ assert(pad == 0 || !slab);
+ assert(!*zero || !slab);
+
+ size_t esize = size + pad;
+ size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE;
+ /* Beware size_t wrap-around. */
+ if (alloc_size_min < esize) {
+ goto label_err;
+ }
+ /*
+ * Find the next extent size in the series that would be large enough to
+ * satisfy this request.
+ */
+ pszind_t egn_skip = 0;
+ size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
+ while (alloc_size < alloc_size_min) {
+ egn_skip++;
+ if (arena->extent_grow_next + egn_skip == NPSIZES) {
+ /* Outside legal range. */
+ goto label_err;
+ }
+ assert(arena->extent_grow_next + egn_skip < NPSIZES);
+ alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
+ }
+
+ extent_t *extent = extent_alloc(tsdn, arena);
+ if (extent == NULL) {
+ goto label_err;
+ }
+ bool zeroed = false;
+ bool committed = false;
-/* Generate red-black tree functions. */
-rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, ad_link, extent_ad_comp)
+ void *ptr;
+ if (*r_extent_hooks == &extent_hooks_default) {
+ ptr = extent_alloc_core(tsdn, arena, NULL, alloc_size, PAGE,
+ &zeroed, &committed, (dss_prec_t)atomic_load_u(
+ &arena->dss_prec, ATOMIC_RELAXED));
+ } else {
+ ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL,
+ alloc_size, PAGE, &zeroed, &committed,
+ arena_ind_get(arena));
+ }
+
+ extent_init(extent, arena, ptr, alloc_size, false, NSIZES,
+ arena_extent_sn_next(arena), extent_state_active, zeroed,
+ committed);
+ if (ptr == NULL) {
+ extent_dalloc(tsdn, arena, extent);
+ goto label_err;
+ }
+ if (extent_register_no_gdump_add(tsdn, extent)) {
+ extents_leak(tsdn, arena, r_extent_hooks,
+ &arena->extents_retained, extent, true);
+ goto label_err;
+ }
+
+ size_t leadsize = ALIGNMENT_CEILING((uintptr_t)ptr,
+ PAGE_CEILING(alignment)) - (uintptr_t)ptr;
+ assert(alloc_size >= leadsize + esize);
+ size_t trailsize = alloc_size - leadsize - esize;
+ if (extent_zeroed_get(extent) && extent_committed_get(extent)) {
+ *zero = true;
+ }
+ if (extent_committed_get(extent)) {
+ *commit = true;
+ }
+
+ /* Split the lead. */
+ if (leadsize != 0) {
+ extent_t *lead = extent;
+ extent = extent_split_impl(tsdn, arena, r_extent_hooks, lead,
+ leadsize, NSIZES, false, esize + trailsize, szind, slab,
+ true);
+ if (extent == NULL) {
+ extent_deregister(tsdn, lead);
+ extents_leak(tsdn, arena, r_extent_hooks,
+ &arena->extents_retained, lead, true);
+ goto label_err;
+ }
+ extent_record(tsdn, arena, r_extent_hooks,
+ &arena->extents_retained, lead, true);
+ }
+
+ /* Split the trail. */
+ if (trailsize != 0) {
+ extent_t *trail = extent_split_impl(tsdn, arena, r_extent_hooks,
+ extent, esize, szind, slab, trailsize, NSIZES, false, true);
+ if (trail == NULL) {
+ extent_deregister(tsdn, extent);
+ extents_leak(tsdn, arena, r_extent_hooks,
+ &arena->extents_retained, extent, true);
+ goto label_err;
+ }
+ extent_record(tsdn, arena, r_extent_hooks,
+ &arena->extents_retained, trail, true);
+ } else if (leadsize == 0) {
+ /*
+ * Splitting causes szind to be set as a side effect, but no
+ * splitting occurred.
+ */
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
+ &rtree_ctx_fallback);
+
+ extent_szind_set(extent, szind);
+ if (szind != NSIZES) {
+ rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)extent_addr_get(extent), szind, slab);
+ if (slab && extent_size_get(extent) > PAGE) {
+ rtree_szind_slab_update(tsdn, &extents_rtree,
+ rtree_ctx,
+ (uintptr_t)extent_past_get(extent) -
+ (uintptr_t)PAGE, szind, slab);
+ }
+ }
+ }
+
+ if (*commit && !extent_committed_get(extent)) {
+ if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0,
+ extent_size_get(extent), true)) {
+ extent_record(tsdn, arena, r_extent_hooks,
+ &arena->extents_retained, extent, true);
+ goto label_err;
+ }
+ extent_zeroed_set(extent, true);
+ }
+
+ /*
+ * Increment extent_grow_next if doing so wouldn't exceed the legal
+ * range.
+ */
+ if (arena->extent_grow_next + egn_skip + 1 < NPSIZES) {
+ arena->extent_grow_next += egn_skip + 1;
+ } else {
+ arena->extent_grow_next = NPSIZES - 1;
+ }
+ /* All opportunities for failure are past. */
+ malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
+
+ if (config_prof) {
+ /* Adjust gdump stats now that extent is final size. */
+ extent_gdump_add(tsdn, extent);
+ }
+ if (pad != 0) {
+ extent_addr_randomize(tsdn, extent, alignment);
+ }
+ if (slab) {
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
+ &rtree_ctx_fallback);
+
+ extent_slab_set(extent, true);
+ extent_interior_register(tsdn, rtree_ctx, extent, szind);
+ }
+ if (*zero && !extent_zeroed_get(extent)) {
+ void *addr = extent_base_get(extent);
+ size_t size = extent_size_get(extent);
+ if (pages_purge_forced(addr, size)) {
+ memset(addr, 0, size);
+ }
+ }
+
+ return extent;
+label_err:
+ malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
+ return NULL;
+}
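
The sizing strategy of extent_grow_retained() above can be sketched with simple power-of-two "classes" in place of the real sz_pind2sz() series (which is finer grained); the names and constants below are invented. The key point is that after each growth the cursor advances past the index actually used, so successive retained allocations come from ever larger mappings.

#include <stddef.h>
#include <stdio.h>

#define TOY_NCLASSES 16
#define TOY_BASE ((size_t)1 << 16) /* 64 KiB first grow size. */

static size_t toy_grow_next; /* Index of the next grow size. */

static size_t
toy_class_size(size_t ind) {
    return TOY_BASE << ind;
}

/* Pick a grow size >= want, then advance so the next call grows larger. */
static size_t
toy_grow_size(size_t want) {
    size_t skip = 0;
    while (toy_grow_next + skip < TOY_NCLASSES &&
        toy_class_size(toy_grow_next + skip) < want) {
        skip++;
    }
    if (toy_grow_next + skip == TOY_NCLASSES) {
        return 0; /* Outside the legal range. */
    }
    size_t ret = toy_class_size(toy_grow_next + skip);
    if (toy_grow_next + skip + 1 < TOY_NCLASSES) {
        toy_grow_next += skip + 1;
    } else {
        toy_grow_next = TOY_NCLASSES - 1;
    }
    return ret;
}

int
main(void) {
    size_t a = toy_grow_size(1 << 12);
    size_t b = toy_grow_size(1 << 12);
    size_t c = toy_grow_size(1 << 20);
    printf("%zu %zu %zu\n", a, b, c); /* 65536 131072 1048576 */
    return 0;
}
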
+
+static extent_t *
+extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
+ size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
+ assert(size != 0);
+ assert(alignment != 0);
+
+ malloc_mutex_lock(tsdn, &arena->extent_grow_mtx);
+
+ extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks,
+ &arena->extents_retained, new_addr, size, pad, alignment, slab,
+ szind, zero, commit, true);
+ if (extent != NULL) {
+ malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
+ if (config_prof) {
+ extent_gdump_add(tsdn, extent);
+ }
+ } else if (opt_retain && new_addr == NULL) {
+ extent = extent_grow_retained(tsdn, arena, r_extent_hooks, size,
+ pad, alignment, slab, szind, zero, commit);
+ /* extent_grow_retained() always releases extent_grow_mtx. */
+ } else {
+ malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
+ }
+ malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx);
+
+ return extent;
+}
+
+static extent_t *
+extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
+ size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
+ size_t esize = size + pad;
+ extent_t *extent = extent_alloc(tsdn, arena);
+ if (extent == NULL) {
+ return NULL;
+ }
+ void *addr;
+ if (*r_extent_hooks == &extent_hooks_default) {
+ /* Call directly to propagate tsdn. */
+ addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize,
+ alignment, zero, commit);
+ } else {
+ addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr,
+ esize, alignment, zero, commit, arena_ind_get(arena));
+ }
+ if (addr == NULL) {
+ extent_dalloc(tsdn, arena, extent);
+ return NULL;
+ }
+ extent_init(extent, arena, addr, esize, slab, szind,
+ arena_extent_sn_next(arena), extent_state_active, zero, commit);
+ if (pad != 0) {
+ extent_addr_randomize(tsdn, extent, alignment);
+ }
+ if (extent_register(tsdn, extent)) {
+ extents_leak(tsdn, arena, r_extent_hooks,
+ &arena->extents_retained, extent, false);
+ return NULL;
+ }
+
+ return extent;
+}
+
+extent_t *
+extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
+ size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ extent_hooks_assure_initialized(arena, r_extent_hooks);
+
+ extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks,
+ new_addr, size, pad, alignment, slab, szind, zero, commit);
+ if (extent == NULL) {
+ extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks,
+ new_addr, size, pad, alignment, slab, szind, zero, commit);
+ }
+
+ return extent;
+}
+
+static bool
+extent_can_coalesce(arena_t *arena, extents_t *extents, const extent_t *inner,
+ const extent_t *outer) {
+ assert(extent_arena_get(inner) == arena);
+ if (extent_arena_get(outer) != arena) {
+ return false;
+ }
+
+ assert(extent_state_get(inner) == extent_state_active);
+ if (extent_state_get(outer) != extents->state) {
+ return false;
+ }
+
+ if (extent_committed_get(inner) != extent_committed_get(outer)) {
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
+ extents_t *extents, extent_t *inner, extent_t *outer, bool forward,
+ bool growing_retained) {
+ assert(extent_can_coalesce(arena, extents, inner, outer));
+
+ if (forward && extents->delay_coalesce) {
+ /*
+ * The extent that remains after coalescing must occupy the
+ * outer extent's position in the LRU. For forward coalescing,
+ * swap the inner extent into the LRU.
+ */
+ extent_list_replace(&extents->lru, outer, inner);
+ }
+ extent_activate_locked(tsdn, arena, extents, outer,
+ extents->delay_coalesce);
+
+ malloc_mutex_unlock(tsdn, &extents->mtx);
+ bool err = extent_merge_impl(tsdn, arena, r_extent_hooks,
+ forward ? inner : outer, forward ? outer : inner, growing_retained);
+ malloc_mutex_lock(tsdn, &extents->mtx);
+
+ if (err) {
+ if (forward && extents->delay_coalesce) {
+ extent_list_replace(&extents->lru, inner, outer);
+ }
+ extent_deactivate_locked(tsdn, arena, extents, outer,
+ extents->delay_coalesce);
+ }
+
+ return err;
+}
+
+static extent_t *
+extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
+ extent_t *extent, bool *coalesced, bool growing_retained) {
+ /*
+ * Continue attempting to coalesce until failure, to protect against
+ * races with other threads that are thwarted by this one.
+ */
+ bool again;
+ do {
+ again = false;
+
+ /* Try to coalesce forward. */
+ extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx,
+ extent_past_get(extent));
+ if (next != NULL) {
+ /*
+ * extents->mtx only protects against races for
+ * like-state extents, so call extent_can_coalesce()
+ * before releasing next's pool lock.
+ */
+ bool can_coalesce = extent_can_coalesce(arena, extents,
+ extent, next);
+
+ extent_unlock(tsdn, next);
+
+ if (can_coalesce && !extent_coalesce(tsdn, arena,
+ r_extent_hooks, extents, extent, next, true,
+ growing_retained)) {
+ if (extents->delay_coalesce) {
+ /* Do minimal coalescing. */
+ *coalesced = true;
+ return extent;
+ }
+ again = true;
+ }
+ }
+
+ /* Try to coalesce backward. */
+ extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx,
+ extent_before_get(extent));
+ if (prev != NULL) {
+ bool can_coalesce = extent_can_coalesce(arena, extents,
+ extent, prev);
+ extent_unlock(tsdn, prev);
+
+ if (can_coalesce && !extent_coalesce(tsdn, arena,
+ r_extent_hooks, extents, extent, prev, false,
+ growing_retained)) {
+ extent = prev;
+ if (extents->delay_coalesce) {
+ /* Do minimal coalescing. */
+ *coalesced = true;
+ return extent;
+ }
+ again = true;
+ }
+ }
+ } while (again);
+
+ if (extents->delay_coalesce) {
+ *coalesced = false;
+ }
+ return extent;
+}
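
Stripped of the rtree lookups and locking, the merge that extent_try_coalesce() keeps retrying above is just the absorption of one adjacent address range into another; the sketch below uses flat structs and invented names.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uintptr_t addr;
    size_t size;
} toy_range_t;

/* Forward coalesce: absorb b into a when b starts exactly where a ends. */
static bool
toy_coalesce_forward(toy_range_t *a, const toy_range_t *b) {
    if (a->addr + a->size != b->addr) {
        return false;
    }
    a->size += b->size;
    return true;
}

int
main(void) {
    toy_range_t a = {0x1000, 0x2000};
    toy_range_t b = {0x3000, 0x1000};
    /* Backward coalescing is the same test with the roles swapped. */
    if (toy_coalesce_forward(&a, &b)) {
        printf("coalesced: [0x%lx, 0x%lx)\n", (unsigned long)a.addr,
            (unsigned long)(a.addr + a.size));
    }
    return 0;
}
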
+
+static void
+extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
+ extents_t *extents, extent_t *extent, bool growing_retained) {
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+
+ assert((extents_state_get(extents) != extent_state_dirty &&
+ extents_state_get(extents) != extent_state_muzzy) ||
+ !extent_zeroed_get(extent));
+
+ malloc_mutex_lock(tsdn, &extents->mtx);
+ extent_hooks_assure_initialized(arena, r_extent_hooks);
+
+ extent_szind_set(extent, NSIZES);
+ if (extent_slab_get(extent)) {
+ extent_interior_deregister(tsdn, rtree_ctx, extent);
+ extent_slab_set(extent, false);
+ }
+
+ assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)extent_base_get(extent), true) == extent);
+
+ if (!extents->delay_coalesce) {
+ extent = extent_try_coalesce(tsdn, arena, r_extent_hooks,
+ rtree_ctx, extents, extent, NULL, growing_retained);
+ }
+
+ extent_deactivate_locked(tsdn, arena, extents, extent, false);
+
+ malloc_mutex_unlock(tsdn, &extents->mtx);
+}
+
+void
+extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
+ extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
+
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ if (extent_register(tsdn, extent)) {
+ extents_leak(tsdn, arena, &extent_hooks,
+ &arena->extents_retained, extent, false);
+ return;
+ }
+ extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent);
+}
+
+static bool
+extent_dalloc_default_impl(void *addr, size_t size) {
+ if (!have_dss || !extent_in_dss(addr)) {
+ return extent_dalloc_mmap(addr, size);
+ }
+ return true;
+}
+
+static bool
+extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ bool committed, unsigned arena_ind) {
+ return extent_dalloc_default_impl(addr, size);
+}
+
+static bool
+extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent) {
+ bool err;
+
+ assert(extent_base_get(extent) != NULL);
+ assert(extent_size_get(extent) != 0);
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ extent_addr_set(extent, extent_base_get(extent));
+
+ extent_hooks_assure_initialized(arena, r_extent_hooks);
+ /* Try to deallocate. */
+ if (*r_extent_hooks == &extent_hooks_default) {
+ /* Call directly to propagate tsdn. */
+ err = extent_dalloc_default_impl(extent_base_get(extent),
+ extent_size_get(extent));
+ } else {
+ err = ((*r_extent_hooks)->dalloc == NULL ||
+ (*r_extent_hooks)->dalloc(*r_extent_hooks,
+ extent_base_get(extent), extent_size_get(extent),
+ extent_committed_get(extent), arena_ind_get(arena)));
+ }
+
+ if (!err) {
+ extent_dalloc(tsdn, arena, extent);
+ }
+
+ return err;
+}
+
+void
+extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ /*
+ * Deregister first to avoid a race with other allocating threads, and
+ * reregister if deallocation fails.
+ */
+ extent_deregister(tsdn, extent);
+ if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks, extent)) {
+ return;
+ }
+
+ extent_reregister(tsdn, extent);
+ /* Try to decommit; purge if that fails. */
+ bool zeroed;
+ if (!extent_committed_get(extent)) {
+ zeroed = true;
+ } else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent,
+ 0, extent_size_get(extent))) {
+ zeroed = true;
+ } else if ((*r_extent_hooks)->purge_forced != NULL &&
+ !(*r_extent_hooks)->purge_forced(*r_extent_hooks,
+ extent_base_get(extent), extent_size_get(extent), 0,
+ extent_size_get(extent), arena_ind_get(arena))) {
+ zeroed = true;
+ } else if (extent_state_get(extent) == extent_state_muzzy ||
+ ((*r_extent_hooks)->purge_lazy != NULL &&
+ !(*r_extent_hooks)->purge_lazy(*r_extent_hooks,
+ extent_base_get(extent), extent_size_get(extent), 0,
+ extent_size_get(extent), arena_ind_get(arena)))) {
+ zeroed = false;
+ } else {
+ zeroed = false;
+ }
+ extent_zeroed_set(extent, zeroed);
+
+ if (config_prof) {
+ extent_gdump_sub(tsdn, extent);
+ }
+
+ extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained,
+ extent, false);
+}
+
+static void
+extent_destroy_default_impl(void *addr, size_t size) {
+ if (!have_dss || !extent_in_dss(addr)) {
+ pages_unmap(addr, size);
+ }
+}
+
+static void
+extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ bool committed, unsigned arena_ind) {
+ extent_destroy_default_impl(addr, size);
+}
+
+void
+extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent) {
+ assert(extent_base_get(extent) != NULL);
+ assert(extent_size_get(extent) != 0);
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ /* Deregister first to avoid a race with other allocating threads. */
+ extent_deregister(tsdn, extent);
+
+ extent_addr_set(extent, extent_base_get(extent));
+
+ extent_hooks_assure_initialized(arena, r_extent_hooks);
+ /* Try to destroy; silently fail otherwise. */
+ if (*r_extent_hooks == &extent_hooks_default) {
+ /* Call directly to propagate tsdn. */
+ extent_destroy_default_impl(extent_base_get(extent),
+ extent_size_get(extent));
+ } else if ((*r_extent_hooks)->destroy != NULL) {
+ (*r_extent_hooks)->destroy(*r_extent_hooks,
+ extent_base_get(extent), extent_size_get(extent),
+ extent_committed_get(extent), arena_ind_get(arena));
+ }
+
+ extent_dalloc(tsdn, arena, extent);
+}
+
+static bool
+extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ size_t offset, size_t length, unsigned arena_ind) {
+ return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
+ length);
+}
+
+static bool
+extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length, bool growing_retained) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, growing_retained ? 1 : 0);
+
+ extent_hooks_assure_initialized(arena, r_extent_hooks);
+ bool err = ((*r_extent_hooks)->commit == NULL ||
+ (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent),
+ extent_size_get(extent), offset, length, arena_ind_get(arena)));
+ extent_committed_set(extent, extent_committed_get(extent) || !err);
+ return err;
+}
+
+bool
+extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length) {
+ return extent_commit_impl(tsdn, arena, r_extent_hooks, extent, offset,
+ length, false);
+}
+
+static bool
+extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ size_t offset, size_t length, unsigned arena_ind) {
+ return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
+ length);
+}
+
+bool
+extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ extent_hooks_assure_initialized(arena, r_extent_hooks);
+
+ bool err = ((*r_extent_hooks)->decommit == NULL ||
+ (*r_extent_hooks)->decommit(*r_extent_hooks,
+ extent_base_get(extent), extent_size_get(extent), offset, length,
+ arena_ind_get(arena)));
+ extent_committed_set(extent, extent_committed_get(extent) && err);
+ return err;
+}
+
+#ifdef PAGES_CAN_PURGE_LAZY
+static bool
+extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ size_t offset, size_t length, unsigned arena_ind) {
+ assert(addr != NULL);
+ assert((offset & PAGE_MASK) == 0);
+ assert(length != 0);
+ assert((length & PAGE_MASK) == 0);
+
+ return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
+ length);
+}
+#endif
+
+static bool
+extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length, bool growing_retained) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, growing_retained ? 1 : 0);
+
+ extent_hooks_assure_initialized(arena, r_extent_hooks);
+ return ((*r_extent_hooks)->purge_lazy == NULL ||
+ (*r_extent_hooks)->purge_lazy(*r_extent_hooks,
+ extent_base_get(extent), extent_size_get(extent), offset, length,
+ arena_ind_get(arena)));
+}
+
+bool
+extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length) {
+ return extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent,
+ offset, length, false);
+}
+
+#ifdef PAGES_CAN_PURGE_FORCED
+static bool
+extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr,
+ size_t size, size_t offset, size_t length, unsigned arena_ind) {
+ assert(addr != NULL);
+ assert((offset & PAGE_MASK) == 0);
+ assert(length != 0);
+ assert((length & PAGE_MASK) == 0);
+
+ return pages_purge_forced((void *)((uintptr_t)addr +
+ (uintptr_t)offset), length);
+}
+#endif
+
+static bool
+extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length, bool growing_retained) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, growing_retained ? 1 : 0);
+
+ extent_hooks_assure_initialized(arena, r_extent_hooks);
+ return ((*r_extent_hooks)->purge_forced == NULL ||
+ (*r_extent_hooks)->purge_forced(*r_extent_hooks,
+ extent_base_get(extent), extent_size_get(extent), offset, length,
+ arena_ind_get(arena)));
+}
+
+bool
+extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length) {
+ return extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent,
+ offset, length, false);
+}
+
+#ifdef JEMALLOC_MAPS_COALESCE
+static bool
+extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
+ return !maps_coalesce;
+}
+#endif
+
+static extent_t *
+extent_split_impl(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
+ szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
+ bool growing_retained) {
+ assert(extent_size_get(extent) == size_a + size_b);
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, growing_retained ? 1 : 0);
+
+ extent_hooks_assure_initialized(arena, r_extent_hooks);
+
+ if ((*r_extent_hooks)->split == NULL) {
+ return NULL;
+ }
+
+ extent_t *trail = extent_alloc(tsdn, arena);
+ if (trail == NULL) {
+ goto label_error_a;
+ }
+
+ extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
+ size_a), size_b, slab_b, szind_b, extent_sn_get(extent),
+ extent_state_get(extent), extent_zeroed_get(extent),
+ extent_committed_get(extent));
+
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+ rtree_leaf_elm_t *lead_elm_a, *lead_elm_b;
+ {
+ extent_t lead;
+
+ extent_init(&lead, arena, extent_addr_get(extent), size_a,
+ slab_a, szind_a, extent_sn_get(extent),
+ extent_state_get(extent), extent_zeroed_get(extent),
+ extent_committed_get(extent));
+
+ extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false,
+ true, &lead_elm_a, &lead_elm_b);
+ }
+ rtree_leaf_elm_t *trail_elm_a, *trail_elm_b;
+ extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true,
+ &trail_elm_a, &trail_elm_b);
+
+ if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL
+ || trail_elm_b == NULL) {
+ goto label_error_b;
+ }
+
+ extent_lock2(tsdn, extent, trail);
+
+ if ((*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent),
+ size_a + size_b, size_a, size_b, extent_committed_get(extent),
+ arena_ind_get(arena))) {
+ goto label_error_c;
+ }
+
+ extent_size_set(extent, size_a);
+ extent_szind_set(extent, szind_a);
+
+ extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent,
+ szind_a, slab_a);
+ extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail,
+ szind_b, slab_b);
+
+ extent_unlock2(tsdn, extent, trail);
+
+ return trail;
+label_error_c:
+ extent_unlock2(tsdn, extent, trail);
+label_error_b:
+ extent_dalloc(tsdn, arena, trail);
+label_error_a:
+ return NULL;
+}
+
+extent_t *
+extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
+ szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) {
+ return extent_split_impl(tsdn, arena, r_extent_hooks, extent, size_a,
+ szind_a, slab_a, size_b, szind_b, slab_b, false);
+}
+
+static bool
+extent_merge_default_impl(void *addr_a, void *addr_b) {
+ if (!maps_coalesce) {
+ return true;
+ }
+ if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
+ return true;
+ }
+
+ return false;
+}
+
+#ifdef JEMALLOC_MAPS_COALESCE
+static bool
+extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
+ void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
+ return extent_merge_default_impl(addr_a, addr_b);
+}
+#endif
+
+static bool
+extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
+ bool growing_retained) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, growing_retained ? 1 : 0);
+
+ extent_hooks_assure_initialized(arena, r_extent_hooks);
+
+ if ((*r_extent_hooks)->merge == NULL) {
+ return true;
+ }
+
+ bool err;
+ if (*r_extent_hooks == &extent_hooks_default) {
+ /* Call directly to propagate tsdn. */
+ err = extent_merge_default_impl(extent_base_get(a),
+ extent_base_get(b));
+ } else {
+ err = (*r_extent_hooks)->merge(*r_extent_hooks,
+ extent_base_get(a), extent_size_get(a), extent_base_get(b),
+ extent_size_get(b), extent_committed_get(a),
+ arena_ind_get(arena));
+ }
+
+ if (err) {
+ return true;
+ }
+
+ /*
+ * The rtree writes must happen while all the relevant elements are
+ * owned, so the following code uses decomposed helper functions rather
+ * than extent_{,de}register() to do things in the right order.
+ */
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+ rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
+ extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false, &a_elm_a,
+ &a_elm_b);
+ extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a,
+ &b_elm_b);
+
+ extent_lock2(tsdn, a, b);
+
+ if (a_elm_b != NULL) {
+ rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL,
+ NSIZES, false);
+ }
+ if (b_elm_b != NULL) {
+ rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL,
+ NSIZES, false);
+ } else {
+ b_elm_b = b_elm_a;
+ }
+
+ extent_size_set(a, extent_size_get(a) + extent_size_get(b));
+ extent_szind_set(a, NSIZES);
+ extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ?
+ extent_sn_get(a) : extent_sn_get(b));
+ extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
+
+ extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, NSIZES, false);
+
+ extent_unlock2(tsdn, a, b);
+
+ extent_dalloc(tsdn, extent_arena_get(b), b);
+
+ return false;
+}
+
+bool
+extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) {
+ return extent_merge_impl(tsdn, arena, r_extent_hooks, a, b, false);
+}
+
+bool
+extent_boot(void) {
+ if (rtree_new(&extents_rtree, true)) {
+ return true;
+ }
+
+ if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool",
+ WITNESS_RANK_EXTENT_POOL)) {
+ return true;
+ }
+
+ if (have_dss) {
+ extent_dss_boot();
+ }
+
+ return false;
+}
diff --git a/contrib/jemalloc/src/extent_dss.c b/contrib/jemalloc/src/extent_dss.c
new file mode 100644
index 000000000000..e72da95870d4
--- /dev/null
+++ b/contrib/jemalloc/src/extent_dss.c
@@ -0,0 +1,269 @@
+#define JEMALLOC_EXTENT_DSS_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/extent_dss.h"
+#include "jemalloc/internal/spin.h"
+
+/******************************************************************************/
+/* Data. */
+
+const char *opt_dss = DSS_DEFAULT;
+
+const char *dss_prec_names[] = {
+ "disabled",
+ "primary",
+ "secondary",
+ "N/A"
+};
+
+/*
+ * Current dss precedence default, used when creating new arenas. NB: This is
+ * stored as unsigned rather than dss_prec_t because in principle there's no
+ * guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use
+ * atomic operations to synchronize the setting.
+ */
+static atomic_u_t dss_prec_default = ATOMIC_INIT(
+ (unsigned)DSS_PREC_DEFAULT);
+
+/* Base address of the DSS. */
+static void *dss_base;
+/* Atomic boolean indicating whether a thread is currently extending DSS. */
+static atomic_b_t dss_extending;
+/* Atomic boolean indicating whether the DSS is exhausted. */
+static atomic_b_t dss_exhausted;
+/* Atomic current upper limit on DSS addresses. */
+static atomic_p_t dss_max;
+
+/******************************************************************************/
+
+static void *
+extent_dss_sbrk(intptr_t increment) {
+#ifdef JEMALLOC_DSS
+ return sbrk(increment);
+#else
+ not_implemented();
+ return NULL;
+#endif
+}
+
+dss_prec_t
+extent_dss_prec_get(void) {
+ dss_prec_t ret;
+
+ if (!have_dss) {
+ return dss_prec_disabled;
+ }
+ ret = (dss_prec_t)atomic_load_u(&dss_prec_default, ATOMIC_ACQUIRE);
+ return ret;
+}
+
+bool
+extent_dss_prec_set(dss_prec_t dss_prec) {
+ if (!have_dss) {
+ return (dss_prec != dss_prec_disabled);
+ }
+ atomic_store_u(&dss_prec_default, (unsigned)dss_prec, ATOMIC_RELEASE);
+ return false;
+}
+
+static void
+extent_dss_extending_start(void) {
+ spin_t spinner = SPIN_INITIALIZER;
+ while (true) {
+ bool expected = false;
+ if (atomic_compare_exchange_weak_b(&dss_extending, &expected,
+ true, ATOMIC_ACQ_REL, ATOMIC_RELAXED)) {
+ break;
+ }
+ spin_adaptive(&spinner);
+ }
+}
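
The dss_extending handoff above amounts to acquiring a single atomic flag with a weak compare-exchange in a spin loop and releasing it with a plain store. A minimal C11 sketch with invented names, where spin_adaptive()'s backoff is reduced to a no-op:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool toy_extending;

static void
toy_extending_start(void) {
    while (true) {
        bool expected = false;
        if (atomic_compare_exchange_weak_explicit(&toy_extending,
            &expected, true, memory_order_acq_rel,
            memory_order_relaxed)) {
            break;
        }
        /* A real implementation would back off here. */
    }
}

static void
toy_extending_finish(void) {
    atomic_store_explicit(&toy_extending, false, memory_order_release);
}

int
main(void) {
    toy_extending_start();
    toy_extending_finish();
    return 0;
}
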
+
+static void
+extent_dss_extending_finish(void) {
+ assert(atomic_load_b(&dss_extending, ATOMIC_RELAXED));
+
+ atomic_store_b(&dss_extending, false, ATOMIC_RELEASE);
+}
+
+static void *
+extent_dss_max_update(void *new_addr) {
+ /*
+ * Get the current end of the DSS as max_cur and assure that dss_max is
+ * up to date.
+ */
+ void *max_cur = extent_dss_sbrk(0);
+ if (max_cur == (void *)-1) {
+ return NULL;
+ }
+ atomic_store_p(&dss_max, max_cur, ATOMIC_RELEASE);
+ /* Fixed new_addr can only be supported if it is at the edge of DSS. */
+ if (new_addr != NULL && max_cur != new_addr) {
+ return NULL;
+ }
+ return max_cur;
+}
+
+void *
+extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
+ size_t alignment, bool *zero, bool *commit) {
+ extent_t *gap;
+
+ cassert(have_dss);
+ assert(size > 0);
+ assert(alignment > 0);
+
+ /*
+ * sbrk() uses a signed increment argument, so take care not to
+ * interpret a large allocation request as a negative increment.
+ */
+ if ((intptr_t)size < 0) {
+ return NULL;
+ }
+
+ gap = extent_alloc(tsdn, arena);
+ if (gap == NULL) {
+ return NULL;
+ }
+
+ extent_dss_extending_start();
+ if (!atomic_load_b(&dss_exhausted, ATOMIC_ACQUIRE)) {
+ /*
+ * The loop is necessary to recover from races with other
+ * threads that are using the DSS for something other than
+ * malloc.
+ */
+ while (true) {
+ void *max_cur = extent_dss_max_update(new_addr);
+ if (max_cur == NULL) {
+ goto label_oom;
+ }
+
+ /*
+ * Compute how much page-aligned gap space (if any) is
+ * necessary to satisfy alignment. This space can be
+ * recycled for later use.
+ */
+ void *gap_addr_page = (void *)(PAGE_CEILING(
+ (uintptr_t)max_cur));
+ void *ret = (void *)ALIGNMENT_CEILING(
+ (uintptr_t)gap_addr_page, alignment);
+ size_t gap_size_page = (uintptr_t)ret -
+ (uintptr_t)gap_addr_page;
+ if (gap_size_page != 0) {
+ extent_init(gap, arena, gap_addr_page,
+ gap_size_page, false, NSIZES,
+ arena_extent_sn_next(arena),
+ extent_state_active, false, true);
+ }
+ /*
+ * Compute the address just past the end of the desired
+ * allocation space.
+ */
+ void *dss_next = (void *)((uintptr_t)ret + size);
+ if ((uintptr_t)ret < (uintptr_t)max_cur ||
+ (uintptr_t)dss_next < (uintptr_t)max_cur) {
+ goto label_oom; /* Wrap-around. */
+ }
+ /* Compute the increment, including subpage bytes. */
+ void *gap_addr_subpage = max_cur;
+ size_t gap_size_subpage = (uintptr_t)ret -
+ (uintptr_t)gap_addr_subpage;
+ intptr_t incr = gap_size_subpage + size;
+
+ assert((uintptr_t)max_cur + incr == (uintptr_t)ret +
+ size);
+
+ /* Try to allocate. */
+ void *dss_prev = extent_dss_sbrk(incr);
+ if (dss_prev == max_cur) {
+ /* Success. */
+ atomic_store_p(&dss_max, dss_next,
+ ATOMIC_RELEASE);
+ extent_dss_extending_finish();
+
+ if (gap_size_page != 0) {
+ extent_dalloc_gap(tsdn, arena, gap);
+ } else {
+ extent_dalloc(tsdn, arena, gap);
+ }
+ if (!*commit) {
+ *commit = pages_decommit(ret, size);
+ }
+ if (*zero && *commit) {
+ extent_hooks_t *extent_hooks =
+ EXTENT_HOOKS_INITIALIZER;
+ extent_t extent;
+
+ extent_init(&extent, arena, ret, size,
+ size, false, NSIZES,
+ extent_state_active, false, true);
+ if (extent_purge_forced_wrapper(tsdn,
+ arena, &extent_hooks, &extent, 0,
+ size)) {
+ memset(ret, 0, size);
+ }
+ }
+ return ret;
+ }
+ /*
+ * Failure, whether due to OOM or a race with a raw
+ * sbrk() call from outside the allocator.
+ */
+ if (dss_prev == (void *)-1) {
+ /* OOM. */
+ atomic_store_b(&dss_exhausted, true,
+ ATOMIC_RELEASE);
+ goto label_oom;
+ }
+ }
+ }
+label_oom:
+ extent_dss_extending_finish();
+ extent_dalloc(tsdn, arena, gap);
+ return NULL;
+}
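
The gap computation inside extent_alloc_dss() above can be followed with concrete numbers; the macros below are simplified stand-ins for PAGE_CEILING()/ALIGNMENT_CEILING() and the addresses are made up for the example.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE ((uintptr_t)4096)
/* Round x up to a multiple of align (align must be a power of two). */
#define TOY_CEIL(x, align) (((x) + (align) - 1) & ~((align) - 1))

int
main(void) {
    uintptr_t max_cur = 0x100010;  /* Current, unaligned end of the DSS. */
    uintptr_t alignment = 0x8000;  /* Requested 32 KiB alignment. */

    uintptr_t gap_addr_page = TOY_CEIL(max_cur, TOY_PAGE);
    uintptr_t ret = TOY_CEIL(gap_addr_page, alignment);
    uintptr_t gap_size_page = ret - gap_addr_page;

    /* 0x100010 -> page-aligned 0x101000 -> 32 KiB-aligned 0x108000. */
    printf("allocation at 0x%" PRIxPTR ", 0x%" PRIxPTR " byte gap recycled\n",
        ret, gap_size_page);
    return 0;
}
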
+
+static bool
+extent_in_dss_helper(void *addr, void *max) {
+ return ((uintptr_t)addr >= (uintptr_t)dss_base && (uintptr_t)addr <
+ (uintptr_t)max);
+}
+
+bool
+extent_in_dss(void *addr) {
+ cassert(have_dss);
+
+ return extent_in_dss_helper(addr, atomic_load_p(&dss_max,
+ ATOMIC_ACQUIRE));
+}
+
+bool
+extent_dss_mergeable(void *addr_a, void *addr_b) {
+ void *max;
+
+ cassert(have_dss);
+
+ if ((uintptr_t)addr_a < (uintptr_t)dss_base && (uintptr_t)addr_b <
+ (uintptr_t)dss_base) {
+ return true;
+ }
+
+ max = atomic_load_p(&dss_max, ATOMIC_ACQUIRE);
+ return (extent_in_dss_helper(addr_a, max) ==
+ extent_in_dss_helper(addr_b, max));
+}
+
+void
+extent_dss_boot(void) {
+ cassert(have_dss);
+
+ dss_base = extent_dss_sbrk(0);
+ atomic_store_b(&dss_extending, false, ATOMIC_RELAXED);
+ atomic_store_b(&dss_exhausted, dss_base == (void *)-1, ATOMIC_RELAXED);
+ atomic_store_p(&dss_max, dss_base, ATOMIC_RELAXED);
+}
+
+/******************************************************************************/
diff --git a/contrib/jemalloc/src/extent_mmap.c b/contrib/jemalloc/src/extent_mmap.c
new file mode 100644
index 000000000000..8d607dc80392
--- /dev/null
+++ b/contrib/jemalloc/src/extent_mmap.c
@@ -0,0 +1,42 @@
+#define JEMALLOC_EXTENT_MMAP_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/extent_mmap.h"
+
+/******************************************************************************/
+/* Data. */
+
+bool opt_retain =
+#ifdef JEMALLOC_RETAIN
+ true
+#else
+ false
+#endif
+ ;
+
+/******************************************************************************/
+
+void *
+extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
+ bool *commit) {
+ void *ret = pages_map(new_addr, size, ALIGNMENT_CEILING(alignment,
+ PAGE), commit);
+ if (ret == NULL) {
+ return NULL;
+ }
+ assert(ret != NULL);
+ if (*commit) {
+ *zero = true;
+ }
+ return ret;
+}
+
+bool
+extent_dalloc_mmap(void *addr, size_t size) {
+ if (!opt_retain) {
+ pages_unmap(addr, size);
+ }
+ return opt_retain;
+}
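
Two details of extent_mmap.c are worth calling out: freshly mapped anonymous pages are zero-filled by the kernel, which is why extent_alloc_mmap() can report *zero = true whenever the pages are committed, and extent_dalloc_mmap() returns true ("not deallocated") under opt_retain so the caller keeps the still-live mapping for reuse instead of unmapping it. A small sketch of that retain contract, using raw munmap(2) and an illustrative retain_mappings flag:

#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>

static bool retain_mappings = true;    /* assumption: stands in for opt_retain */

/* Returns false if the range was really unmapped, true if the caller must
 * keep the (still mapped) range around for later reuse. */
static bool
dalloc_mapped(void *addr, size_t size) {
	if (!retain_mappings) {
		(void)munmap(addr, size);
		return false;
	}
	return true;
}

/* Caller side: either forget the extent or stash it for reuse. */
static void
release_extent(void *addr, size_t size) {
	if (dalloc_mapped(addr, size)) {
		/* Still mapped: push onto a retained-extent list (not shown). */
	}
}
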
diff --git a/contrib/jemalloc/src/hash.c b/contrib/jemalloc/src/hash.c
index cfa4da0275cb..7b2bdc2bd6f4 100644
--- a/contrib/jemalloc/src/hash.c
+++ b/contrib/jemalloc/src/hash.c
@@ -1,2 +1,3 @@
-#define JEMALLOC_HASH_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define JEMALLOC_HASH_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
diff --git a/contrib/jemalloc/src/hooks.c b/contrib/jemalloc/src/hooks.c
new file mode 100644
index 000000000000..6266ecd47fec
--- /dev/null
+++ b/contrib/jemalloc/src/hooks.c
@@ -0,0 +1,12 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+
+/*
+ * The hooks are a little bit screwy -- they're not genuinely exported in the
+ * sense that we want them available to end-users, but we do want them visible
+ * from outside the generated library, so that we can use them in test code.
+ */
+JEMALLOC_EXPORT
+void (*hooks_arena_new_hook)() = NULL;
+
+JEMALLOC_EXPORT
+void (*hooks_libc_hook)() = NULL;
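
As the comment says, these hook pointers are exported only so test code outside the library can observe internal events. A minimal sketch of how a test translation unit might install one; the observer function and its behavior are made up for illustration:

#include <stdio.h>

/* Declared by the library (see hooks.c above); a bare function pointer
 * with unspecified arguments. */
extern void (*hooks_arena_new_hook)();

static void
count_arena_creations(void) {
	static unsigned n = 0;
	printf("arena created (%u so far)\n", ++n);
}

static void
install_test_hook(void) {
	hooks_arena_new_hook = count_arena_creations;
}
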
diff --git a/contrib/jemalloc/src/huge.c b/contrib/jemalloc/src/huge.c
deleted file mode 100644
index 0fbaa41a1ecf..000000000000
--- a/contrib/jemalloc/src/huge.c
+++ /dev/null
@@ -1,498 +0,0 @@
-#define JEMALLOC_HUGE_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-/******************************************************************************/
-
-static extent_node_t *
-huge_node_get(const void *ptr)
-{
- extent_node_t *node;
-
- node = chunk_lookup(ptr, true);
- assert(!extent_node_achunk_get(node));
-
- return (node);
-}
-
-static bool
-huge_node_set(tsdn_t *tsdn, const void *ptr, extent_node_t *node, bool *gdump)
-{
-
- assert(extent_node_addr_get(node) == ptr);
- assert(!extent_node_achunk_get(node));
- return (chunk_register(ptr, node, gdump));
-}
-
-static void
-huge_node_reset(tsdn_t *tsdn, const void *ptr, extent_node_t *node, bool *gdump)
-{
- bool err;
-
- err = huge_node_set(tsdn, ptr, node, gdump);
- assert(!err);
-}
-
-static void
-huge_node_unset(const void *ptr, const extent_node_t *node)
-{
-
- chunk_deregister(ptr, node);
-}
-
-void *
-huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero)
-{
-
- assert(usize == s2u(usize));
-
- return (huge_palloc(tsdn, arena, usize, chunksize, zero));
-}
-
-void *
-huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
- bool zero)
-{
- void *ret;
- size_t ausize;
- arena_t *iarena;
- extent_node_t *node;
- size_t sn;
- bool is_zeroed, gdump;
-
- /* Allocate one or more contiguous chunks for this request. */
-
- assert(!tsdn_null(tsdn) || arena != NULL);
- /* prof_gdump() requirement. */
- witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
-
- ausize = sa2u(usize, alignment);
- if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS))
- return (NULL);
- assert(ausize >= chunksize);
-
- /* Allocate an extent node with which to track the chunk. */
- iarena = (!tsdn_null(tsdn)) ? arena_ichoose(tsdn_tsd(tsdn), NULL) :
- a0get();
- node = ipallocztm(tsdn, CACHELINE_CEILING(sizeof(extent_node_t)),
- CACHELINE, false, NULL, true, iarena);
- if (node == NULL)
- return (NULL);
-
- /*
- * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
- * it is possible to make correct junk/zero fill decisions below.
- */
- is_zeroed = zero;
- if (likely(!tsdn_null(tsdn)))
- arena = arena_choose(tsdn_tsd(tsdn), arena);
- if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsdn,
- arena, usize, alignment, &sn, &is_zeroed)) == NULL) {
- idalloctm(tsdn, node, NULL, true, true);
- return (NULL);
- }
-
- extent_node_init(node, arena, ret, usize, sn, is_zeroed, true);
-
- if (huge_node_set(tsdn, ret, node, &gdump)) {
- arena_chunk_dalloc_huge(tsdn, arena, ret, usize, sn);
- idalloctm(tsdn, node, NULL, true, true);
- return (NULL);
- }
- if (config_prof && opt_prof && gdump)
- prof_gdump(tsdn);
-
- /* Insert node into huge. */
- malloc_mutex_lock(tsdn, &arena->huge_mtx);
- ql_elm_new(node, ql_link);
- ql_tail_insert(&arena->huge, node, ql_link);
- malloc_mutex_unlock(tsdn, &arena->huge_mtx);
-
- if (zero || (config_fill && unlikely(opt_zero))) {
- if (!is_zeroed)
- memset(ret, 0, usize);
- } else if (config_fill && unlikely(opt_junk_alloc))
- memset(ret, JEMALLOC_ALLOC_JUNK, usize);
-
- arena_decay_tick(tsdn, arena);
- return (ret);
-}
-
-#ifdef JEMALLOC_JET
-#undef huge_dalloc_junk
-#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
-#endif
-static void
-huge_dalloc_junk(void *ptr, size_t usize)
-{
-
- if (config_fill && have_dss && unlikely(opt_junk_free)) {
- /*
- * Only bother junk filling if the chunk isn't about to be
- * unmapped.
- */
- if (!config_munmap || (have_dss && chunk_in_dss(ptr)))
- memset(ptr, JEMALLOC_FREE_JUNK, usize);
- }
-}
-#ifdef JEMALLOC_JET
-#undef huge_dalloc_junk
-#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
-huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
-#endif
-
-static void
-huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
- size_t usize_min, size_t usize_max, bool zero)
-{
- size_t usize, usize_next;
- extent_node_t *node;
- arena_t *arena;
- chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
- bool pre_zeroed, post_zeroed, gdump;
-
- /* prof_gdump() requirement. */
- witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
-
- /* Increase usize to incorporate extra. */
- for (usize = usize_min; usize < usize_max && (usize_next = s2u(usize+1))
- <= oldsize; usize = usize_next)
- ; /* Do nothing. */
-
- if (oldsize == usize)
- return;
-
- node = huge_node_get(ptr);
- arena = extent_node_arena_get(node);
- pre_zeroed = extent_node_zeroed_get(node);
-
- /* Fill if necessary (shrinking). */
- if (oldsize > usize) {
- size_t sdiff = oldsize - usize;
- if (config_fill && unlikely(opt_junk_free)) {
- memset((void *)((uintptr_t)ptr + usize),
- JEMALLOC_FREE_JUNK, sdiff);
- post_zeroed = false;
- } else {
- post_zeroed = !chunk_purge_wrapper(tsdn, arena,
- &chunk_hooks, ptr, CHUNK_CEILING(oldsize), usize,
- sdiff);
- }
- } else
- post_zeroed = pre_zeroed;
-
- malloc_mutex_lock(tsdn, &arena->huge_mtx);
- /* Update the size of the huge allocation. */
- huge_node_unset(ptr, node);
- assert(extent_node_size_get(node) != usize);
- extent_node_size_set(node, usize);
- huge_node_reset(tsdn, ptr, node, &gdump);
- /* Update zeroed. */
- extent_node_zeroed_set(node, post_zeroed);
- malloc_mutex_unlock(tsdn, &arena->huge_mtx);
- /* gdump without any locks held. */
- if (config_prof && opt_prof && gdump)
- prof_gdump(tsdn);
-
- arena_chunk_ralloc_huge_similar(tsdn, arena, ptr, oldsize, usize);
-
- /* Fill if necessary (growing). */
- if (oldsize < usize) {
- if (zero || (config_fill && unlikely(opt_zero))) {
- if (!pre_zeroed) {
- memset((void *)((uintptr_t)ptr + oldsize), 0,
- usize - oldsize);
- }
- } else if (config_fill && unlikely(opt_junk_alloc)) {
- memset((void *)((uintptr_t)ptr + oldsize),
- JEMALLOC_ALLOC_JUNK, usize - oldsize);
- }
- }
-}
-
-static bool
-huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
- size_t usize)
-{
- extent_node_t *node;
- arena_t *arena;
- chunk_hooks_t chunk_hooks;
- size_t cdiff;
- bool pre_zeroed, post_zeroed, gdump;
-
- node = huge_node_get(ptr);
- arena = extent_node_arena_get(node);
- pre_zeroed = extent_node_zeroed_get(node);
- chunk_hooks = chunk_hooks_get(tsdn, arena);
-
- assert(oldsize > usize);
- /* prof_gdump() requirement. */
- witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
-
- /* Split excess chunks. */
- cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
- if (cdiff != 0 && chunk_hooks.split(ptr, CHUNK_CEILING(oldsize),
- CHUNK_CEILING(usize), cdiff, true, arena->ind))
- return (true);
-
- if (oldsize > usize) {
- size_t sdiff = oldsize - usize;
- if (config_fill && unlikely(opt_junk_free)) {
- huge_dalloc_junk((void *)((uintptr_t)ptr + usize),
- sdiff);
- post_zeroed = false;
- } else {
- post_zeroed = !chunk_purge_wrapper(tsdn, arena,
- &chunk_hooks, CHUNK_ADDR2BASE((uintptr_t)ptr +
- usize), CHUNK_CEILING(oldsize),
- CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
- }
- } else
- post_zeroed = pre_zeroed;
-
- malloc_mutex_lock(tsdn, &arena->huge_mtx);
- /* Update the size of the huge allocation. */
- huge_node_unset(ptr, node);
- extent_node_size_set(node, usize);
- huge_node_reset(tsdn, ptr, node, &gdump);
- /* Update zeroed. */
- extent_node_zeroed_set(node, post_zeroed);
- malloc_mutex_unlock(tsdn, &arena->huge_mtx);
- /* gdump without any locks held. */
- if (config_prof && opt_prof && gdump)
- prof_gdump(tsdn);
-
- /* Zap the excess chunks. */
- arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize,
- extent_node_sn_get(node));
-
- return (false);
-}
-
-static bool
-huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
- size_t usize, bool zero) {
- extent_node_t *node;
- arena_t *arena;
- bool is_zeroed_subchunk, is_zeroed_chunk, gdump;
-
- node = huge_node_get(ptr);
- arena = extent_node_arena_get(node);
- malloc_mutex_lock(tsdn, &arena->huge_mtx);
- is_zeroed_subchunk = extent_node_zeroed_get(node);
- malloc_mutex_unlock(tsdn, &arena->huge_mtx);
-
- /* prof_gdump() requirement. */
- witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
-
- /*
- * Use is_zeroed_chunk to detect whether the trailing memory is zeroed,
- * update extent's zeroed field, and zero as necessary.
- */
- is_zeroed_chunk = false;
- if (arena_chunk_ralloc_huge_expand(tsdn, arena, ptr, oldsize, usize,
- &is_zeroed_chunk))
- return (true);
-
- malloc_mutex_lock(tsdn, &arena->huge_mtx);
- huge_node_unset(ptr, node);
- extent_node_size_set(node, usize);
- extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
- is_zeroed_chunk);
- huge_node_reset(tsdn, ptr, node, &gdump);
- malloc_mutex_unlock(tsdn, &arena->huge_mtx);
- /* gdump without any locks held. */
- if (config_prof && opt_prof && gdump)
- prof_gdump(tsdn);
-
- if (zero || (config_fill && unlikely(opt_zero))) {
- if (!is_zeroed_subchunk) {
- memset((void *)((uintptr_t)ptr + oldsize), 0,
- CHUNK_CEILING(oldsize) - oldsize);
- }
- if (!is_zeroed_chunk) {
- memset((void *)((uintptr_t)ptr +
- CHUNK_CEILING(oldsize)), 0, usize -
- CHUNK_CEILING(oldsize));
- }
- } else if (config_fill && unlikely(opt_junk_alloc)) {
- memset((void *)((uintptr_t)ptr + oldsize), JEMALLOC_ALLOC_JUNK,
- usize - oldsize);
- }
-
- return (false);
-}
-
-bool
-huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
- size_t usize_max, bool zero)
-{
-
- assert(s2u(oldsize) == oldsize);
- /* The following should have been caught by callers. */
- assert(usize_min > 0 && usize_max <= HUGE_MAXCLASS);
-
- /* Both allocations must be huge to avoid a move. */
- if (oldsize < chunksize || usize_max < chunksize)
- return (true);
-
- if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
- /* Attempt to expand the allocation in-place. */
- if (!huge_ralloc_no_move_expand(tsdn, ptr, oldsize, usize_max,
- zero)) {
- arena_decay_tick(tsdn, huge_aalloc(ptr));
- return (false);
- }
- /* Try again, this time with usize_min. */
- if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
- CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(tsdn,
- ptr, oldsize, usize_min, zero)) {
- arena_decay_tick(tsdn, huge_aalloc(ptr));
- return (false);
- }
- }
-
- /*
- * Avoid moving the allocation if the existing chunk size accommodates
- * the new size.
- */
- if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min)
- && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
- huge_ralloc_no_move_similar(tsdn, ptr, oldsize, usize_min,
- usize_max, zero);
- arena_decay_tick(tsdn, huge_aalloc(ptr));
- return (false);
- }
-
- /* Attempt to shrink the allocation in-place. */
- if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) {
- if (!huge_ralloc_no_move_shrink(tsdn, ptr, oldsize,
- usize_max)) {
- arena_decay_tick(tsdn, huge_aalloc(ptr));
- return (false);
- }
- }
- return (true);
-}
-
-static void *
-huge_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
- size_t alignment, bool zero)
-{
-
- if (alignment <= chunksize)
- return (huge_malloc(tsdn, arena, usize, zero));
- return (huge_palloc(tsdn, arena, usize, alignment, zero));
-}
-
-void *
-huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
- size_t usize, size_t alignment, bool zero, tcache_t *tcache)
-{
- void *ret;
- size_t copysize;
-
- /* The following should have been caught by callers. */
- assert(usize > 0 && usize <= HUGE_MAXCLASS);
-
- /* Try to avoid moving the allocation. */
- if (!huge_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, usize,
- zero))
- return (ptr);
-
- /*
- * usize and oldsize are different enough that we need to use a
- * different size class. In that case, fall back to allocating new
- * space and copying.
- */
- ret = huge_ralloc_move_helper(tsd_tsdn(tsd), arena, usize, alignment,
- zero);
- if (ret == NULL)
- return (NULL);
-
- copysize = (usize < oldsize) ? usize : oldsize;
- memcpy(ret, ptr, copysize);
- isqalloc(tsd, ptr, oldsize, tcache, true);
- return (ret);
-}
-
-void
-huge_dalloc(tsdn_t *tsdn, void *ptr)
-{
- extent_node_t *node;
- arena_t *arena;
-
- node = huge_node_get(ptr);
- arena = extent_node_arena_get(node);
- huge_node_unset(ptr, node);
- malloc_mutex_lock(tsdn, &arena->huge_mtx);
- ql_remove(&arena->huge, node, ql_link);
- malloc_mutex_unlock(tsdn, &arena->huge_mtx);
-
- huge_dalloc_junk(extent_node_addr_get(node),
- extent_node_size_get(node));
- arena_chunk_dalloc_huge(tsdn, extent_node_arena_get(node),
- extent_node_addr_get(node), extent_node_size_get(node),
- extent_node_sn_get(node));
- idalloctm(tsdn, node, NULL, true, true);
-
- arena_decay_tick(tsdn, arena);
-}
-
-arena_t *
-huge_aalloc(const void *ptr)
-{
-
- return (extent_node_arena_get(huge_node_get(ptr)));
-}
-
-size_t
-huge_salloc(tsdn_t *tsdn, const void *ptr)
-{
- size_t size;
- extent_node_t *node;
- arena_t *arena;
-
- node = huge_node_get(ptr);
- arena = extent_node_arena_get(node);
- malloc_mutex_lock(tsdn, &arena->huge_mtx);
- size = extent_node_size_get(node);
- malloc_mutex_unlock(tsdn, &arena->huge_mtx);
-
- return (size);
-}
-
-prof_tctx_t *
-huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
-{
- prof_tctx_t *tctx;
- extent_node_t *node;
- arena_t *arena;
-
- node = huge_node_get(ptr);
- arena = extent_node_arena_get(node);
- malloc_mutex_lock(tsdn, &arena->huge_mtx);
- tctx = extent_node_prof_tctx_get(node);
- malloc_mutex_unlock(tsdn, &arena->huge_mtx);
-
- return (tctx);
-}
-
-void
-huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx)
-{
- extent_node_t *node;
- arena_t *arena;
-
- node = huge_node_get(ptr);
- arena = extent_node_arena_get(node);
- malloc_mutex_lock(tsdn, &arena->huge_mtx);
- extent_node_prof_tctx_set(node, tctx);
- malloc_mutex_unlock(tsdn, &arena->huge_mtx);
-}
-
-void
-huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr)
-{
-
- huge_prof_tctx_set(tsdn, ptr, (prof_tctx_t *)(uintptr_t)1U);
-}
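
For reference, the in-place reallocation policy that the deleted huge_ralloc_no_move() implemented reduces to comparisons of chunk-ceiling'd sizes. A compact, hypothetical restatement of that decision (the caller is assumed to pass sizes already rounded up with CHUNK_CEILING()):

#include <stddef.h>

typedef enum {
	RALLOC_EXPAND,	/* try to grow into additional chunks in place */
	RALLOC_SAME,	/* existing chunks already accommodate the new size */
	RALLOC_SHRINK,	/* give back trailing chunks */
	RALLOC_MOVE	/* no in-place option: allocate elsewhere and copy */
} ralloc_action_t;

/* chunk_old/min/max are CHUNK_CEILING()ed oldsize, usize_min, usize_max. */
static ralloc_action_t
huge_resize_action(size_t chunk_old, size_t chunk_min, size_t chunk_max) {
	if (chunk_max > chunk_old) {
		return RALLOC_EXPAND;
	}
	if (chunk_old >= chunk_min && chunk_old <= chunk_max) {
		return RALLOC_SAME;
	}
	if (chunk_old > chunk_max) {
		return RALLOC_SHRINK;
	}
	return RALLOC_MOVE;
}
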
diff --git a/contrib/jemalloc/src/jemalloc.c b/contrib/jemalloc/src/jemalloc.c
index fcfe204a9bef..868c9e867c9d 100644
--- a/contrib/jemalloc/src/jemalloc.c
+++ b/contrib/jemalloc/src/jemalloc.c
@@ -1,5 +1,21 @@
-#define JEMALLOC_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define JEMALLOC_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/extent_dss.h"
+#include "jemalloc/internal/extent_mmap.h"
+#include "jemalloc/internal/jemalloc_internal_types.h"
+#include "jemalloc/internal/malloc_io.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/spin.h"
+#include "jemalloc/internal/sz.h"
+#include "jemalloc/internal/ticker.h"
+#include "jemalloc/internal/util.h"
/******************************************************************************/
/* Data. */
@@ -21,6 +37,13 @@ bool opt_abort =
false
#endif
;
+bool opt_abort_conf =
+#ifdef JEMALLOC_DEBUG
+ true
+#else
+ false
+#endif
+ ;
const char *opt_junk =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
"true"
@@ -43,20 +66,15 @@ bool opt_junk_free =
#endif
;
-size_t opt_quarantine = ZU(0);
-bool opt_redzone = false;
bool opt_utrace = false;
bool opt_xmalloc = false;
bool opt_zero = false;
unsigned opt_narenas = 0;
-/* Initialized to true if the process is running inside Valgrind. */
-bool in_valgrind;
-
unsigned ncpus;
/* Protects arenas initialization. */
-static malloc_mutex_t arenas_lock;
+malloc_mutex_t arenas_lock;
/*
* Arenas that are used to service external requests. Not all elements of the
* arenas array are necessarily used; arenas are created lazily as needed.
@@ -64,9 +82,12 @@ static malloc_mutex_t arenas_lock;
* arenas[0..narenas_auto) are used for automatic multiplexing of threads and
* arenas. arenas[narenas_auto..narenas_total) are only used if the application
* takes some action to create them and allocate from them.
+ *
+ * Points to an arena_t.
*/
-arena_t **arenas;
-static unsigned narenas_total; /* Use narenas_total_*(). */
+JEMALLOC_ALIGNED(CACHELINE)
+atomic_p_t arenas[MALLOCX_ARENA_LIMIT];
+static atomic_u_t narenas_total; /* Use narenas_total_*(). */
static arena_t *a0; /* arenas[0]; read-only after initialization. */
unsigned narenas_auto; /* Read-only after initialization. */
@@ -79,123 +100,18 @@ typedef enum {
static malloc_init_t malloc_init_state = malloc_init_uninitialized;
/* False should be the common case. Set to true to trigger initialization. */
-static bool malloc_slow = true;
+bool malloc_slow = true;
/* When malloc_slow is true, set the corresponding bits for sanity check. */
enum {
flag_opt_junk_alloc = (1U),
flag_opt_junk_free = (1U << 1),
- flag_opt_quarantine = (1U << 2),
- flag_opt_zero = (1U << 3),
- flag_opt_utrace = (1U << 4),
- flag_in_valgrind = (1U << 5),
- flag_opt_xmalloc = (1U << 6)
+ flag_opt_zero = (1U << 2),
+ flag_opt_utrace = (1U << 3),
+ flag_opt_xmalloc = (1U << 4)
};
static uint8_t malloc_slow_flags;
-JEMALLOC_ALIGNED(CACHELINE)
-const size_t pind2sz_tab[NPSIZES] = {
-#define PSZ_yes(lg_grp, ndelta, lg_delta) \
- (((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))),
-#define PSZ_no(lg_grp, ndelta, lg_delta)
-#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
- PSZ_##psz(lg_grp, ndelta, lg_delta)
- SIZE_CLASSES
-#undef PSZ_yes
-#undef PSZ_no
-#undef SC
-};
-
-JEMALLOC_ALIGNED(CACHELINE)
-const size_t index2size_tab[NSIZES] = {
-#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
- ((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
- SIZE_CLASSES
-#undef SC
-};
-
-JEMALLOC_ALIGNED(CACHELINE)
-const uint8_t size2index_tab[] = {
-#if LG_TINY_MIN == 0
-#warning "Dangerous LG_TINY_MIN"
-#define S2B_0(i) i,
-#elif LG_TINY_MIN == 1
-#warning "Dangerous LG_TINY_MIN"
-#define S2B_1(i) i,
-#elif LG_TINY_MIN == 2
-#warning "Dangerous LG_TINY_MIN"
-#define S2B_2(i) i,
-#elif LG_TINY_MIN == 3
-#define S2B_3(i) i,
-#elif LG_TINY_MIN == 4
-#define S2B_4(i) i,
-#elif LG_TINY_MIN == 5
-#define S2B_5(i) i,
-#elif LG_TINY_MIN == 6
-#define S2B_6(i) i,
-#elif LG_TINY_MIN == 7
-#define S2B_7(i) i,
-#elif LG_TINY_MIN == 8
-#define S2B_8(i) i,
-#elif LG_TINY_MIN == 9
-#define S2B_9(i) i,
-#elif LG_TINY_MIN == 10
-#define S2B_10(i) i,
-#elif LG_TINY_MIN == 11
-#define S2B_11(i) i,
-#else
-#error "Unsupported LG_TINY_MIN"
-#endif
-#if LG_TINY_MIN < 1
-#define S2B_1(i) S2B_0(i) S2B_0(i)
-#endif
-#if LG_TINY_MIN < 2
-#define S2B_2(i) S2B_1(i) S2B_1(i)
-#endif
-#if LG_TINY_MIN < 3
-#define S2B_3(i) S2B_2(i) S2B_2(i)
-#endif
-#if LG_TINY_MIN < 4
-#define S2B_4(i) S2B_3(i) S2B_3(i)
-#endif
-#if LG_TINY_MIN < 5
-#define S2B_5(i) S2B_4(i) S2B_4(i)
-#endif
-#if LG_TINY_MIN < 6
-#define S2B_6(i) S2B_5(i) S2B_5(i)
-#endif
-#if LG_TINY_MIN < 7
-#define S2B_7(i) S2B_6(i) S2B_6(i)
-#endif
-#if LG_TINY_MIN < 8
-#define S2B_8(i) S2B_7(i) S2B_7(i)
-#endif
-#if LG_TINY_MIN < 9
-#define S2B_9(i) S2B_8(i) S2B_8(i)
-#endif
-#if LG_TINY_MIN < 10
-#define S2B_10(i) S2B_9(i) S2B_9(i)
-#endif
-#if LG_TINY_MIN < 11
-#define S2B_11(i) S2B_10(i) S2B_10(i)
-#endif
-#define S2B_no(i)
-#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
- S2B_##lg_delta_lookup(index)
- SIZE_CLASSES
-#undef S2B_3
-#undef S2B_4
-#undef S2B_5
-#undef S2B_6
-#undef S2B_7
-#undef S2B_8
-#undef S2B_9
-#undef S2B_10
-#undef S2B_11
-#undef S2B_no
-#undef SC
-};
-
#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
# define NO_INITIALIZER ((unsigned long)0)
@@ -219,19 +135,21 @@ static bool init_lock_initialized = false;
JEMALLOC_ATTR(constructor)
static void WINAPI
-_init_init_lock(void)
-{
-
- /* If another constructor in the same binary is using mallctl to
- * e.g. setup chunk hooks, it may end up running before this one,
- * and malloc_init_hard will crash trying to lock the uninitialized
- * lock. So we force an initialization of the lock in
- * malloc_init_hard as well. We don't try to care about atomicity
- * of the accessed to the init_lock_initialized boolean, since it
- * really only matters early in the process creation, before any
- * separate thread normally starts doing anything. */
- if (!init_lock_initialized)
- malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT);
+_init_init_lock(void) {
+ /*
+ * If another constructor in the same binary is using mallctl to e.g.
+ * set up extent hooks, it may end up running before this one, and
+ * malloc_init_hard will crash trying to lock the uninitialized lock. So
+ * we force an initialization of the lock in malloc_init_hard as well.
+ * We don't try to care about atomicity of the accesses to the
+ * init_lock_initialized boolean, since it really only matters early in
+ * the process creation, before any separate thread normally starts
+ * doing anything.
+ */
+ if (!init_lock_initialized) {
+ malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT,
+ malloc_mutex_rank_exclusive);
+ }
init_lock_initialized = true;
}
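
The comment above describes a common constructor-ordering hazard: another constructor in the same binary may call into the allocator before this one runs, so the lock must also be initializable on demand from the main init path. A minimal sketch of that pattern with illustrative names (deliberately non-atomic, since it only matters before any threads exist):

#include <stdbool.h>
#include <pthread.h>

static pthread_mutex_t init_lock;
static bool init_lock_ready = false;

/* Callable from any constructor and from the main init path; constructor
 * ordering across translation units is unspecified, so whichever runs
 * first does the work. */
static void
ensure_init_lock(void) {
	if (!init_lock_ready) {
		pthread_mutex_init(&init_lock, NULL);
		init_lock_ready = true;
	}
}

__attribute__((constructor))
static void
init_lock_ctor(void) {
	ensure_init_lock();
}
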
@@ -267,6 +185,9 @@ typedef struct {
# define UTRACE(a, b, c)
#endif
+/* Whether encountered any invalid config options. */
+static bool had_conf_error = false;
+
/******************************************************************************/
/*
* Function prototypes for static functions that are referenced prior to
@@ -281,48 +202,25 @@ static bool malloc_init_hard(void);
* Begin miscellaneous support functions.
*/
-JEMALLOC_ALWAYS_INLINE_C bool
-malloc_initialized(void)
-{
-
+bool
+malloc_initialized(void) {
return (malloc_init_state == malloc_init_initialized);
}
-JEMALLOC_ALWAYS_INLINE_C void
-malloc_thread_init(void)
-{
-
- /*
- * TSD initialization can't be safely done as a side effect of
- * deallocation, because it is possible for a thread to do nothing but
- * deallocate its TLS data via free(), in which case writing to TLS
- * would cause write-after-free memory corruption. The quarantine
- * facility *only* gets used as a side effect of deallocation, so make
- * a best effort attempt at initializing its TSD by hooking all
- * allocation events.
- */
- if (config_fill && unlikely(opt_quarantine))
- quarantine_alloc_hook();
-}
-
-JEMALLOC_ALWAYS_INLINE_C bool
-malloc_init_a0(void)
-{
-
- if (unlikely(malloc_init_state == malloc_init_uninitialized))
- return (malloc_init_hard_a0());
- return (false);
+JEMALLOC_ALWAYS_INLINE bool
+malloc_init_a0(void) {
+ if (unlikely(malloc_init_state == malloc_init_uninitialized)) {
+ return malloc_init_hard_a0();
+ }
+ return false;
}
-JEMALLOC_ALWAYS_INLINE_C bool
-malloc_init(void)
-{
-
- if (unlikely(!malloc_initialized()) && malloc_init_hard())
- return (true);
- malloc_thread_init();
-
- return (false);
+JEMALLOC_ALWAYS_INLINE bool
+malloc_init(void) {
+ if (unlikely(!malloc_initialized()) && malloc_init_hard()) {
+ return true;
+ }
+ return false;
}
/*
@@ -331,41 +229,27 @@ malloc_init(void)
*/
static void *
-a0ialloc(size_t size, bool zero, bool is_metadata)
-{
-
- if (unlikely(malloc_init_a0()))
- return (NULL);
+a0ialloc(size_t size, bool zero, bool is_internal) {
+ if (unlikely(malloc_init_a0())) {
+ return NULL;
+ }
- return (iallocztm(TSDN_NULL, size, size2index(size), zero, NULL,
- is_metadata, arena_get(TSDN_NULL, 0, true), true));
+ return iallocztm(TSDN_NULL, size, sz_size2index(size), zero, NULL,
+ is_internal, arena_get(TSDN_NULL, 0, true), true);
}
static void
-a0idalloc(void *ptr, bool is_metadata)
-{
-
- idalloctm(TSDN_NULL, ptr, false, is_metadata, true);
-}
-
-arena_t *
-a0get(void)
-{
-
- return (a0);
+a0idalloc(void *ptr, bool is_internal) {
+ idalloctm(TSDN_NULL, ptr, NULL, NULL, is_internal, true);
}
void *
-a0malloc(size_t size)
-{
-
- return (a0ialloc(size, false, true));
+a0malloc(size_t size) {
+ return a0ialloc(size, false, true);
}
void
-a0dalloc(void *ptr)
-{
-
+a0dalloc(void *ptr) {
a0idalloc(ptr, true);
}
@@ -376,18 +260,16 @@ a0dalloc(void *ptr)
*/
void *
-bootstrap_malloc(size_t size)
-{
-
- if (unlikely(size == 0))
+bootstrap_malloc(size_t size) {
+ if (unlikely(size == 0)) {
size = 1;
+ }
- return (a0ialloc(size, false, false));
+ return a0ialloc(size, false, false);
}
void *
-bootstrap_calloc(size_t num, size_t size)
-{
+bootstrap_calloc(size_t num, size_t size) {
size_t num_size;
num_size = num * size;
@@ -396,58 +278,50 @@ bootstrap_calloc(size_t num, size_t size)
num_size = 1;
}
- return (a0ialloc(num_size, true, false));
+ return a0ialloc(num_size, true, false);
}
void
-bootstrap_free(void *ptr)
-{
-
- if (unlikely(ptr == NULL))
+bootstrap_free(void *ptr) {
+ if (unlikely(ptr == NULL)) {
return;
+ }
a0idalloc(ptr, false);
}
-static void
-arena_set(unsigned ind, arena_t *arena)
-{
-
- atomic_write_p((void **)&arenas[ind], arena);
+void
+arena_set(unsigned ind, arena_t *arena) {
+ atomic_store_p(&arenas[ind], arena, ATOMIC_RELEASE);
}
static void
-narenas_total_set(unsigned narenas)
-{
-
- atomic_write_u(&narenas_total, narenas);
+narenas_total_set(unsigned narenas) {
+ atomic_store_u(&narenas_total, narenas, ATOMIC_RELEASE);
}
static void
-narenas_total_inc(void)
-{
-
- atomic_add_u(&narenas_total, 1);
+narenas_total_inc(void) {
+ atomic_fetch_add_u(&narenas_total, 1, ATOMIC_RELEASE);
}
unsigned
-narenas_total_get(void)
-{
-
- return (atomic_read_u(&narenas_total));
+narenas_total_get(void) {
+ return atomic_load_u(&narenas_total, ATOMIC_ACQUIRE);
}
/* Create a new arena and insert it into the arenas array at index ind. */
static arena_t *
-arena_init_locked(tsdn_t *tsdn, unsigned ind)
-{
+arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
arena_t *arena;
assert(ind <= narenas_total_get());
- if (ind > MALLOCX_ARENA_MAX)
- return (NULL);
- if (ind == narenas_total_get())
+ if (ind >= MALLOCX_ARENA_LIMIT) {
+ return NULL;
+ }
+ if (ind == narenas_total_get()) {
narenas_total_inc();
+ }
/*
* Another thread may have already initialized arenas[ind] if it's an
@@ -456,46 +330,60 @@ arena_init_locked(tsdn_t *tsdn, unsigned ind)
arena = arena_get(tsdn, ind, false);
if (arena != NULL) {
assert(ind < narenas_auto);
- return (arena);
+ return arena;
}
/* Actually initialize the arena. */
- arena = arena_new(tsdn, ind);
- arena_set(ind, arena);
- return (arena);
+ arena = arena_new(tsdn, ind, extent_hooks);
+
+ return arena;
+}
+
+static void
+arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) {
+ if (ind == 0) {
+ return;
+ }
+ if (have_background_thread) {
+ bool err;
+ malloc_mutex_lock(tsdn, &background_thread_lock);
+ err = background_thread_create(tsdn_tsd(tsdn), ind);
+ malloc_mutex_unlock(tsdn, &background_thread_lock);
+ if (err) {
+ malloc_printf("<jemalloc>: error in background thread "
+ "creation for arena %u. Abort.\n", ind);
+ abort();
+ }
+ }
}
arena_t *
-arena_init(tsdn_t *tsdn, unsigned ind)
-{
+arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
arena_t *arena;
malloc_mutex_lock(tsdn, &arenas_lock);
- arena = arena_init_locked(tsdn, ind);
+ arena = arena_init_locked(tsdn, ind, extent_hooks);
malloc_mutex_unlock(tsdn, &arenas_lock);
- return (arena);
-}
-static void
-arena_bind(tsd_t *tsd, unsigned ind, bool internal)
-{
- arena_t *arena;
+ arena_new_create_background_thread(tsdn, ind);
- if (!tsd_nominal(tsd))
- return;
+ return arena;
+}
- arena = arena_get(tsd_tsdn(tsd), ind, false);
+static void
+arena_bind(tsd_t *tsd, unsigned ind, bool internal) {
+ arena_t *arena = arena_get(tsd_tsdn(tsd), ind, false);
arena_nthreads_inc(arena, internal);
- if (internal)
+ if (internal) {
tsd_iarena_set(tsd, arena);
- else
+ } else {
tsd_arena_set(tsd, arena);
+ }
}
void
-arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
-{
+arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) {
arena_t *oldarena, *newarena;
oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
@@ -506,21 +394,21 @@ arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
}
static void
-arena_unbind(tsd_t *tsd, unsigned ind, bool internal)
-{
+arena_unbind(tsd_t *tsd, unsigned ind, bool internal) {
arena_t *arena;
arena = arena_get(tsd_tsdn(tsd), ind, false);
arena_nthreads_dec(arena, internal);
- if (internal)
+
+ if (internal) {
tsd_iarena_set(tsd, NULL);
- else
+ } else {
tsd_arena_set(tsd, NULL);
+ }
}
arena_tdata_t *
-arena_tdata_get_hard(tsd_t *tsd, unsigned ind)
-{
+arena_tdata_get_hard(tsd_t *tsd, unsigned ind) {
arena_tdata_t *tdata, *arenas_tdata_old;
arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
unsigned narenas_tdata_old, i;
@@ -567,7 +455,7 @@ arena_tdata_get_hard(tsd_t *tsd, unsigned ind)
* Copy to tdata array. It's possible that the actual number of arenas
* has increased since narenas_total_get() was called above, but that
* causes no correctness issues unless two threads concurrently execute
- * the arenas.extend mallctl, which we trust mallctl synchronization to
+ * the arenas.create mallctl, which we trust mallctl synchronization to
* prevent.
*/
@@ -589,19 +477,30 @@ arena_tdata_get_hard(tsd_t *tsd, unsigned ind)
/* Read the refreshed tdata array. */
tdata = &arenas_tdata[ind];
label_return:
- if (arenas_tdata_old != NULL)
+ if (arenas_tdata_old != NULL) {
a0dalloc(arenas_tdata_old);
- return (tdata);
+ }
+ return tdata;
}
/* Slow path, called only by arena_choose(). */
arena_t *
-arena_choose_hard(tsd_t *tsd, bool internal)
-{
+arena_choose_hard(tsd_t *tsd, bool internal) {
arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);
+ if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
+ unsigned choose = percpu_arena_choose();
+ ret = arena_get(tsd_tsdn(tsd), choose, true);
+ assert(ret != NULL);
+ arena_bind(tsd, arena_ind_get(ret), false);
+ arena_bind(tsd, arena_ind_get(ret), true);
+
+ return ret;
+ }
+
if (narenas_auto > 1) {
unsigned i, j, choose[2], first_null;
+ bool is_new_arena[2];
/*
* Determine binding for both non-internal and internal
@@ -611,8 +510,10 @@ arena_choose_hard(tsd_t *tsd, bool internal)
* choose[1]: For internal metadata allocation.
*/
- for (j = 0; j < 2; j++)
+ for (j = 0; j < 2; j++) {
choose[j] = 0;
+ is_new_arena[j] = false;
+ }
first_null = narenas_auto;
malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
@@ -628,8 +529,9 @@ arena_choose_hard(tsd_t *tsd, bool internal)
tsd_tsdn(tsd), i, false), !!j) <
arena_nthreads_get(arena_get(
tsd_tsdn(tsd), choose[j], false),
- !!j))
+ !!j)) {
choose[j] = i;
+ }
}
} else if (first_null == narenas_auto) {
/*
@@ -663,64 +565,61 @@ arena_choose_hard(tsd_t *tsd, bool internal)
/* Initialize a new arena. */
choose[j] = first_null;
arena = arena_init_locked(tsd_tsdn(tsd),
- choose[j]);
+ choose[j],
+ (extent_hooks_t *)&extent_hooks_default);
if (arena == NULL) {
malloc_mutex_unlock(tsd_tsdn(tsd),
&arenas_lock);
- return (NULL);
+ return NULL;
}
- if (!!j == internal)
+ is_new_arena[j] = true;
+ if (!!j == internal) {
ret = arena;
+ }
}
arena_bind(tsd, choose[j], !!j);
}
malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);
+
+ for (j = 0; j < 2; j++) {
+ if (is_new_arena[j]) {
+ assert(choose[j] > 0);
+ arena_new_create_background_thread(
+ tsd_tsdn(tsd), choose[j]);
+ }
+ }
+
} else {
ret = arena_get(tsd_tsdn(tsd), 0, false);
arena_bind(tsd, 0, false);
arena_bind(tsd, 0, true);
}
- return (ret);
+ return ret;
}
void
-thread_allocated_cleanup(tsd_t *tsd)
-{
-
- /* Do nothing. */
-}
-
-void
-thread_deallocated_cleanup(tsd_t *tsd)
-{
-
- /* Do nothing. */
-}
-
-void
-iarena_cleanup(tsd_t *tsd)
-{
+iarena_cleanup(tsd_t *tsd) {
arena_t *iarena;
iarena = tsd_iarena_get(tsd);
- if (iarena != NULL)
- arena_unbind(tsd, iarena->ind, true);
+ if (iarena != NULL) {
+ arena_unbind(tsd, arena_ind_get(iarena), true);
+ }
}
void
-arena_cleanup(tsd_t *tsd)
-{
+arena_cleanup(tsd_t *tsd) {
arena_t *arena;
arena = tsd_arena_get(tsd);
- if (arena != NULL)
- arena_unbind(tsd, arena->ind, false);
+ if (arena != NULL) {
+ arena_unbind(tsd, arena_ind_get(arena), false);
+ }
}
void
-arenas_tdata_cleanup(tsd_t *tsd)
-{
+arenas_tdata_cleanup(tsd_t *tsd) {
arena_tdata_t *arenas_tdata;
/* Prevent tsd->arenas_tdata from being (re)created. */
@@ -733,25 +632,9 @@ arenas_tdata_cleanup(tsd_t *tsd)
}
}
-void
-narenas_tdata_cleanup(tsd_t *tsd)
-{
-
- /* Do nothing. */
-}
-
-void
-arenas_tdata_bypass_cleanup(tsd_t *tsd)
-{
-
- /* Do nothing. */
-}
-
static void
-stats_print_atexit(void)
-{
-
- if (config_tcache && config_stats) {
+stats_print_atexit(void) {
+ if (config_stats) {
tsdn_t *tsdn;
unsigned narenas, i;
@@ -769,21 +652,41 @@ stats_print_atexit(void)
if (arena != NULL) {
tcache_t *tcache;
- /*
- * tcache_stats_merge() locks bins, so if any
- * code is introduced that acquires both arena
- * and bin locks in the opposite order,
- * deadlocks may result.
- */
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
ql_foreach(tcache, &arena->tcache_ql, link) {
tcache_stats_merge(tsdn, tcache, arena);
}
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(tsdn,
+ &arena->tcache_ql_mtx);
}
}
}
- je_malloc_stats_print(NULL, NULL, NULL);
+ je_malloc_stats_print(NULL, NULL, opt_stats_print_opts);
+}
+
+/*
+ * Ensure that we don't hold any locks upon entry to or exit from allocator
+ * code (in a "broad" sense that doesn't count a reentrant allocation as an
+ * entrance or exit).
+ */
+JEMALLOC_ALWAYS_INLINE void
+check_entry_exit_locking(tsdn_t *tsdn) {
+ if (!config_debug) {
+ return;
+ }
+ if (tsdn_null(tsdn)) {
+ return;
+ }
+ tsd_t *tsd = tsdn_tsd(tsdn);
+ /*
+ * It's possible we hold locks at entry/exit if we're in a nested
+ * allocation.
+ */
+ int8_t reentrancy_level = tsd_reentrancy_level_get(tsd);
+ if (reentrancy_level != 0) {
+ return;
+ }
+ witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
}
/*
@@ -795,22 +698,21 @@ stats_print_atexit(void)
*/
static char *
-jemalloc_secure_getenv(const char *name)
-{
+jemalloc_secure_getenv(const char *name) {
#ifdef JEMALLOC_HAVE_SECURE_GETENV
return secure_getenv(name);
#else
# ifdef JEMALLOC_HAVE_ISSETUGID
- if (issetugid() != 0)
- return (NULL);
+ if (issetugid() != 0) {
+ return NULL;
+ }
# endif
- return (getenv(name));
+ return getenv(name);
#endif
}
static unsigned
-malloc_ncpus(void)
-{
+malloc_ncpus(void) {
long result;
#ifdef _WIN32
@@ -837,10 +739,34 @@ malloc_ncpus(void)
return ((result == -1) ? 1 : (unsigned)result);
}
+static void
+init_opt_stats_print_opts(const char *v, size_t vlen) {
+ size_t opts_len = strlen(opt_stats_print_opts);
+ assert(opts_len <= stats_print_tot_num_options);
+
+ for (size_t i = 0; i < vlen; i++) {
+ switch (v[i]) {
+#define OPTION(o, v, d, s) case o: break;
+ STATS_PRINT_OPTIONS
+#undef OPTION
+ default: continue;
+ }
+
+ if (strchr(opt_stats_print_opts, v[i]) != NULL) {
+ /* Ignore repeated. */
+ continue;
+ }
+
+ opt_stats_print_opts[opts_len++] = v[i];
+ opt_stats_print_opts[opts_len] = '\0';
+ assert(opts_len <= stats_print_tot_num_options);
+ }
+ assert(opts_len == strlen(opt_stats_print_opts));
+}
+
static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
- char const **v_p, size_t *vlen_p)
-{
+ char const **v_p, size_t *vlen_p) {
bool accept;
const char *opts = *opts_p;
@@ -874,10 +800,10 @@ malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
malloc_write("<jemalloc>: Conf string ends "
"with key\n");
}
- return (true);
+ return true;
default:
malloc_write("<jemalloc>: Malformed conf string\n");
- return (true);
+ return true;
}
}
@@ -910,64 +836,50 @@ malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
}
*opts_p = opts;
- return (false);
+ return false;
}
static void
-malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
- size_t vlen)
-{
+malloc_abort_invalid_conf(void) {
+ assert(opt_abort_conf);
+ malloc_printf("<jemalloc>: Abort (abort_conf:true) on invalid conf "
+ "value (see above).\n");
+ abort();
+}
+static void
+malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
+ size_t vlen) {
malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
(int)vlen, v);
+ had_conf_error = true;
+ if (opt_abort_conf) {
+ malloc_abort_invalid_conf();
+ }
}
static void
-malloc_slow_flag_init(void)
-{
+malloc_slow_flag_init(void) {
/*
* Combine the runtime options into malloc_slow for fast path. Called
* after processing all the options.
*/
malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
| (opt_junk_free ? flag_opt_junk_free : 0)
- | (opt_quarantine ? flag_opt_quarantine : 0)
| (opt_zero ? flag_opt_zero : 0)
| (opt_utrace ? flag_opt_utrace : 0)
| (opt_xmalloc ? flag_opt_xmalloc : 0);
- if (config_valgrind)
- malloc_slow_flags |= (in_valgrind ? flag_in_valgrind : 0);
-
malloc_slow = (malloc_slow_flags != 0);
}
static void
-malloc_conf_init(void)
-{
+malloc_conf_init(void) {
unsigned i;
char buf[PATH_MAX + 1];
const char *opts, *k, *v;
size_t klen, vlen;
- /*
- * Automatically configure valgrind before processing options. The
- * valgrind option remains in jemalloc 3.x for compatibility reasons.
- */
- if (config_valgrind) {
- in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
- if (config_fill && unlikely(in_valgrind)) {
- opt_junk = "false";
- opt_junk_alloc = false;
- opt_junk_free = false;
- assert(!opt_zero);
- opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
- opt_redzone = true;
- }
- if (config_tcache && unlikely(in_valgrind))
- opt_tcache = false;
- }
-
for (i = 0; i < 4; i++) {
/* Get runtime configuration. */
switch (i) {
@@ -1043,29 +955,28 @@ malloc_conf_init(void)
while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
&vlen)) {
-#define CONF_MATCH(n) \
+#define CONF_MATCH(n) \
(sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
-#define CONF_MATCH_VALUE(n) \
+#define CONF_MATCH_VALUE(n) \
(sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
-#define CONF_HANDLE_BOOL(o, n, cont) \
+#define CONF_HANDLE_BOOL(o, n) \
if (CONF_MATCH(n)) { \
- if (CONF_MATCH_VALUE("true")) \
+ if (CONF_MATCH_VALUE("true")) { \
o = true; \
- else if (CONF_MATCH_VALUE("false")) \
+ } else if (CONF_MATCH_VALUE("false")) { \
o = false; \
- else { \
+ } else { \
malloc_conf_error( \
"Invalid conf value", \
k, klen, v, vlen); \
} \
- if (cont) \
- continue; \
+ continue; \
}
-#define CONF_MIN_no(um, min) false
-#define CONF_MIN_yes(um, min) ((um) < (min))
-#define CONF_MAX_no(um, max) false
-#define CONF_MAX_yes(um, max) ((um) > (max))
-#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \
+#define CONF_MIN_no(um, min) false
+#define CONF_MIN_yes(um, min) ((um) < (min))
+#define CONF_MAX_no(um, max) false
+#define CONF_MAX_yes(um, max) ((um) > (max))
+#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \
if (CONF_MATCH(n)) { \
uintmax_t um; \
char *end; \
@@ -1079,13 +990,15 @@ malloc_conf_init(void)
k, klen, v, vlen); \
} else if (clip) { \
if (CONF_MIN_##check_min(um, \
- (t)(min))) \
+ (t)(min))) { \
o = (t)(min); \
- else if (CONF_MAX_##check_max( \
- um, (t)(max))) \
+ } else if ( \
+ CONF_MAX_##check_max(um, \
+ (t)(max))) { \
o = (t)(max); \
- else \
+ } else { \
o = (t)um; \
+ } \
} else { \
if (CONF_MIN_##check_min(um, \
(t)(min)) || \
@@ -1095,19 +1008,20 @@ malloc_conf_init(void)
"Out-of-range " \
"conf value", \
k, klen, v, vlen); \
- } else \
+ } else { \
o = (t)um; \
+ } \
} \
continue; \
}
-#define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \
+#define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \
clip) \
CONF_HANDLE_T_U(unsigned, o, n, min, max, \
check_min, check_max, clip)
-#define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \
+#define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \
CONF_HANDLE_T_U(size_t, o, n, min, max, \
check_min, check_max, clip)
-#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
+#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
if (CONF_MATCH(n)) { \
long l; \
char *end; \
@@ -1124,11 +1038,12 @@ malloc_conf_init(void)
malloc_conf_error( \
"Out-of-range conf value", \
k, klen, v, vlen); \
- } else \
+ } else { \
o = l; \
+ } \
continue; \
}
-#define CONF_HANDLE_CHAR_P(o, n, d) \
+#define CONF_HANDLE_CHAR_P(o, n, d) \
if (CONF_MATCH(n)) { \
size_t cpylen = (vlen <= \
sizeof(o)-1) ? vlen : \
@@ -1138,27 +1053,19 @@ malloc_conf_init(void)
continue; \
}
- CONF_HANDLE_BOOL(opt_abort, "abort", true)
- /*
- * Chunks always require at least one header page, as
- * many as 2^(LG_SIZE_CLASS_GROUP+1) data pages (plus an
- * additional page in the presence of cache-oblivious
- * large), and possibly an additional page in the
- * presence of redzones. In order to simplify options
- * processing, use a conservative bound that
- * accommodates all these constraints.
- */
- CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
- LG_SIZE_CLASS_GROUP + 1 + ((config_cache_oblivious
- || config_fill) ? 1 : 0), (sizeof(size_t) << 3) - 1,
- yes, yes, true)
+ CONF_HANDLE_BOOL(opt_abort, "abort")
+ CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf")
+ if (opt_abort_conf && had_conf_error) {
+ malloc_abort_invalid_conf();
+ }
+ CONF_HANDLE_BOOL(opt_retain, "retain")
if (strncmp("dss", k, klen) == 0) {
int i;
bool match = false;
for (i = 0; i < dss_prec_limit; i++) {
if (strncmp(dss_prec_names[i], v, vlen)
== 0) {
- if (chunk_dss_prec_set(i)) {
+ if (extent_dss_prec_set(i)) {
malloc_conf_error(
"Error setting dss",
k, klen, v, vlen);
@@ -1178,45 +1085,25 @@ malloc_conf_init(void)
}
CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
UINT_MAX, yes, no, false)
- if (strncmp("purge", k, klen) == 0) {
- int i;
- bool match = false;
- for (i = 0; i < purge_mode_limit; i++) {
- if (strncmp(purge_mode_names[i], v,
- vlen) == 0) {
- opt_purge = (purge_mode_t)i;
- match = true;
- break;
- }
- }
- if (!match) {
- malloc_conf_error("Invalid conf value",
- k, klen, v, vlen);
- }
+ CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms,
+ "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
+ QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
+ SSIZE_MAX);
+ CONF_HANDLE_SSIZE_T(opt_muzzy_decay_ms,
+ "muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
+ QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
+ SSIZE_MAX);
+ CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
+ if (CONF_MATCH("stats_print_opts")) {
+ init_opt_stats_print_opts(v, vlen);
continue;
}
- CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
- -1, (sizeof(size_t) << 3) - 1)
- CONF_HANDLE_SSIZE_T(opt_decay_time, "decay_time", -1,
- NSTIME_SEC_MAX);
- CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
if (config_fill) {
if (CONF_MATCH("junk")) {
if (CONF_MATCH_VALUE("true")) {
- if (config_valgrind &&
- unlikely(in_valgrind)) {
- malloc_conf_error(
- "Deallocation-time "
- "junk filling cannot "
- "be enabled while "
- "running inside "
- "Valgrind", k, klen, v,
- vlen);
- } else {
- opt_junk = "true";
- opt_junk_alloc = true;
- opt_junk_free = true;
- }
+ opt_junk = "true";
+ opt_junk_alloc = opt_junk_free =
+ true;
} else if (CONF_MATCH_VALUE("false")) {
opt_junk = "false";
opt_junk_alloc = opt_junk_free =
@@ -1226,20 +1113,9 @@ malloc_conf_init(void)
opt_junk_alloc = true;
opt_junk_free = false;
} else if (CONF_MATCH_VALUE("free")) {
- if (config_valgrind &&
- unlikely(in_valgrind)) {
- malloc_conf_error(
- "Deallocation-time "
- "junk filling cannot "
- "be enabled while "
- "running inside "
- "Valgrind", k, klen, v,
- vlen);
- } else {
- opt_junk = "free";
- opt_junk_alloc = false;
- opt_junk_free = true;
- }
+ opt_junk = "free";
+ opt_junk_alloc = false;
+ opt_junk_free = true;
} else {
malloc_conf_error(
"Invalid conf value", k,
@@ -1247,60 +1123,59 @@ malloc_conf_init(void)
}
continue;
}
- CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
- 0, SIZE_T_MAX, no, no, false)
- CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
- CONF_HANDLE_BOOL(opt_zero, "zero", true)
+ CONF_HANDLE_BOOL(opt_zero, "zero")
}
if (config_utrace) {
- CONF_HANDLE_BOOL(opt_utrace, "utrace", true)
+ CONF_HANDLE_BOOL(opt_utrace, "utrace")
}
if (config_xmalloc) {
- CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true)
+ CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
}
- if (config_tcache) {
- CONF_HANDLE_BOOL(opt_tcache, "tcache",
- !config_valgrind || !in_valgrind)
- if (CONF_MATCH("tcache")) {
- assert(config_valgrind && in_valgrind);
- if (opt_tcache) {
- opt_tcache = false;
- malloc_conf_error(
- "tcache cannot be enabled "
- "while running inside Valgrind",
- k, klen, v, vlen);
+ CONF_HANDLE_BOOL(opt_tcache, "tcache")
+ CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max",
+ -1, (sizeof(size_t) << 3) - 1)
+ if (strncmp("percpu_arena", k, klen) == 0) {
+ int i;
+ bool match = false;
+ for (i = percpu_arena_mode_names_base; i <
+ percpu_arena_mode_names_limit; i++) {
+ if (strncmp(percpu_arena_mode_names[i],
+ v, vlen) == 0) {
+ if (!have_percpu_arena) {
+ malloc_conf_error(
+ "No getcpu support",
+ k, klen, v, vlen);
+ }
+ opt_percpu_arena = i;
+ match = true;
+ break;
}
- continue;
}
- CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
- "lg_tcache_max", -1,
- (sizeof(size_t) << 3) - 1)
- }
- if (config_thp) {
- CONF_HANDLE_BOOL(opt_thp, "thp", true)
+ if (!match) {
+ malloc_conf_error("Invalid conf value",
+ k, klen, v, vlen);
+ }
+ continue;
}
+ CONF_HANDLE_BOOL(opt_background_thread,
+ "background_thread");
if (config_prof) {
- CONF_HANDLE_BOOL(opt_prof, "prof", true)
+ CONF_HANDLE_BOOL(opt_prof, "prof")
CONF_HANDLE_CHAR_P(opt_prof_prefix,
"prof_prefix", "jeprof")
- CONF_HANDLE_BOOL(opt_prof_active, "prof_active",
- true)
+ CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
CONF_HANDLE_BOOL(opt_prof_thread_active_init,
- "prof_thread_active_init", true)
+ "prof_thread_active_init")
CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
"lg_prof_sample", 0, (sizeof(uint64_t) << 3)
- 1, no, yes, true)
- CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
- true)
+ CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
"lg_prof_interval", -1,
(sizeof(uint64_t) << 3) - 1)
- CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump",
- true)
- CONF_HANDLE_BOOL(opt_prof_final, "prof_final",
- true)
- CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak",
- true)
+ CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
+ CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
+ CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
}
malloc_conf_error("Invalid conf pair", k, klen, v,
vlen);
@@ -1321,9 +1196,7 @@ malloc_conf_init(void)
}
static bool
-malloc_init_hard_needed(void)
-{
-
+malloc_init_hard_needed(void) {
if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
malloc_init_recursible)) {
/*
@@ -1331,92 +1204,96 @@ malloc_init_hard_needed(void)
* acquired init_lock, or this thread is the initializing
* thread, and it is recursively allocating.
*/
- return (false);
+ return false;
}
#ifdef JEMALLOC_THREADED_INIT
if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
- spin_t spinner;
-
/* Busy-wait until the initializing thread completes. */
- spin_init(&spinner);
+ spin_t spinner = SPIN_INITIALIZER;
do {
malloc_mutex_unlock(TSDN_NULL, &init_lock);
spin_adaptive(&spinner);
malloc_mutex_lock(TSDN_NULL, &init_lock);
} while (!malloc_initialized());
- return (false);
+ return false;
}
#endif
- return (true);
+ return true;
}
static bool
-malloc_init_hard_a0_locked()
-{
-
+malloc_init_hard_a0_locked() {
malloc_initializer = INITIALIZER;
- if (config_prof)
+ if (config_prof) {
prof_boot0();
+ }
malloc_conf_init();
if (opt_stats_print) {
/* Print statistics at exit. */
if (atexit(stats_print_atexit) != 0) {
malloc_write("<jemalloc>: Error in atexit()\n");
- if (opt_abort)
+ if (opt_abort) {
abort();
+ }
}
}
- pages_boot();
- if (base_boot())
- return (true);
- if (chunk_boot())
- return (true);
- if (ctl_boot())
- return (true);
- if (config_prof)
+ if (pages_boot()) {
+ return true;
+ }
+ if (base_boot(TSDN_NULL)) {
+ return true;
+ }
+ if (extent_boot()) {
+ return true;
+ }
+ if (ctl_boot()) {
+ return true;
+ }
+ if (config_prof) {
prof_boot1();
+ }
arena_boot();
- if (config_tcache && tcache_boot(TSDN_NULL))
- return (true);
- if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS))
- return (true);
+ if (tcache_boot(TSDN_NULL)) {
+ return true;
+ }
+ if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS,
+ malloc_mutex_rank_exclusive)) {
+ return true;
+ }
/*
* Create enough scaffolding to allow recursive allocation in
* malloc_ncpus().
*/
narenas_auto = 1;
- narenas_total_set(narenas_auto);
- arenas = &a0;
memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
/*
* Initialize one arena here. The rest are lazily created in
* arena_choose_hard().
*/
- if (arena_init(TSDN_NULL, 0) == NULL)
- return (true);
-
+ if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default)
+ == NULL) {
+ return true;
+ }
+ a0 = arena_get(TSDN_NULL, 0, false);
malloc_init_state = malloc_init_a0_initialized;
- return (false);
+ return false;
}
static bool
-malloc_init_hard_a0(void)
-{
+malloc_init_hard_a0(void) {
bool ret;
malloc_mutex_lock(TSDN_NULL, &init_lock);
ret = malloc_init_hard_a0_locked();
malloc_mutex_unlock(TSDN_NULL, &init_lock);
- return (ret);
+ return ret;
}
/* Initialize data structures which may trigger recursive allocation. */
static bool
-malloc_init_hard_recursible(void)
-{
-
+malloc_init_hard_recursible(void) {
malloc_init_state = malloc_init_recursible;
ncpus = malloc_ncpus();
@@ -1428,99 +1305,216 @@ malloc_init_hard_recursible(void)
if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
jemalloc_postfork_child) != 0) {
malloc_write("<jemalloc>: Error in pthread_atfork()\n");
- if (opt_abort)
+ if (opt_abort) {
abort();
- return (true);
+ }
+ return true;
}
#endif
- return (false);
+ if (background_thread_boot0()) {
+ return true;
+ }
+
+ return false;
}
-static bool
-malloc_init_hard_finish(tsdn_t *tsdn)
-{
+static unsigned
+malloc_narenas_default(void) {
+ assert(ncpus > 0);
+ /*
+ * For SMP systems, create more than one arena per CPU by
+ * default.
+ */
+ if (ncpus > 1) {
+ return ncpus << 2;
+ } else {
+ return 1;
+ }
+}
+
+static percpu_arena_mode_t
+percpu_arena_as_initialized(percpu_arena_mode_t mode) {
+ assert(!malloc_initialized());
+ assert(mode <= percpu_arena_disabled);
- if (malloc_mutex_boot())
- return (true);
+ if (mode != percpu_arena_disabled) {
+ mode += percpu_arena_mode_enabled_base;
+ }
+
+ return mode;
+}
+static bool
+malloc_init_narenas(void) {
+ assert(ncpus > 0);
+
+ if (opt_percpu_arena != percpu_arena_disabled) {
+ if (!have_percpu_arena || malloc_getcpu() < 0) {
+ opt_percpu_arena = percpu_arena_disabled;
+ malloc_printf("<jemalloc>: perCPU arena getcpu() not "
+ "available. Setting narenas to %u.\n", opt_narenas ?
+ opt_narenas : malloc_narenas_default());
+ if (opt_abort) {
+ abort();
+ }
+ } else {
+ if (ncpus >= MALLOCX_ARENA_LIMIT) {
+ malloc_printf("<jemalloc>: narenas w/ percpu"
+ "arena beyond limit (%d)\n", ncpus);
+ if (opt_abort) {
+ abort();
+ }
+ return true;
+ }
+ /* NB: opt_percpu_arena isn't fully initialized yet. */
+ if (percpu_arena_as_initialized(opt_percpu_arena) ==
+ per_phycpu_arena && ncpus % 2 != 0) {
+ malloc_printf("<jemalloc>: invalid "
+ "configuration -- per physical CPU arena "
+ "with odd number (%u) of CPUs (no hyper "
+ "threading?).\n", ncpus);
+ if (opt_abort)
+ abort();
+ }
+ unsigned n = percpu_arena_ind_limit(
+ percpu_arena_as_initialized(opt_percpu_arena));
+ if (opt_narenas < n) {
+ /*
+ * If narenas is specified with percpu_arena
+ * enabled, actual narenas is set as the greater
+ * of the two. percpu_arena_choose will be free
+ * to use any of the arenas based on CPU
+ * id. This is conservative (at a small cost)
+ * but ensures correctness.
+ *
+ * If for some reason the ncpus determined at
+ * boot is not the actual number (e.g. because
+ * of affinity setting from numactl), reserving
+ * narenas this way provides a workaround for
+ * percpu_arena.
+ */
+ opt_narenas = n;
+ }
+ }
+ }
if (opt_narenas == 0) {
- /*
- * For SMP systems, create more than one arena per CPU by
- * default.
- */
- if (ncpus > 1)
- opt_narenas = ncpus << 2;
- else
- opt_narenas = 1;
+ opt_narenas = malloc_narenas_default();
}
+ assert(opt_narenas > 0);
+
narenas_auto = opt_narenas;
/*
* Limit the number of arenas to the indexing range of MALLOCX_ARENA().
*/
- if (narenas_auto > MALLOCX_ARENA_MAX) {
- narenas_auto = MALLOCX_ARENA_MAX;
+ if (narenas_auto >= MALLOCX_ARENA_LIMIT) {
+ narenas_auto = MALLOCX_ARENA_LIMIT - 1;
malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
narenas_auto);
}
narenas_total_set(narenas_auto);
- /* Allocate and initialize arenas. */
- arenas = (arena_t **)base_alloc(tsdn, sizeof(arena_t *) *
- (MALLOCX_ARENA_MAX+1));
- if (arenas == NULL)
- return (true);
- /* Copy the pointer to the one arena that was already initialized. */
- arena_set(0, a0);
+ return false;
+}
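+
+The narenas/percpu interaction spelled out in the comment above boils down to: when a per-CPU arena mode is active, the effective number of automatic arenas is the larger of the user's request and the per-CPU index limit, and the 4*ncpus default only applies if nothing else set narenas. A hypothetical restatement of that computation (percpu_ind_limit stands in for percpu_arena_ind_limit()):
+
+#include <stdbool.h>
+
+static unsigned
+effective_narenas(unsigned opt_narenas, unsigned ncpus, bool percpu_enabled,
+    unsigned percpu_ind_limit) {
+	unsigned n = opt_narenas;
+	if (percpu_enabled && n < percpu_ind_limit) {
+		/* Reserve enough arenas for percpu_arena_choose() to index
+		 * by CPU id, even if the user asked for fewer. */
+		n = percpu_ind_limit;
+	}
+	if (n == 0) {
+		/* Default: 4x the CPU count on SMP, else a single arena. */
+		n = (ncpus > 1) ? ncpus << 2 : 1;
+	}
+	/* (Clamping to MALLOCX_ARENA_LIMIT - 1, done above, is omitted.) */
+	return n;
+}
+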
+
+static void
+malloc_init_percpu(void) {
+ opt_percpu_arena = percpu_arena_as_initialized(opt_percpu_arena);
+}
+
+static bool
+malloc_init_hard_finish(void) {
+ if (malloc_mutex_boot()) {
+ return true;
+ }
malloc_init_state = malloc_init_initialized;
malloc_slow_flag_init();
- return (false);
+ return false;
+}
+
+static void
+malloc_init_hard_cleanup(tsdn_t *tsdn, bool reentrancy_set) {
+ malloc_mutex_assert_owner(tsdn, &init_lock);
+ malloc_mutex_unlock(tsdn, &init_lock);
+ if (reentrancy_set) {
+ assert(!tsdn_null(tsdn));
+ tsd_t *tsd = tsdn_tsd(tsdn);
+ assert(tsd_reentrancy_level_get(tsd) > 0);
+ post_reentrancy(tsd);
+ }
}
static bool
-malloc_init_hard(void)
-{
+malloc_init_hard(void) {
tsd_t *tsd;
#if defined(_WIN32) && _WIN32_WINNT < 0x0600
_init_init_lock();
#endif
malloc_mutex_lock(TSDN_NULL, &init_lock);
+
+#define UNLOCK_RETURN(tsdn, ret, reentrancy) \
+ malloc_init_hard_cleanup(tsdn, reentrancy); \
+ return ret;
+
if (!malloc_init_hard_needed()) {
- malloc_mutex_unlock(TSDN_NULL, &init_lock);
- return (false);
+ UNLOCK_RETURN(TSDN_NULL, false, false)
}
if (malloc_init_state != malloc_init_a0_initialized &&
malloc_init_hard_a0_locked()) {
- malloc_mutex_unlock(TSDN_NULL, &init_lock);
- return (true);
+ UNLOCK_RETURN(TSDN_NULL, true, false)
}
malloc_mutex_unlock(TSDN_NULL, &init_lock);
/* Recursive allocation relies on functional tsd. */
tsd = malloc_tsd_boot0();
- if (tsd == NULL)
- return (true);
- if (malloc_init_hard_recursible())
- return (true);
- malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);
+ if (tsd == NULL) {
+ return true;
+ }
+ if (malloc_init_hard_recursible()) {
+ return true;
+ }
+ malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);
+ /* Set reentrancy level to 1 during init. */
+ pre_reentrancy(tsd);
+ /* Initialize narenas before prof_boot2 (for allocation). */
+ if (malloc_init_narenas() || background_thread_boot1(tsd_tsdn(tsd))) {
+ UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
+ }
if (config_prof && prof_boot2(tsd)) {
- malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
- return (true);
+ UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
}
- if (malloc_init_hard_finish(tsd_tsdn(tsd))) {
- malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
- return (true);
- }
+ malloc_init_percpu();
+ if (malloc_init_hard_finish()) {
+ UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
+ }
+ post_reentrancy(tsd);
malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
+
malloc_tsd_boot1();
- return (false);
+ /* Update TSD after tsd_boot1. */
+ tsd = tsd_fetch();
+ if (opt_background_thread) {
+ assert(have_background_thread);
+ /*
+		 * Need to finish init and unlock before creating background
+ * threads (pthread_create depends on malloc).
+ */
+ malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
+ bool err = background_thread_create(tsd, 0);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
+ if (err) {
+ return true;
+ }
+ }
+#undef UNLOCK_RETURN
+ return false;
}
/*
@@ -1528,461 +1522,725 @@ malloc_init_hard(void)
*/
/******************************************************************************/
/*
- * Begin malloc(3)-compatible functions.
+ * Begin allocation-path internal functions and data structures.
*/
-static void *
-ialloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, bool zero,
- prof_tctx_t *tctx, bool slow_path)
-{
- void *p;
+/*
+ * Settings determined by the documented behavior of the allocation functions.
+ */
+typedef struct static_opts_s static_opts_t;
+struct static_opts_s {
+ /* Whether or not allocation size may overflow. */
+ bool may_overflow;
+ /* Whether or not allocations of size 0 should be treated as size 1. */
+ bool bump_empty_alloc;
+ /*
+ * Whether to assert that allocations are not of size 0 (after any
+ * bumping).
+ */
+ bool assert_nonempty_alloc;
- if (tctx == NULL)
- return (NULL);
- if (usize <= SMALL_MAXCLASS) {
- szind_t ind_large = size2index(LARGE_MINCLASS);
- p = ialloc(tsd, LARGE_MINCLASS, ind_large, zero, slow_path);
- if (p == NULL)
- return (NULL);
- arena_prof_promoted(tsd_tsdn(tsd), p, usize);
- } else
- p = ialloc(tsd, usize, ind, zero, slow_path);
+ /*
+ * Whether or not to modify the 'result' argument to malloc in case of
+ * error.
+ */
+ bool null_out_result_on_error;
+ /* Whether to set errno when we encounter an error condition. */
+ bool set_errno_on_error;
- return (p);
-}
+ /*
+ * The minimum valid alignment for functions requesting aligned storage.
+ */
+ size_t min_alignment;
-JEMALLOC_ALWAYS_INLINE_C void *
-ialloc_prof(tsd_t *tsd, size_t usize, szind_t ind, bool zero, bool slow_path)
-{
- void *p;
- prof_tctx_t *tctx;
+ /* The error string to use if we oom. */
+ const char *oom_string;
+ /* The error string to use if the passed-in alignment is invalid. */
+ const char *invalid_alignment_string;
- tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
- if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
- p = ialloc_prof_sample(tsd, usize, ind, zero, tctx, slow_path);
- else
- p = ialloc(tsd, usize, ind, zero, slow_path);
- if (unlikely(p == NULL)) {
- prof_alloc_rollback(tsd, tctx, true);
- return (NULL);
- }
- prof_malloc(tsd_tsdn(tsd), p, usize, tctx);
+ /*
+ * False if we're configured to skip some time-consuming operations.
+ *
+ * This isn't really a malloc "behavior", but it acts as a useful
+ * summary of several other static (or at least, static after program
+ * initialization) options.
+ */
+ bool slow;
+};
- return (p);
+JEMALLOC_ALWAYS_INLINE void
+static_opts_init(static_opts_t *static_opts) {
+ static_opts->may_overflow = false;
+ static_opts->bump_empty_alloc = false;
+ static_opts->assert_nonempty_alloc = false;
+ static_opts->null_out_result_on_error = false;
+ static_opts->set_errno_on_error = false;
+ static_opts->min_alignment = 0;
+ static_opts->oom_string = "";
+ static_opts->invalid_alignment_string = "";
+ static_opts->slow = false;
}
/*
- * ialloc_body() is inlined so that fast and slow paths are generated separately
- * with statically known slow_path.
- *
- * This function guarantees that *tsdn is non-NULL on success.
+ * These correspond to the macros in jemalloc/jemalloc_macros.h. Broadly, we
+ * should have one constant here per magic value there. Note however that the
+ * representations need not be related.
*/
-JEMALLOC_ALWAYS_INLINE_C void *
-ialloc_body(size_t size, bool zero, tsdn_t **tsdn, size_t *usize,
- bool slow_path)
-{
- tsd_t *tsd;
- szind_t ind;
-
- if (slow_path && unlikely(malloc_init())) {
- *tsdn = NULL;
- return (NULL);
- }
+#define TCACHE_IND_NONE ((unsigned)-1)
+#define TCACHE_IND_AUTOMATIC ((unsigned)-2)
+#define ARENA_IND_AUTOMATIC ((unsigned)-1)
+
+typedef struct dynamic_opts_s dynamic_opts_t;
+struct dynamic_opts_s {
+ void **result;
+ size_t num_items;
+ size_t item_size;
+ size_t alignment;
+ bool zero;
+ unsigned tcache_ind;
+ unsigned arena_ind;
+};
- tsd = tsd_fetch();
- *tsdn = tsd_tsdn(tsd);
- witness_assert_lockless(tsd_tsdn(tsd));
+JEMALLOC_ALWAYS_INLINE void
+dynamic_opts_init(dynamic_opts_t *dynamic_opts) {
+ dynamic_opts->result = NULL;
+ dynamic_opts->num_items = 0;
+ dynamic_opts->item_size = 0;
+ dynamic_opts->alignment = 0;
+ dynamic_opts->zero = false;
+ dynamic_opts->tcache_ind = TCACHE_IND_AUTOMATIC;
+ dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC;
+}
+
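+/*
+ * Each public entry point initializes a static_opts_t / dynamic_opts_t pair
+ * with the functions above, overrides only the fields it needs (e.g.
+ * je_calloc() sets may_overflow and zero), and then calls imalloc().
+ */
+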
+/* ind is ignored if dopts->alignment > 0. */
+JEMALLOC_ALWAYS_INLINE void *
+imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
+ size_t size, size_t usize, szind_t ind) {
+ tcache_t *tcache;
+ arena_t *arena;
- ind = size2index(size);
- if (unlikely(ind >= NSIZES))
- return (NULL);
+ /* Fill in the tcache. */
+ if (dopts->tcache_ind == TCACHE_IND_AUTOMATIC) {
+ if (likely(!sopts->slow)) {
+ /* Getting tcache ptr unconditionally. */
+ tcache = tsd_tcachep_get(tsd);
+ assert(tcache == tcache_get(tsd));
+ } else {
+ tcache = tcache_get(tsd);
+ }
+ } else if (dopts->tcache_ind == TCACHE_IND_NONE) {
+ tcache = NULL;
+ } else {
+ tcache = tcaches_get(tsd, dopts->tcache_ind);
+ }
- if (config_stats || (config_prof && opt_prof) || (slow_path &&
- config_valgrind && unlikely(in_valgrind))) {
- *usize = index2size(ind);
- assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
+ /* Fill in the arena. */
+ if (dopts->arena_ind == ARENA_IND_AUTOMATIC) {
+ /*
+ * In case of automatic arena management, we defer arena
+ * computation until as late as we can, hoping to fill the
+ * allocation out of the tcache.
+ */
+ arena = NULL;
+ } else {
+ arena = arena_get(tsd_tsdn(tsd), dopts->arena_ind, true);
}
- if (config_prof && opt_prof)
- return (ialloc_prof(tsd, *usize, ind, zero, slow_path));
+ if (unlikely(dopts->alignment != 0)) {
+ return ipalloct(tsd_tsdn(tsd), usize, dopts->alignment,
+ dopts->zero, tcache, arena);
+ }
- return (ialloc(tsd, size, ind, zero, slow_path));
+ return iallocztm(tsd_tsdn(tsd), size, ind, dopts->zero, tcache, false,
+ arena, sopts->slow);
}
-JEMALLOC_ALWAYS_INLINE_C void
-ialloc_post_check(void *ret, tsdn_t *tsdn, size_t usize, const char *func,
- bool update_errno, bool slow_path)
-{
+JEMALLOC_ALWAYS_INLINE void *
+imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
+ size_t usize, szind_t ind) {
+ void *ret;
- assert(!tsdn_null(tsdn) || ret == NULL);
+	/*
+	 * For small allocations, sampling bumps the usize. If so, we allocate
+	 * from the ind_large bucket: the sampled allocation then gets its own
+	 * (large) extent, which is where the profiling context is stored.
+	 */
+ szind_t ind_large;
+ size_t bumped_usize = usize;
- if (unlikely(ret == NULL)) {
- if (slow_path && config_xmalloc && unlikely(opt_xmalloc)) {
- malloc_printf("<jemalloc>: Error in %s(): out of "
- "memory\n", func);
- abort();
+ if (usize <= SMALL_MAXCLASS) {
+ assert(((dopts->alignment == 0) ? sz_s2u(LARGE_MINCLASS) :
+ sz_sa2u(LARGE_MINCLASS, dopts->alignment))
+ == LARGE_MINCLASS);
+ ind_large = sz_size2index(LARGE_MINCLASS);
+ bumped_usize = sz_s2u(LARGE_MINCLASS);
+ ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize,
+ bumped_usize, ind_large);
+ if (unlikely(ret == NULL)) {
+ return NULL;
}
- if (update_errno)
- set_errno(ENOMEM);
- }
- if (config_stats && likely(ret != NULL)) {
- assert(usize == isalloc(tsdn, ret, config_prof));
- *tsd_thread_allocatedp_get(tsdn_tsd(tsdn)) += usize;
+ arena_prof_promote(tsd_tsdn(tsd), ret, usize);
+ } else {
+ ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind);
}
- witness_assert_lockless(tsdn);
+
+ return ret;
}
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-void JEMALLOC_NOTHROW *
-JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
-je_malloc(size_t size)
-{
- void *ret;
- tsdn_t *tsdn;
- size_t usize JEMALLOC_CC_SILENCE_INIT(0);
+/*
+ * Returns true if the allocation will overflow, and false otherwise. Sets
+ * *size to the product either way.
+ */
+JEMALLOC_ALWAYS_INLINE bool
+compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts,
+ size_t *size) {
+ /*
+ * This function is just num_items * item_size, except that we may have
+ * to check for overflow.
+ */
- if (size == 0)
- size = 1;
+ if (!may_overflow) {
+ assert(dopts->num_items == 1);
+ *size = dopts->item_size;
+ return false;
+ }
- if (likely(!malloc_slow)) {
- ret = ialloc_body(size, false, &tsdn, &usize, false);
- ialloc_post_check(ret, tsdn, usize, "malloc", true, false);
- } else {
- ret = ialloc_body(size, false, &tsdn, &usize, true);
- ialloc_post_check(ret, tsdn, usize, "malloc", true, true);
- UTRACE(0, size, ret);
- JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false);
+ /* A size_t with its high-half bits all set to 1. */
+	static const size_t high_bits = SIZE_T_MAX << (sizeof(size_t) * 8 / 2);
+
+ *size = dopts->item_size * dopts->num_items;
+
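+	/*
+	 * A product of zero means that either one of the operands was zero
+	 * (no overflow), or that the multiplication overflowed and wrapped
+	 * around to exactly zero (both operands non-zero).
+	 */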
+ if (unlikely(*size == 0)) {
+ return (dopts->num_items != 0 && dopts->item_size != 0);
}
- return (ret);
+ /*
+ * We got a non-zero size, but we don't know if we overflowed to get
+ * there. To avoid having to do a divide, we'll be clever and note that
+ * if both A and B can be represented in N/2 bits, then their product
+ * can be represented in N bits (without the possibility of overflow).
+ */
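+	/*
+	 * For example, with 64-bit size_t, high_bits is 0xffffffff00000000;
+	 * if num_items and item_size both fit in 32 bits, their product fits
+	 * in 64 bits and the division below is skipped.
+	 */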
+ if (likely((high_bits & (dopts->num_items | dopts->item_size)) == 0)) {
+ return false;
+ }
+ if (likely(*size / dopts->item_size == dopts->num_items)) {
+ return false;
+ }
+ return true;
}
-static void *
-imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize,
- prof_tctx_t *tctx)
-{
- void *p;
+JEMALLOC_ALWAYS_INLINE int
+imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
+ /* Where the actual allocated memory will live. */
+ void *allocation = NULL;
+ /* Filled in by compute_size_with_overflow below. */
+ size_t size = 0;
+ /*
+ * For unaligned allocations, we need only ind. For aligned
+ * allocations, or in case of stats or profiling we need usize.
+ *
+ * These are actually dead stores, in that their values are reset before
+ * any branch on their value is taken. Sometimes though, it's
+ * convenient to pass them as arguments before this point. To avoid
+ * undefined behavior then, we initialize them with dummy stores.
+ */
+ szind_t ind = 0;
+ size_t usize = 0;
- if (tctx == NULL)
- return (NULL);
- if (usize <= SMALL_MAXCLASS) {
- assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS);
- p = ipalloc(tsd, LARGE_MINCLASS, alignment, false);
- if (p == NULL)
- return (NULL);
- arena_prof_promoted(tsd_tsdn(tsd), p, usize);
- } else
- p = ipalloc(tsd, usize, alignment, false);
+ /* Reentrancy is only checked on slow path. */
+ int8_t reentrancy_level;
- return (p);
-}
+ /* Compute the amount of memory the user wants. */
+ if (unlikely(compute_size_with_overflow(sopts->may_overflow, dopts,
+ &size))) {
+ goto label_oom;
+ }
-JEMALLOC_ALWAYS_INLINE_C void *
-imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize)
-{
- void *p;
- prof_tctx_t *tctx;
+ /* Validate the user input. */
+ if (sopts->bump_empty_alloc) {
+ if (unlikely(size == 0)) {
+ size = 1;
+ }
+ }
- tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
- if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
- p = imemalign_prof_sample(tsd, alignment, usize, tctx);
- else
- p = ipalloc(tsd, usize, alignment, false);
- if (unlikely(p == NULL)) {
- prof_alloc_rollback(tsd, tctx, true);
- return (NULL);
+ if (sopts->assert_nonempty_alloc) {
+		assert(size != 0);
}
- prof_malloc(tsd_tsdn(tsd), p, usize, tctx);
- return (p);
-}
+ if (unlikely(dopts->alignment < sopts->min_alignment
+ || (dopts->alignment & (dopts->alignment - 1)) != 0)) {
+ goto label_invalid_alignment;
+ }
-JEMALLOC_ATTR(nonnull(1))
-static int
-imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
-{
- int ret;
- tsd_t *tsd;
- size_t usize;
- void *result;
+ /* This is the beginning of the "core" algorithm. */
+
+ if (dopts->alignment == 0) {
+ ind = sz_size2index(size);
+ if (unlikely(ind >= NSIZES)) {
+ goto label_oom;
+ }
+ if (config_stats || (config_prof && opt_prof)) {
+ usize = sz_index2size(ind);
+ assert(usize > 0 && usize <= LARGE_MAXCLASS);
+ }
+ } else {
+ usize = sz_sa2u(size, dopts->alignment);
+ if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
+ goto label_oom;
+ }
+ }
- assert(min_alignment != 0);
+ check_entry_exit_locking(tsd_tsdn(tsd));
- if (unlikely(malloc_init())) {
- tsd = NULL;
- result = NULL;
- goto label_oom;
+ /*
+ * If we need to handle reentrancy, we can do it out of a
+ * known-initialized arena (i.e. arena 0).
+ */
+ reentrancy_level = tsd_reentrancy_level_get(tsd);
+ if (sopts->slow && unlikely(reentrancy_level > 0)) {
+ /*
+ * We should never specify particular arenas or tcaches from
+ * within our internal allocations.
+ */
+ assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC ||
+ dopts->tcache_ind == TCACHE_IND_NONE);
+		assert(dopts->arena_ind == ARENA_IND_AUTOMATIC);
+ dopts->tcache_ind = TCACHE_IND_NONE;
+ /* We know that arena 0 has already been initialized. */
+ dopts->arena_ind = 0;
}
- tsd = tsd_fetch();
- witness_assert_lockless(tsd_tsdn(tsd));
- if (size == 0)
- size = 1;
- /* Make sure that alignment is a large enough power of 2. */
- if (unlikely(((alignment - 1) & alignment) != 0
- || (alignment < min_alignment))) {
- if (config_xmalloc && unlikely(opt_xmalloc)) {
- malloc_write("<jemalloc>: Error allocating "
- "aligned memory: invalid alignment\n");
- abort();
+ /* If profiling is on, get our profiling context. */
+ if (config_prof && opt_prof) {
+ /*
+ * Note that if we're going down this path, usize must have been
+ * initialized in the previous if statement.
+ */
+ prof_tctx_t *tctx = prof_alloc_prep(
+ tsd, usize, prof_active_get_unlocked(), true);
+
+ alloc_ctx_t alloc_ctx;
+ if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
+ alloc_ctx.slab = (usize <= SMALL_MAXCLASS);
+ allocation = imalloc_no_sample(
+ sopts, dopts, tsd, usize, usize, ind);
+ } else if ((uintptr_t)tctx > (uintptr_t)1U) {
+ /*
+ * Note that ind might still be 0 here. This is fine;
+ * imalloc_sample ignores ind if dopts->alignment > 0.
+ */
+ allocation = imalloc_sample(
+ sopts, dopts, tsd, usize, ind);
+ alloc_ctx.slab = false;
+ } else {
+ allocation = NULL;
}
- result = NULL;
- ret = EINVAL;
- goto label_return;
- }
- usize = sa2u(size, alignment);
- if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
- result = NULL;
- goto label_oom;
+ if (unlikely(allocation == NULL)) {
+ prof_alloc_rollback(tsd, tctx, true);
+ goto label_oom;
+ }
+ prof_malloc(tsd_tsdn(tsd), allocation, usize, &alloc_ctx, tctx);
+ } else {
+ /*
+ * If dopts->alignment > 0, then ind is still 0, but usize was
+ * computed in the previous if statement. Down the positive
+ * alignment path, imalloc_no_sample ignores ind and size
+ * (relying only on usize).
+ */
+ allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize,
+ ind);
+ if (unlikely(allocation == NULL)) {
+ goto label_oom;
+ }
}
- if (config_prof && opt_prof)
- result = imemalign_prof(tsd, alignment, usize);
- else
- result = ipalloc(tsd, usize, alignment, false);
- if (unlikely(result == NULL))
- goto label_oom;
- assert(((uintptr_t)result & (alignment - 1)) == ZU(0));
+ /*
+ * Allocation has been done at this point. We still have some
+ * post-allocation work to do though.
+ */
+ assert(dopts->alignment == 0
+ || ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0));
- *memptr = result;
- ret = 0;
-label_return:
- if (config_stats && likely(result != NULL)) {
- assert(usize == isalloc(tsd_tsdn(tsd), result, config_prof));
+ if (config_stats) {
+ assert(usize == isalloc(tsd_tsdn(tsd), allocation));
*tsd_thread_allocatedp_get(tsd) += usize;
}
- UTRACE(0, size, result);
- JEMALLOC_VALGRIND_MALLOC(result != NULL, tsd_tsdn(tsd), result, usize,
- false);
- witness_assert_lockless(tsd_tsdn(tsd));
- return (ret);
+
+ if (sopts->slow) {
+ UTRACE(0, size, allocation);
+ }
+
+ /* Success! */
+ check_entry_exit_locking(tsd_tsdn(tsd));
+ *dopts->result = allocation;
+ return 0;
+
label_oom:
- assert(result == NULL);
+ if (unlikely(sopts->slow) && config_xmalloc && unlikely(opt_xmalloc)) {
+ malloc_write(sopts->oom_string);
+ abort();
+ }
+
+ if (sopts->slow) {
+ UTRACE(NULL, size, NULL);
+ }
+
+ check_entry_exit_locking(tsd_tsdn(tsd));
+
+ if (sopts->set_errno_on_error) {
+ set_errno(ENOMEM);
+ }
+
+ if (sopts->null_out_result_on_error) {
+ *dopts->result = NULL;
+ }
+
+ return ENOMEM;
+
+ /*
+ * This label is only jumped to by one goto; we move it out of line
+ * anyways to avoid obscuring the non-error paths, and for symmetry with
+	 * anyway to avoid obscuring the non-error paths, and for symmetry with
+ */
+label_invalid_alignment:
if (config_xmalloc && unlikely(opt_xmalloc)) {
- malloc_write("<jemalloc>: Error allocating aligned memory: "
- "out of memory\n");
+ malloc_write(sopts->invalid_alignment_string);
abort();
}
- ret = ENOMEM;
- witness_assert_lockless(tsd_tsdn(tsd));
- goto label_return;
+
+ if (sopts->set_errno_on_error) {
+ set_errno(EINVAL);
+ }
+
+ if (sopts->slow) {
+ UTRACE(NULL, size, NULL);
+ }
+
+ check_entry_exit_locking(tsd_tsdn(tsd));
+
+ if (sopts->null_out_result_on_error) {
+ *dopts->result = NULL;
+ }
+
+ return EINVAL;
+}
+
+/* Returns the errno-style error code of the allocation. */
+JEMALLOC_ALWAYS_INLINE int
+imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) {
+ if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) {
+ if (config_xmalloc && unlikely(opt_xmalloc)) {
+ malloc_write(sopts->oom_string);
+ abort();
+ }
+ UTRACE(NULL, dopts->num_items * dopts->item_size, NULL);
+ set_errno(ENOMEM);
+ *dopts->result = NULL;
+
+ return ENOMEM;
+ }
+
+ /* We always need the tsd. Let's grab it right away. */
+ tsd_t *tsd = tsd_fetch();
+ assert(tsd);
+ if (likely(tsd_fast(tsd))) {
+ /* Fast and common path. */
+ tsd_assert_fast(tsd);
+ sopts->slow = false;
+ return imalloc_body(sopts, dopts, tsd);
+ } else {
+ sopts->slow = true;
+ return imalloc_body(sopts, dopts, tsd);
+ }
+}
+/******************************************************************************/
+/*
+ * Begin malloc(3)-compatible functions.
+ */
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
+je_malloc(size_t size) {
+ void *ret;
+ static_opts_t sopts;
+ dynamic_opts_t dopts;
+
+ static_opts_init(&sopts);
+ dynamic_opts_init(&dopts);
+
+ sopts.bump_empty_alloc = true;
+ sopts.null_out_result_on_error = true;
+ sopts.set_errno_on_error = true;
+ sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n";
+
+ dopts.result = &ret;
+ dopts.num_items = 1;
+ dopts.item_size = size;
+
+ imalloc(&sopts, &dopts);
+
+ return ret;
}
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
JEMALLOC_ATTR(nonnull(1))
-je_posix_memalign(void **memptr, size_t alignment, size_t size)
-{
+je_posix_memalign(void **memptr, size_t alignment, size_t size) {
int ret;
+ static_opts_t sopts;
+ dynamic_opts_t dopts;
+
+ static_opts_init(&sopts);
+ dynamic_opts_init(&dopts);
+
+ sopts.bump_empty_alloc = true;
+ sopts.min_alignment = sizeof(void *);
+ sopts.oom_string =
+ "<jemalloc>: Error allocating aligned memory: out of memory\n";
+ sopts.invalid_alignment_string =
+ "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
- ret = imemalign(memptr, alignment, size, sizeof(void *));
+ dopts.result = memptr;
+ dopts.num_items = 1;
+ dopts.item_size = size;
+ dopts.alignment = alignment;
- return (ret);
+ ret = imalloc(&sopts, &dopts);
+ return ret;
}
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
-je_aligned_alloc(size_t alignment, size_t size)
-{
+je_aligned_alloc(size_t alignment, size_t size) {
void *ret;
- int err;
- if (unlikely((err = imemalign(&ret, alignment, size, 1)) != 0)) {
- ret = NULL;
- set_errno(err);
- }
+ static_opts_t sopts;
+ dynamic_opts_t dopts;
+
+ static_opts_init(&sopts);
+ dynamic_opts_init(&dopts);
- return (ret);
+ sopts.bump_empty_alloc = true;
+ sopts.null_out_result_on_error = true;
+ sopts.set_errno_on_error = true;
+ sopts.min_alignment = 1;
+ sopts.oom_string =
+ "<jemalloc>: Error allocating aligned memory: out of memory\n";
+ sopts.invalid_alignment_string =
+ "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
+
+ dopts.result = &ret;
+ dopts.num_items = 1;
+ dopts.item_size = size;
+ dopts.alignment = alignment;
+
+ imalloc(&sopts, &dopts);
+ return ret;
}
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
-je_calloc(size_t num, size_t size)
-{
+je_calloc(size_t num, size_t size) {
void *ret;
- tsdn_t *tsdn;
- size_t num_size;
- size_t usize JEMALLOC_CC_SILENCE_INIT(0);
+ static_opts_t sopts;
+ dynamic_opts_t dopts;
- num_size = num * size;
- if (unlikely(num_size == 0)) {
- if (num == 0 || size == 0)
- num_size = 1;
- else
- num_size = HUGE_MAXCLASS + 1; /* Trigger OOM. */
- /*
- * Try to avoid division here. We know that it isn't possible to
- * overflow during multiplication if neither operand uses any of the
- * most significant half of the bits in a size_t.
- */
- } else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) <<
- 2))) && (num_size / size != num)))
- num_size = HUGE_MAXCLASS + 1; /* size_t overflow. */
+ static_opts_init(&sopts);
+ dynamic_opts_init(&dopts);
- if (likely(!malloc_slow)) {
- ret = ialloc_body(num_size, true, &tsdn, &usize, false);
- ialloc_post_check(ret, tsdn, usize, "calloc", true, false);
- } else {
- ret = ialloc_body(num_size, true, &tsdn, &usize, true);
- ialloc_post_check(ret, tsdn, usize, "calloc", true, true);
- UTRACE(0, num_size, ret);
- JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, true);
- }
+ sopts.may_overflow = true;
+ sopts.bump_empty_alloc = true;
+ sopts.null_out_result_on_error = true;
+ sopts.set_errno_on_error = true;
+ sopts.oom_string = "<jemalloc>: Error in calloc(): out of memory\n";
+
+ dopts.result = &ret;
+ dopts.num_items = num;
+ dopts.item_size = size;
+ dopts.zero = true;
- return (ret);
+ imalloc(&sopts, &dopts);
+
+ return ret;
}
static void *
irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
- prof_tctx_t *tctx)
-{
+ prof_tctx_t *tctx) {
void *p;
- if (tctx == NULL)
- return (NULL);
+ if (tctx == NULL) {
+ return NULL;
+ }
if (usize <= SMALL_MAXCLASS) {
p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
- if (p == NULL)
- return (NULL);
- arena_prof_promoted(tsd_tsdn(tsd), p, usize);
- } else
+ if (p == NULL) {
+ return NULL;
+ }
+ arena_prof_promote(tsd_tsdn(tsd), p, usize);
+ } else {
p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
+ }
- return (p);
+ return p;
}
-JEMALLOC_ALWAYS_INLINE_C void *
-irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize)
-{
+JEMALLOC_ALWAYS_INLINE void *
+irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
+ alloc_ctx_t *alloc_ctx) {
void *p;
bool prof_active;
prof_tctx_t *old_tctx, *tctx;
prof_active = prof_active_get_unlocked();
- old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr);
+ old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
tctx = prof_alloc_prep(tsd, usize, prof_active, true);
- if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
+ if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
- else
+ } else {
p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
+ }
if (unlikely(p == NULL)) {
prof_alloc_rollback(tsd, tctx, true);
- return (NULL);
+ return NULL;
}
prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
old_tctx);
- return (p);
+ return p;
}
-JEMALLOC_INLINE_C void
-ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
-{
- size_t usize;
- UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
-
- witness_assert_lockless(tsd_tsdn(tsd));
+JEMALLOC_ALWAYS_INLINE void
+ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
+ if (!slow_path) {
+ tsd_assert_fast(tsd);
+ }
+ check_entry_exit_locking(tsd_tsdn(tsd));
+ if (tsd_reentrancy_level_get(tsd) != 0) {
+ assert(slow_path);
+ }
assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER);
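+	/*
+	 * Read the size class and slab bit for ptr from the extents radix
+	 * tree once; alloc_ctx is then reused for stats, profiling, and the
+	 * deallocation itself.
+	 */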
+ alloc_ctx_t alloc_ctx;
+ rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
+ rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
+ assert(alloc_ctx.szind != NSIZES);
+
+ size_t usize;
if (config_prof && opt_prof) {
- usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
- prof_free(tsd, ptr, usize);
- } else if (config_stats || config_valgrind)
- usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
- if (config_stats)
+ usize = sz_index2size(alloc_ctx.szind);
+ prof_free(tsd, ptr, usize, &alloc_ctx);
+ } else if (config_stats) {
+ usize = sz_index2size(alloc_ctx.szind);
+ }
+ if (config_stats) {
*tsd_thread_deallocatedp_get(tsd) += usize;
+ }
- if (likely(!slow_path))
- iqalloc(tsd, ptr, tcache, false);
- else {
- if (config_valgrind && unlikely(in_valgrind))
- rzsize = p2rz(tsd_tsdn(tsd), ptr);
- iqalloc(tsd, ptr, tcache, true);
- JEMALLOC_VALGRIND_FREE(ptr, rzsize);
+ if (likely(!slow_path)) {
+ idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
+ false);
+ } else {
+ idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
+ true);
}
}
-JEMALLOC_INLINE_C void
-isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path)
-{
- UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
-
- witness_assert_lockless(tsd_tsdn(tsd));
+JEMALLOC_ALWAYS_INLINE void
+isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
+ if (!slow_path) {
+ tsd_assert_fast(tsd);
+ }
+ check_entry_exit_locking(tsd_tsdn(tsd));
+ if (tsd_reentrancy_level_get(tsd) != 0) {
+ assert(slow_path);
+ }
assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER);
- if (config_prof && opt_prof)
- prof_free(tsd, ptr, usize);
- if (config_stats)
+ alloc_ctx_t alloc_ctx, *ctx;
+ if (config_prof && opt_prof) {
+ rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
+ rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
+ assert(alloc_ctx.szind == sz_size2index(usize));
+ ctx = &alloc_ctx;
+ prof_free(tsd, ptr, usize, ctx);
+ } else {
+ ctx = NULL;
+ }
+
+ if (config_stats) {
*tsd_thread_deallocatedp_get(tsd) += usize;
- if (config_valgrind && unlikely(in_valgrind))
- rzsize = p2rz(tsd_tsdn(tsd), ptr);
- isqalloc(tsd, ptr, usize, tcache, slow_path);
- JEMALLOC_VALGRIND_FREE(ptr, rzsize);
+ }
+
+ if (likely(!slow_path)) {
+ isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, false);
+ } else {
+ isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, true);
+ }
}
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ALLOC_SIZE(2)
-je_realloc(void *ptr, size_t size)
-{
+je_realloc(void *ptr, size_t size) {
void *ret;
tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
size_t old_usize = 0;
- UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
if (unlikely(size == 0)) {
if (ptr != NULL) {
- tsd_t *tsd;
-
/* realloc(ptr, 0) is equivalent to free(ptr). */
UTRACE(ptr, 0, 0);
- tsd = tsd_fetch();
- ifree(tsd, ptr, tcache_get(tsd, false), true);
- return (NULL);
+ tcache_t *tcache;
+ tsd_t *tsd = tsd_fetch();
+ if (tsd_reentrancy_level_get(tsd) == 0) {
+ tcache = tcache_get(tsd);
+ } else {
+ tcache = NULL;
+ }
+ ifree(tsd, ptr, tcache, true);
+ return NULL;
}
size = 1;
}
if (likely(ptr != NULL)) {
- tsd_t *tsd;
-
assert(malloc_initialized() || IS_INITIALIZER);
- malloc_thread_init();
- tsd = tsd_fetch();
-
- witness_assert_lockless(tsd_tsdn(tsd));
+ tsd_t *tsd = tsd_fetch();
- old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
- if (config_valgrind && unlikely(in_valgrind)) {
- old_rzsize = config_prof ? p2rz(tsd_tsdn(tsd), ptr) :
- u2rz(old_usize);
- }
+ check_entry_exit_locking(tsd_tsdn(tsd));
+ alloc_ctx_t alloc_ctx;
+ rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
+ rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
+ assert(alloc_ctx.szind != NSIZES);
+ old_usize = sz_index2size(alloc_ctx.szind);
+ assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
if (config_prof && opt_prof) {
- usize = s2u(size);
- ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ?
- NULL : irealloc_prof(tsd, ptr, old_usize, usize);
+ usize = sz_s2u(size);
+ ret = unlikely(usize == 0 || usize > LARGE_MAXCLASS) ?
+ NULL : irealloc_prof(tsd, ptr, old_usize, usize,
+ &alloc_ctx);
} else {
- if (config_stats || (config_valgrind &&
- unlikely(in_valgrind)))
- usize = s2u(size);
+ if (config_stats) {
+ usize = sz_s2u(size);
+ }
ret = iralloc(tsd, ptr, old_usize, size, 0, false);
}
tsdn = tsd_tsdn(tsd);
} else {
/* realloc(NULL, size) is equivalent to malloc(size). */
- if (likely(!malloc_slow))
- ret = ialloc_body(size, false, &tsdn, &usize, false);
- else
- ret = ialloc_body(size, false, &tsdn, &usize, true);
- assert(!tsdn_null(tsdn) || ret == NULL);
+ return je_malloc(size);
}
if (unlikely(ret == NULL)) {
@@ -1996,31 +2254,38 @@ je_realloc(void *ptr, size_t size)
if (config_stats && likely(ret != NULL)) {
tsd_t *tsd;
- assert(usize == isalloc(tsdn, ret, config_prof));
+ assert(usize == isalloc(tsdn, ret));
tsd = tsdn_tsd(tsdn);
*tsd_thread_allocatedp_get(tsd) += usize;
*tsd_thread_deallocatedp_get(tsd) += old_usize;
}
UTRACE(ptr, size, ret);
- JEMALLOC_VALGRIND_REALLOC(maybe, tsdn, ret, usize, maybe, ptr,
- old_usize, old_rzsize, maybe, false);
- witness_assert_lockless(tsdn);
- return (ret);
+ check_entry_exit_locking(tsdn);
+ return ret;
}
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
-je_free(void *ptr)
-{
-
+je_free(void *ptr) {
UTRACE(ptr, 0, 0);
if (likely(ptr != NULL)) {
tsd_t *tsd = tsd_fetch();
- witness_assert_lockless(tsd_tsdn(tsd));
- if (likely(!malloc_slow))
- ifree(tsd, ptr, tcache_get(tsd, false), false);
- else
- ifree(tsd, ptr, tcache_get(tsd, false), true);
- witness_assert_lockless(tsd_tsdn(tsd));
+ check_entry_exit_locking(tsd_tsdn(tsd));
+
+ tcache_t *tcache;
+ if (likely(tsd_fast(tsd))) {
+ tsd_assert_fast(tsd);
+ /* Unconditionally get tcache ptr on fast path. */
+ tcache = tsd_tcachep_get(tsd);
+ ifree(tsd, ptr, tcache, false);
+ } else {
+ if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
+ tcache = tcache_get(tsd);
+ } else {
+ tcache = NULL;
+ }
+ ifree(tsd, ptr, tcache, true);
+ }
+ check_entry_exit_locking(tsd_tsdn(tsd));
}
}
@@ -2036,12 +2301,29 @@ je_free(void *ptr)
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc)
-je_memalign(size_t alignment, size_t size)
-{
- void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
- if (unlikely(imemalign(&ret, alignment, size, 1) != 0))
- ret = NULL;
- return (ret);
+je_memalign(size_t alignment, size_t size) {
+ void *ret;
+ static_opts_t sopts;
+ dynamic_opts_t dopts;
+
+ static_opts_init(&sopts);
+ dynamic_opts_init(&dopts);
+
+ sopts.bump_empty_alloc = true;
+ sopts.min_alignment = 1;
+ sopts.oom_string =
+ "<jemalloc>: Error allocating aligned memory: out of memory\n";
+ sopts.invalid_alignment_string =
+ "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
+ sopts.null_out_result_on_error = true;
+
+ dopts.result = &ret;
+ dopts.num_items = 1;
+ dopts.item_size = size;
+ dopts.alignment = alignment;
+
+ imalloc(&sopts, &dopts);
+ return ret;
}
#endif
@@ -2049,24 +2331,35 @@ je_memalign(size_t alignment, size_t size)
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc)
-je_valloc(size_t size)
-{
- void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
- if (unlikely(imemalign(&ret, PAGE, size, 1) != 0))
- ret = NULL;
- return (ret);
+je_valloc(size_t size) {
+ void *ret;
+
+ static_opts_t sopts;
+ dynamic_opts_t dopts;
+
+ static_opts_init(&sopts);
+ dynamic_opts_init(&dopts);
+
+ sopts.bump_empty_alloc = true;
+ sopts.null_out_result_on_error = true;
+ sopts.min_alignment = PAGE;
+ sopts.oom_string =
+ "<jemalloc>: Error allocating aligned memory: out of memory\n";
+ sopts.invalid_alignment_string =
+ "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
+
+ dopts.result = &ret;
+ dopts.num_items = 1;
+ dopts.item_size = size;
+ dopts.alignment = PAGE;
+
+ imalloc(&sopts, &dopts);
+
+ return ret;
}
#endif
-/*
- * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
- * #define je_malloc malloc
- */
-#define malloc_is_malloc 1
-#define is_malloc_(a) malloc_is_ ## a
-#define is_malloc(a) is_malloc_(a)
-
-#if ((is_malloc(je_malloc) == 1) && defined(JEMALLOC_GLIBC_MALLOC_HOOK))
+#if defined(JEMALLOC_IS_MALLOC) && defined(JEMALLOC_GLIBC_MALLOC_HOOK)
/*
* glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
* to inconsistently reference libc's malloc(3)-compatible functions
@@ -2079,33 +2372,44 @@ je_valloc(size_t size)
JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
-# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
+# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
je_memalign;
-# endif
+# endif
-#ifdef CPU_COUNT
+# ifdef CPU_COUNT
/*
* To enable static linking with glibc, the libc specific malloc interface must
* be implemented also, so none of glibc's malloc.o functions are added to the
* link.
*/
-#define ALIAS(je_fn) __attribute__((alias (#je_fn), used))
+# define ALIAS(je_fn) __attribute__((alias (#je_fn), used))
/* To force macro expansion of je_ prefix before stringification. */
-#define PREALIAS(je_fn) ALIAS(je_fn)
-void *__libc_malloc(size_t size) PREALIAS(je_malloc);
-void __libc_free(void* ptr) PREALIAS(je_free);
-void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc);
-void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc);
-void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign);
-void *__libc_valloc(size_t size) PREALIAS(je_valloc);
-int __posix_memalign(void** r, size_t a, size_t s)
- PREALIAS(je_posix_memalign);
-#undef PREALIAS
-#undef ALIAS
-
-#endif
-
+# define PREALIAS(je_fn) ALIAS(je_fn)
+# ifdef JEMALLOC_OVERRIDE___LIBC_CALLOC
+void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc);
+# endif
+# ifdef JEMALLOC_OVERRIDE___LIBC_FREE
+void __libc_free(void* ptr) PREALIAS(je_free);
+# endif
+# ifdef JEMALLOC_OVERRIDE___LIBC_MALLOC
+void *__libc_malloc(size_t size) PREALIAS(je_malloc);
+# endif
+# ifdef JEMALLOC_OVERRIDE___LIBC_MEMALIGN
+void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign);
+# endif
+# ifdef JEMALLOC_OVERRIDE___LIBC_REALLOC
+void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc);
+# endif
+# ifdef JEMALLOC_OVERRIDE___LIBC_VALLOC
+void *__libc_valloc(size_t size) PREALIAS(je_valloc);
+# endif
+# ifdef JEMALLOC_OVERRIDE___POSIX_MEMALIGN
+int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign);
+# endif
+# undef PREALIAS
+# undef ALIAS
+# endif
#endif
/*
@@ -2116,235 +2420,95 @@ int __posix_memalign(void** r, size_t a, size_t s)
* Begin non-standard functions.
*/
-JEMALLOC_ALWAYS_INLINE_C bool
-imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
- size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
-{
-
- if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) {
- *alignment = 0;
- *usize = s2u(size);
- } else {
- *alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
- *usize = sa2u(size, *alignment);
- }
- if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS))
- return (true);
- *zero = MALLOCX_ZERO_GET(flags);
- if ((flags & MALLOCX_TCACHE_MASK) != 0) {
- if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
- *tcache = NULL;
- else
- *tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
- } else
- *tcache = tcache_get(tsd, true);
- if ((flags & MALLOCX_ARENA_MASK) != 0) {
- unsigned arena_ind = MALLOCX_ARENA_GET(flags);
- *arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
- if (unlikely(*arena == NULL))
- return (true);
- } else
- *arena = NULL;
- return (false);
-}
-
-JEMALLOC_ALWAYS_INLINE_C void *
-imallocx_flags(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
- tcache_t *tcache, arena_t *arena, bool slow_path)
-{
- szind_t ind;
-
- if (unlikely(alignment != 0))
- return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
- ind = size2index(usize);
- assert(ind < NSIZES);
- return (iallocztm(tsdn, usize, ind, zero, tcache, false, arena,
- slow_path));
-}
-
-static void *
-imallocx_prof_sample(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
- tcache_t *tcache, arena_t *arena, bool slow_path)
-{
- void *p;
-
- if (usize <= SMALL_MAXCLASS) {
- assert(((alignment == 0) ? s2u(LARGE_MINCLASS) :
- sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS);
- p = imallocx_flags(tsdn, LARGE_MINCLASS, alignment, zero,
- tcache, arena, slow_path);
- if (p == NULL)
- return (NULL);
- arena_prof_promoted(tsdn, p, usize);
- } else {
- p = imallocx_flags(tsdn, usize, alignment, zero, tcache, arena,
- slow_path);
- }
-
- return (p);
-}
-
-JEMALLOC_ALWAYS_INLINE_C void *
-imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, bool slow_path)
-{
- void *p;
- size_t alignment;
- bool zero;
- tcache_t *tcache;
- arena_t *arena;
- prof_tctx_t *tctx;
-
- if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
- &zero, &tcache, &arena)))
- return (NULL);
- tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true);
- if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
- p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero,
- tcache, arena, slow_path);
- } else if ((uintptr_t)tctx > (uintptr_t)1U) {
- p = imallocx_prof_sample(tsd_tsdn(tsd), *usize, alignment, zero,
- tcache, arena, slow_path);
- } else
- p = NULL;
- if (unlikely(p == NULL)) {
- prof_alloc_rollback(tsd, tctx, true);
- return (NULL);
- }
- prof_malloc(tsd_tsdn(tsd), p, *usize, tctx);
-
- assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
- return (p);
-}
-
-JEMALLOC_ALWAYS_INLINE_C void *
-imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize,
- bool slow_path)
-{
- void *p;
- size_t alignment;
- bool zero;
- tcache_t *tcache;
- arena_t *arena;
-
- if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
- &zero, &tcache, &arena)))
- return (NULL);
- p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero, tcache,
- arena, slow_path);
- assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
- return (p);
-}
-
-/* This function guarantees that *tsdn is non-NULL on success. */
-JEMALLOC_ALWAYS_INLINE_C void *
-imallocx_body(size_t size, int flags, tsdn_t **tsdn, size_t *usize,
- bool slow_path)
-{
- tsd_t *tsd;
-
- if (slow_path && unlikely(malloc_init())) {
- *tsdn = NULL;
- return (NULL);
- }
-
- tsd = tsd_fetch();
- *tsdn = tsd_tsdn(tsd);
- witness_assert_lockless(tsd_tsdn(tsd));
-
- if (likely(flags == 0)) {
- szind_t ind = size2index(size);
- if (unlikely(ind >= NSIZES))
- return (NULL);
- if (config_stats || (config_prof && opt_prof) || (slow_path &&
- config_valgrind && unlikely(in_valgrind))) {
- *usize = index2size(ind);
- assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
- }
-
- if (config_prof && opt_prof) {
- return (ialloc_prof(tsd, *usize, ind, false,
- slow_path));
- }
-
- return (ialloc(tsd, size, ind, false, slow_path));
- }
-
- if (config_prof && opt_prof)
- return (imallocx_prof(tsd, size, flags, usize, slow_path));
-
- return (imallocx_no_prof(tsd, size, flags, usize, slow_path));
-}
-
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
-je_mallocx(size_t size, int flags)
-{
- tsdn_t *tsdn;
- void *p;
- size_t usize;
+je_mallocx(size_t size, int flags) {
+ void *ret;
+ static_opts_t sopts;
+ dynamic_opts_t dopts;
+
+ static_opts_init(&sopts);
+ dynamic_opts_init(&dopts);
+
+ sopts.assert_nonempty_alloc = true;
+ sopts.null_out_result_on_error = true;
+ sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n";
+
+ dopts.result = &ret;
+ dopts.num_items = 1;
+ dopts.item_size = size;
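+
+	/*
+	 * Decode any MALLOCX_*() flags (alignment, zero, tcache, arena) into
+	 * the corresponding dopts fields.
+	 */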
+ if (unlikely(flags != 0)) {
+ if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) {
+ dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
+ }
- assert(size != 0);
+ dopts.zero = MALLOCX_ZERO_GET(flags);
- if (likely(!malloc_slow)) {
- p = imallocx_body(size, flags, &tsdn, &usize, false);
- ialloc_post_check(p, tsdn, usize, "mallocx", false, false);
- } else {
- p = imallocx_body(size, flags, &tsdn, &usize, true);
- ialloc_post_check(p, tsdn, usize, "mallocx", false, true);
- UTRACE(0, size, p);
- JEMALLOC_VALGRIND_MALLOC(p != NULL, tsdn, p, usize,
- MALLOCX_ZERO_GET(flags));
+ if ((flags & MALLOCX_TCACHE_MASK) != 0) {
+ if ((flags & MALLOCX_TCACHE_MASK)
+ == MALLOCX_TCACHE_NONE) {
+ dopts.tcache_ind = TCACHE_IND_NONE;
+ } else {
+ dopts.tcache_ind = MALLOCX_TCACHE_GET(flags);
+ }
+ } else {
+ dopts.tcache_ind = TCACHE_IND_AUTOMATIC;
+ }
+
+		if ((flags & MALLOCX_ARENA_MASK) != 0) {
+			dopts.arena_ind = MALLOCX_ARENA_GET(flags);
+		}
}
- return (p);
+ imalloc(&sopts, &dopts);
+ return ret;
}
static void *
-irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize,
+irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize,
size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
- prof_tctx_t *tctx)
-{
+ prof_tctx_t *tctx) {
void *p;
- if (tctx == NULL)
- return (NULL);
+ if (tctx == NULL) {
+ return NULL;
+ }
if (usize <= SMALL_MAXCLASS) {
- p = iralloct(tsd, old_ptr, old_usize, LARGE_MINCLASS, alignment,
- zero, tcache, arena);
- if (p == NULL)
- return (NULL);
- arena_prof_promoted(tsd_tsdn(tsd), p, usize);
+ p = iralloct(tsdn, old_ptr, old_usize, LARGE_MINCLASS,
+ alignment, zero, tcache, arena);
+ if (p == NULL) {
+ return NULL;
+ }
+ arena_prof_promote(tsdn, p, usize);
} else {
- p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero,
+ p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero,
tcache, arena);
}
- return (p);
+ return p;
}
-JEMALLOC_ALWAYS_INLINE_C void *
+JEMALLOC_ALWAYS_INLINE void *
irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
- arena_t *arena)
-{
+ arena_t *arena, alloc_ctx_t *alloc_ctx) {
void *p;
bool prof_active;
prof_tctx_t *old_tctx, *tctx;
prof_active = prof_active_get_unlocked();
- old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr);
+ old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
tctx = prof_alloc_prep(tsd, *usize, prof_active, false);
if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
- p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize,
- alignment, zero, tcache, arena, tctx);
+ p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize,
+ *usize, alignment, zero, tcache, arena, tctx);
} else {
- p = iralloct(tsd, old_ptr, old_usize, size, alignment, zero,
- tcache, arena);
+ p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment,
+ zero, tcache, arena);
}
if (unlikely(p == NULL)) {
prof_alloc_rollback(tsd, tctx, false);
- return (NULL);
+ return NULL;
}
if (p == old_ptr && alignment != 0) {
@@ -2356,24 +2520,22 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
* be the same as the current usize because of in-place large
* reallocation. Therefore, query the actual value of usize.
*/
- *usize = isalloc(tsd_tsdn(tsd), p, config_prof);
+ *usize = isalloc(tsd_tsdn(tsd), p);
}
prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr,
old_usize, old_tctx);
- return (p);
+ return p;
}
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ALLOC_SIZE(2)
-je_rallocx(void *ptr, size_t size, int flags)
-{
+je_rallocx(void *ptr, size_t size, int flags) {
void *p;
tsd_t *tsd;
size_t usize;
size_t old_usize;
- UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
size_t alignment = MALLOCX_ALIGN_GET(flags);
bool zero = flags & MALLOCX_ZERO;
arena_t *arena;
@@ -2382,45 +2544,56 @@ je_rallocx(void *ptr, size_t size, int flags)
assert(ptr != NULL);
assert(size != 0);
assert(malloc_initialized() || IS_INITIALIZER);
- malloc_thread_init();
tsd = tsd_fetch();
- witness_assert_lockless(tsd_tsdn(tsd));
+ check_entry_exit_locking(tsd_tsdn(tsd));
if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
unsigned arena_ind = MALLOCX_ARENA_GET(flags);
arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
- if (unlikely(arena == NULL))
+ if (unlikely(arena == NULL)) {
goto label_oom;
- } else
+ }
+ } else {
arena = NULL;
+ }
if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
- if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
+ if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
tcache = NULL;
- else
+ } else {
tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
- } else
- tcache = tcache_get(tsd, true);
-
- old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
- if (config_valgrind && unlikely(in_valgrind))
- old_rzsize = u2rz(old_usize);
+ }
+ } else {
+ tcache = tcache_get(tsd);
+ }
+ alloc_ctx_t alloc_ctx;
+ rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
+ rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
+ assert(alloc_ctx.szind != NSIZES);
+ old_usize = sz_index2size(alloc_ctx.szind);
+ assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
if (config_prof && opt_prof) {
- usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
- if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
+ usize = (alignment == 0) ?
+ sz_s2u(size) : sz_sa2u(size, alignment);
+ if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
goto label_oom;
+ }
p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
- zero, tcache, arena);
- if (unlikely(p == NULL))
+ zero, tcache, arena, &alloc_ctx);
+ if (unlikely(p == NULL)) {
goto label_oom;
+ }
} else {
- p = iralloct(tsd, ptr, old_usize, size, alignment, zero,
- tcache, arena);
- if (unlikely(p == NULL))
+ p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment,
+ zero, tcache, arena);
+ if (unlikely(p == NULL)) {
goto label_oom;
- if (config_stats || (config_valgrind && unlikely(in_valgrind)))
- usize = isalloc(tsd_tsdn(tsd), p, config_prof);
+ }
+ if (config_stats) {
+ usize = isalloc(tsd_tsdn(tsd), p);
+ }
}
assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
@@ -2429,57 +2602,54 @@ je_rallocx(void *ptr, size_t size, int flags)
*tsd_thread_deallocatedp_get(tsd) += old_usize;
}
UTRACE(ptr, size, p);
- JEMALLOC_VALGRIND_REALLOC(maybe, tsd_tsdn(tsd), p, usize, no, ptr,
- old_usize, old_rzsize, no, zero);
- witness_assert_lockless(tsd_tsdn(tsd));
- return (p);
+ check_entry_exit_locking(tsd_tsdn(tsd));
+ return p;
label_oom:
if (config_xmalloc && unlikely(opt_xmalloc)) {
malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
abort();
}
UTRACE(ptr, size, 0);
- witness_assert_lockless(tsd_tsdn(tsd));
- return (NULL);
+ check_entry_exit_locking(tsd_tsdn(tsd));
+ return NULL;
}
-JEMALLOC_ALWAYS_INLINE_C size_t
+JEMALLOC_ALWAYS_INLINE size_t
ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
- size_t extra, size_t alignment, bool zero)
-{
+ size_t extra, size_t alignment, bool zero) {
size_t usize;
- if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero))
- return (old_usize);
- usize = isalloc(tsdn, ptr, config_prof);
+ if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero)) {
+ return old_usize;
+ }
+ usize = isalloc(tsdn, ptr);
- return (usize);
+ return usize;
}
static size_t
ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
- size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx)
-{
+ size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) {
size_t usize;
- if (tctx == NULL)
- return (old_usize);
+ if (tctx == NULL) {
+ return old_usize;
+ }
usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
zero);
- return (usize);
+ return usize;
}
-JEMALLOC_ALWAYS_INLINE_C size_t
+JEMALLOC_ALWAYS_INLINE size_t
ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
- size_t extra, size_t alignment, bool zero)
-{
+ size_t extra, size_t alignment, bool zero, alloc_ctx_t *alloc_ctx) {
size_t usize_max, usize;
bool prof_active;
prof_tctx_t *old_tctx, *tctx;
prof_active = prof_active_get_unlocked();
- old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr);
+ old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);
/*
* usize isn't knowable before ixalloc() returns when extra is non-zero.
* Therefore, compute its maximum possible value and use that in
@@ -2487,18 +2657,18 @@ ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
* prof_realloc() will use the actual usize to decide whether to sample.
*/
if (alignment == 0) {
- usize_max = s2u(size+extra);
- assert(usize_max > 0 && usize_max <= HUGE_MAXCLASS);
+ usize_max = sz_s2u(size+extra);
+ assert(usize_max > 0 && usize_max <= LARGE_MAXCLASS);
} else {
- usize_max = sa2u(size+extra, alignment);
- if (unlikely(usize_max == 0 || usize_max > HUGE_MAXCLASS)) {
+ usize_max = sz_sa2u(size+extra, alignment);
+ if (unlikely(usize_max == 0 || usize_max > LARGE_MAXCLASS)) {
/*
* usize_max is out of range, and chances are that
* allocation will fail, but use the maximum possible
* value and carry on with prof_alloc_prep(), just in
* case allocation succeeds.
*/
- usize_max = HUGE_MAXCLASS;
+ usize_max = LARGE_MAXCLASS;
}
}
tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
@@ -2512,20 +2682,18 @@ ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
}
if (usize == old_usize) {
prof_alloc_rollback(tsd, tctx, false);
- return (usize);
+ return usize;
}
prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
old_tctx);
- return (usize);
+ return usize;
}
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
-je_xallocx(void *ptr, size_t size, size_t extra, int flags)
-{
+je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
tsd_t *tsd;
size_t usize, old_usize;
- UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
size_t alignment = MALLOCX_ALIGN_GET(flags);
bool zero = flags & MALLOCX_ZERO;
@@ -2533,251 +2701,283 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
assert(size != 0);
assert(SIZE_T_MAX - size >= extra);
assert(malloc_initialized() || IS_INITIALIZER);
- malloc_thread_init();
tsd = tsd_fetch();
- witness_assert_lockless(tsd_tsdn(tsd));
-
- old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
-
+ check_entry_exit_locking(tsd_tsdn(tsd));
+
+ alloc_ctx_t alloc_ctx;
+ rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
+ rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
+ assert(alloc_ctx.szind != NSIZES);
+ old_usize = sz_index2size(alloc_ctx.szind);
+ assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
/*
* The API explicitly absolves itself of protecting against (size +
* extra) numerical overflow, but we may need to clamp extra to avoid
- * exceeding HUGE_MAXCLASS.
+ * exceeding LARGE_MAXCLASS.
*
* Ordinarily, size limit checking is handled deeper down, but here we
* have to check as part of (size + extra) clamping, since we need the
* clamped value in the above helper functions.
*/
- if (unlikely(size > HUGE_MAXCLASS)) {
+ if (unlikely(size > LARGE_MAXCLASS)) {
usize = old_usize;
goto label_not_resized;
}
- if (unlikely(HUGE_MAXCLASS - size < extra))
- extra = HUGE_MAXCLASS - size;
-
- if (config_valgrind && unlikely(in_valgrind))
- old_rzsize = u2rz(old_usize);
+ if (unlikely(LARGE_MAXCLASS - size < extra)) {
+ extra = LARGE_MAXCLASS - size;
+ }
if (config_prof && opt_prof) {
usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
- alignment, zero);
+ alignment, zero, &alloc_ctx);
} else {
usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
extra, alignment, zero);
}
- if (unlikely(usize == old_usize))
+ if (unlikely(usize == old_usize)) {
goto label_not_resized;
+ }
if (config_stats) {
*tsd_thread_allocatedp_get(tsd) += usize;
*tsd_thread_deallocatedp_get(tsd) += old_usize;
}
- JEMALLOC_VALGRIND_REALLOC(no, tsd_tsdn(tsd), ptr, usize, no, ptr,
- old_usize, old_rzsize, no, zero);
label_not_resized:
UTRACE(ptr, size, ptr);
- witness_assert_lockless(tsd_tsdn(tsd));
- return (usize);
+ check_entry_exit_locking(tsd_tsdn(tsd));
+ return usize;
}
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
JEMALLOC_ATTR(pure)
-je_sallocx(const void *ptr, int flags)
-{
+je_sallocx(const void *ptr, int flags) {
size_t usize;
tsdn_t *tsdn;
assert(malloc_initialized() || IS_INITIALIZER);
- malloc_thread_init();
+ assert(ptr != NULL);
tsdn = tsdn_fetch();
- witness_assert_lockless(tsdn);
+ check_entry_exit_locking(tsdn);
- if (config_ivsalloc)
- usize = ivsalloc(tsdn, ptr, config_prof);
- else
- usize = isalloc(tsdn, ptr, config_prof);
+ if (config_debug || force_ivsalloc) {
+ usize = ivsalloc(tsdn, ptr);
+ assert(force_ivsalloc || usize != 0);
+ } else {
+ usize = isalloc(tsdn, ptr);
+ }
- witness_assert_lockless(tsdn);
- return (usize);
+ check_entry_exit_locking(tsdn);
+ return usize;
}
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
-je_dallocx(void *ptr, int flags)
-{
- tsd_t *tsd;
- tcache_t *tcache;
-
+je_dallocx(void *ptr, int flags) {
assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER);
- tsd = tsd_fetch();
- witness_assert_lockless(tsd_tsdn(tsd));
+ tsd_t *tsd = tsd_fetch();
+ bool fast = tsd_fast(tsd);
+ check_entry_exit_locking(tsd_tsdn(tsd));
+
+ tcache_t *tcache;
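+	/*
+	 * Select the tcache to use: one explicitly named by the flags, the
+	 * thread's own cache otherwise, or no cache at all when
+	 * MALLOCX_TCACHE_NONE was passed or the call is reentrant.
+	 */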
if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
- if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
+ /* Not allowed to be reentrant and specify a custom tcache. */
+ assert(tsd_reentrancy_level_get(tsd) == 0);
+ if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
tcache = NULL;
- else
+ } else {
tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
- } else
- tcache = tcache_get(tsd, false);
+ }
+ } else {
+ if (likely(fast)) {
+ tcache = tsd_tcachep_get(tsd);
+ assert(tcache == tcache_get(tsd));
+ } else {
+ if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
+ tcache = tcache_get(tsd);
+ } else {
+ tcache = NULL;
+ }
+ }
+ }
UTRACE(ptr, 0, 0);
- if (likely(!malloc_slow))
+ if (likely(fast)) {
+ tsd_assert_fast(tsd);
ifree(tsd, ptr, tcache, false);
- else
+ } else {
ifree(tsd, ptr, tcache, true);
- witness_assert_lockless(tsd_tsdn(tsd));
+ }
+ check_entry_exit_locking(tsd_tsdn(tsd));
}
-JEMALLOC_ALWAYS_INLINE_C size_t
-inallocx(tsdn_t *tsdn, size_t size, int flags)
-{
- size_t usize;
-
- witness_assert_lockless(tsdn);
+JEMALLOC_ALWAYS_INLINE size_t
+inallocx(tsdn_t *tsdn, size_t size, int flags) {
+ check_entry_exit_locking(tsdn);
- if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0))
- usize = s2u(size);
- else
- usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
- witness_assert_lockless(tsdn);
- return (usize);
+ size_t usize;
+ if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) {
+ usize = sz_s2u(size);
+ } else {
+ usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
+ }
+ check_entry_exit_locking(tsdn);
+ return usize;
}
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
-je_sdallocx(void *ptr, size_t size, int flags)
-{
- tsd_t *tsd;
- tcache_t *tcache;
- size_t usize;
-
+je_sdallocx(void *ptr, size_t size, int flags) {
assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER);
- tsd = tsd_fetch();
- usize = inallocx(tsd_tsdn(tsd), size, flags);
- assert(usize == isalloc(tsd_tsdn(tsd), ptr, config_prof));
- witness_assert_lockless(tsd_tsdn(tsd));
+ tsd_t *tsd = tsd_fetch();
+ bool fast = tsd_fast(tsd);
+ size_t usize = inallocx(tsd_tsdn(tsd), size, flags);
+ assert(usize == isalloc(tsd_tsdn(tsd), ptr));
+ check_entry_exit_locking(tsd_tsdn(tsd));
+
+ tcache_t *tcache;
if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
- if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
+ /* Not allowed to be reentrant and specify a custom tcache. */
+ assert(tsd_reentrancy_level_get(tsd) == 0);
+ if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
tcache = NULL;
- else
+ } else {
tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
- } else
- tcache = tcache_get(tsd, false);
+ }
+ } else {
+ if (likely(fast)) {
+ tcache = tsd_tcachep_get(tsd);
+ assert(tcache == tcache_get(tsd));
+ } else {
+ if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
+ tcache = tcache_get(tsd);
+ } else {
+ tcache = NULL;
+ }
+ }
+ }
UTRACE(ptr, 0, 0);
- if (likely(!malloc_slow))
+ if (likely(fast)) {
+ tsd_assert_fast(tsd);
isfree(tsd, ptr, usize, tcache, false);
- else
+ } else {
isfree(tsd, ptr, usize, tcache, true);
- witness_assert_lockless(tsd_tsdn(tsd));
+ }
+ check_entry_exit_locking(tsd_tsdn(tsd));
}
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
JEMALLOC_ATTR(pure)
-je_nallocx(size_t size, int flags)
-{
+je_nallocx(size_t size, int flags) {
size_t usize;
tsdn_t *tsdn;
assert(size != 0);
- if (unlikely(malloc_init()))
- return (0);
+ if (unlikely(malloc_init())) {
+ return 0;
+ }
tsdn = tsdn_fetch();
- witness_assert_lockless(tsdn);
+ check_entry_exit_locking(tsdn);
usize = inallocx(tsdn, size, flags);
- if (unlikely(usize > HUGE_MAXCLASS))
- return (0);
+ if (unlikely(usize > LARGE_MAXCLASS)) {
+ return 0;
+ }
- witness_assert_lockless(tsdn);
- return (usize);
+ check_entry_exit_locking(tsdn);
+ return usize;
}
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
- size_t newlen)
-{
+ size_t newlen) {
int ret;
tsd_t *tsd;
- if (unlikely(malloc_init()))
- return (EAGAIN);
+ if (unlikely(malloc_init())) {
+ return EAGAIN;
+ }
tsd = tsd_fetch();
- witness_assert_lockless(tsd_tsdn(tsd));
+ check_entry_exit_locking(tsd_tsdn(tsd));
ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
- witness_assert_lockless(tsd_tsdn(tsd));
- return (ret);
+ check_entry_exit_locking(tsd_tsdn(tsd));
+ return ret;
}
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
-je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
-{
+je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) {
int ret;
tsdn_t *tsdn;
- if (unlikely(malloc_init()))
- return (EAGAIN);
+ if (unlikely(malloc_init())) {
+ return EAGAIN;
+ }
tsdn = tsdn_fetch();
- witness_assert_lockless(tsdn);
+ check_entry_exit_locking(tsdn);
ret = ctl_nametomib(tsdn, name, mibp, miblenp);
- witness_assert_lockless(tsdn);
- return (ret);
+ check_entry_exit_locking(tsdn);
+ return ret;
}
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
-{
+ void *newp, size_t newlen) {
int ret;
tsd_t *tsd;
- if (unlikely(malloc_init()))
- return (EAGAIN);
+ if (unlikely(malloc_init())) {
+ return EAGAIN;
+ }
tsd = tsd_fetch();
- witness_assert_lockless(tsd_tsdn(tsd));
+ check_entry_exit_locking(tsd_tsdn(tsd));
ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
- witness_assert_lockless(tsd_tsdn(tsd));
- return (ret);
+ check_entry_exit_locking(tsd_tsdn(tsd));
+ return ret;
}
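
For reference, the three je_mallctl* entry points above back jemalloc's public
mallctl()/mallctlnametomib()/mallctlbymib() interface. A minimal sketch of the
usage pattern from the jemalloc manual — resolve a name to a MIB once, then
reuse it with a varying index ("arenas.nbins" and "arenas.bin.0.size" are the
nodes documented there; on non-FreeBSD systems link with -ljemalloc):

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void) {
        unsigned nbins;
        size_t len = sizeof(nbins);
        if (mallctl("arenas.nbins", &nbins, &len, NULL, 0) != 0) {
            return 1;
        }

        /* Resolve the name once; vary the bin index directly in the MIB. */
        size_t mib[4];
        size_t miblen = sizeof(mib) / sizeof(mib[0]);
        if (mallctlnametomib("arenas.bin.0.size", mib, &miblen) != 0) {
            return 1;
        }
        for (unsigned i = 0; i < nbins; i++) {
            size_t bin_size;
            len = sizeof(bin_size);
            mib[2] = (size_t)i;
            if (mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0) == 0) {
                printf("bin %u: %zu bytes\n", i, bin_size);
            }
        }
        return 0;
    }
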
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *opts)
-{
+ const char *opts) {
tsdn_t *tsdn;
tsdn = tsdn_fetch();
- witness_assert_lockless(tsdn);
+ check_entry_exit_locking(tsdn);
stats_print(write_cb, cbopaque, opts);
- witness_assert_lockless(tsdn);
+ check_entry_exit_locking(tsdn);
}
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
-je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
-{
+je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
size_t ret;
tsdn_t *tsdn;
assert(malloc_initialized() || IS_INITIALIZER);
- malloc_thread_init();
tsdn = tsdn_fetch();
- witness_assert_lockless(tsdn);
+ check_entry_exit_locking(tsdn);
- if (config_ivsalloc)
- ret = ivsalloc(tsdn, ptr, config_prof);
- else
- ret = (ptr == NULL) ? 0 : isalloc(tsdn, ptr, config_prof);
+ if (unlikely(ptr == NULL)) {
+ ret = 0;
+ } else {
+ if (config_debug || force_ivsalloc) {
+ ret = ivsalloc(tsdn, ptr);
+ assert(force_ivsalloc || ret != 0);
+ } else {
+ ret = isalloc(tsdn, ptr);
+ }
+ }
- witness_assert_lockless(tsdn);
- return (ret);
+ check_entry_exit_locking(tsdn);
+ return ret;
}
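
je_malloc_usable_size() above is the implementation behind the public
malloc_usable_size(3). A small usage sketch (on FreeBSD the prototype lives in
<malloc_np.h>; on glibc it is <malloc.h> — adjust the include for your
platform):

    #include <stdio.h>
    #include <stdlib.h>
    #include <malloc_np.h>  /* malloc_usable_size(); <malloc.h> on glibc */

    int
    main(void) {
        void *p = malloc(100);
        if (p == NULL) {
            return 1;
        }
        /* The usable size is at least the requested size, often larger. */
        printf("requested 100, usable %zu\n", malloc_usable_size(p));
        free(p);
        return 0;
    }
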
/*
@@ -2798,78 +2998,74 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
#define ALLOCM_ERR_NOT_MOVED 2
int
-je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
-{
- void *p;
-
+je_allocm(void **ptr, size_t *rsize, size_t size, int flags) {
assert(ptr != NULL);
- p = je_mallocx(size, flags);
- if (p == NULL)
+ void *p = je_mallocx(size, flags);
+ if (p == NULL) {
return (ALLOCM_ERR_OOM);
- if (rsize != NULL)
- *rsize = isalloc(tsdn_fetch(), p, config_prof);
+ }
+ if (rsize != NULL) {
+ *rsize = isalloc(tsdn_fetch(), p);
+ }
*ptr = p;
- return (ALLOCM_SUCCESS);
+ return ALLOCM_SUCCESS;
}
int
-je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
-{
- int ret;
- bool no_move = flags & ALLOCM_NO_MOVE;
-
+je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags) {
assert(ptr != NULL);
assert(*ptr != NULL);
assert(size != 0);
assert(SIZE_T_MAX - size >= extra);
+ int ret;
+ bool no_move = flags & ALLOCM_NO_MOVE;
+
if (no_move) {
size_t usize = je_xallocx(*ptr, size, extra, flags);
ret = (usize >= size) ? ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED;
- if (rsize != NULL)
+ if (rsize != NULL) {
*rsize = usize;
+ }
} else {
void *p = je_rallocx(*ptr, size+extra, flags);
if (p != NULL) {
*ptr = p;
ret = ALLOCM_SUCCESS;
- } else
+ } else {
ret = ALLOCM_ERR_OOM;
- if (rsize != NULL)
- *rsize = isalloc(tsdn_fetch(), *ptr, config_prof);
+ }
+ if (rsize != NULL) {
+ *rsize = isalloc(tsdn_fetch(), *ptr);
+ }
}
- return (ret);
+ return ret;
}
int
-je_sallocm(const void *ptr, size_t *rsize, int flags)
-{
-
+je_sallocm(const void *ptr, size_t *rsize, int flags) {
assert(rsize != NULL);
*rsize = je_sallocx(ptr, flags);
- return (ALLOCM_SUCCESS);
+ return ALLOCM_SUCCESS;
}
int
-je_dallocm(void *ptr, int flags)
-{
-
+je_dallocm(void *ptr, int flags) {
je_dallocx(ptr, flags);
- return (ALLOCM_SUCCESS);
+ return ALLOCM_SUCCESS;
}
int
-je_nallocm(size_t *rsize, size_t size, int flags)
-{
- size_t usize;
-
- usize = je_nallocx(size, flags);
- if (usize == 0)
- return (ALLOCM_ERR_OOM);
- if (rsize != NULL)
+je_nallocm(size_t *rsize, size_t size, int flags) {
+ size_t usize = je_nallocx(size, flags);
+ if (usize == 0) {
+ return ALLOCM_ERR_OOM;
+ }
+ if (rsize != NULL) {
*rsize = usize;
- return (ALLOCM_SUCCESS);
+ }
+ return ALLOCM_SUCCESS;
}
#undef ALLOCM_LG_ALIGN
@@ -2906,9 +3102,7 @@ je_nallocm(size_t *rsize, size_t size, int flags)
#ifndef JEMALLOC_JET
JEMALLOC_ATTR(constructor)
static void
-jemalloc_constructor(void)
-{
-
+jemalloc_constructor(void) {
malloc_init();
}
#endif
@@ -2926,8 +3120,9 @@ _malloc_prefork(void)
arena_t *arena;
#ifdef JEMALLOC_MUTEX_INIT_CB
- if (!malloc_initialized())
+ if (!malloc_initialized()) {
return;
+ }
#endif
assert(malloc_initialized());
@@ -2935,13 +3130,20 @@ _malloc_prefork(void)
narenas = narenas_total_get();
- witness_prefork(tsd);
+ witness_prefork(tsd_witness_tsdp_get(tsd));
/* Acquire all mutexes in a safe order. */
ctl_prefork(tsd_tsdn(tsd));
tcache_prefork(tsd_tsdn(tsd));
malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
+ if (have_background_thread) {
+ background_thread_prefork0(tsd_tsdn(tsd));
+ }
prof_prefork0(tsd_tsdn(tsd));
- for (i = 0; i < 3; i++) {
+ if (have_background_thread) {
+ background_thread_prefork1(tsd_tsdn(tsd));
+ }
+ /* Break arena prefork into stages to preserve lock order. */
+ for (i = 0; i < 7; i++) {
for (j = 0; j < narenas; j++) {
if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
NULL) {
@@ -2955,16 +3157,23 @@ _malloc_prefork(void)
case 2:
arena_prefork2(tsd_tsdn(tsd), arena);
break;
+ case 3:
+ arena_prefork3(tsd_tsdn(tsd), arena);
+ break;
+ case 4:
+ arena_prefork4(tsd_tsdn(tsd), arena);
+ break;
+ case 5:
+ arena_prefork5(tsd_tsdn(tsd), arena);
+ break;
+ case 6:
+ arena_prefork6(tsd_tsdn(tsd), arena);
+ break;
default: not_reached();
}
}
}
}
- base_prefork(tsd_tsdn(tsd));
- for (i = 0; i < narenas; i++) {
- if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
- arena_prefork3(tsd_tsdn(tsd), arena);
- }
prof_prefork1(tsd_tsdn(tsd));
}
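
The _malloc_prefork()/_malloc_postfork() pair above follows the standard
fork-safety protocol for allocators: take every allocator lock before fork()
so the child never inherits a mutex held by a thread that no longer exists,
then release the locks in both parent and child. A generic sketch of that
protocol using pthread_atfork() (the lock and handler names are illustrative,
not jemalloc's):

    #include <pthread.h>
    #include <stdio.h>
    #include <sys/wait.h>
    #include <unistd.h>

    static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

    static void
    prefork(void) {
        /* Acquire all locks, in a fixed order, so fork() sees quiescent state. */
        pthread_mutex_lock(&state_lock);
    }

    static void
    postfork_parent(void) {
        pthread_mutex_unlock(&state_lock);
    }

    static void
    postfork_child(void) {
        /* The child starts single-threaded; releasing here is safe. */
        pthread_mutex_unlock(&state_lock);
    }

    int
    main(void) {
        pthread_atfork(prefork, postfork_parent, postfork_child);

        pid_t pid = fork();
        if (pid == 0) {
            _exit(0);   /* Child exits; its copy of the lock was released. */
        }
        if (pid > 0) {
            waitpid(pid, NULL, 0);
            puts("fork handlers ran");
        }
        return 0;
    }
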
@@ -2980,31 +3189,34 @@ _malloc_postfork(void)
unsigned i, narenas;
#ifdef JEMALLOC_MUTEX_INIT_CB
- if (!malloc_initialized())
+ if (!malloc_initialized()) {
return;
+ }
#endif
assert(malloc_initialized());
tsd = tsd_fetch();
- witness_postfork_parent(tsd);
+ witness_postfork_parent(tsd_witness_tsdp_get(tsd));
/* Release all mutexes, now that fork() has completed. */
- base_postfork_parent(tsd_tsdn(tsd));
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
arena_t *arena;
- if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
+ if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
arena_postfork_parent(tsd_tsdn(tsd), arena);
+ }
}
prof_postfork_parent(tsd_tsdn(tsd));
+ if (have_background_thread) {
+ background_thread_postfork_parent(tsd_tsdn(tsd));
+ }
malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
tcache_postfork_parent(tsd_tsdn(tsd));
ctl_postfork_parent(tsd_tsdn(tsd));
}
void
-jemalloc_postfork_child(void)
-{
+jemalloc_postfork_child(void) {
tsd_t *tsd;
unsigned i, narenas;
@@ -3012,16 +3224,19 @@ jemalloc_postfork_child(void)
tsd = tsd_fetch();
- witness_postfork_child(tsd);
+ witness_postfork_child(tsd_witness_tsdp_get(tsd));
/* Release all mutexes, now that fork() has completed. */
- base_postfork_child(tsd_tsdn(tsd));
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
arena_t *arena;
- if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
+ if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
arena_postfork_child(tsd_tsdn(tsd), arena);
+ }
}
prof_postfork_child(tsd_tsdn(tsd));
+ if (have_background_thread) {
+ background_thread_postfork_child(tsd_tsdn(tsd));
+ }
malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
tcache_postfork_child(tsd_tsdn(tsd));
ctl_postfork_child(tsd_tsdn(tsd));
diff --git a/contrib/jemalloc/src/large.c b/contrib/jemalloc/src/large.c
new file mode 100644
index 000000000000..27a2c679876b
--- /dev/null
+++ b/contrib/jemalloc/src/large.c
@@ -0,0 +1,371 @@
+#define JEMALLOC_LARGE_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/extent_mmap.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/util.h"
+
+/******************************************************************************/
+
+void *
+large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) {
+ assert(usize == sz_s2u(usize));
+
+ return large_palloc(tsdn, arena, usize, CACHELINE, zero);
+}
+
+void *
+large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
+ bool zero) {
+ size_t ausize;
+ extent_t *extent;
+ bool is_zeroed;
+ UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
+
+ assert(!tsdn_null(tsdn) || arena != NULL);
+
+ ausize = sz_sa2u(usize, alignment);
+ if (unlikely(ausize == 0 || ausize > LARGE_MAXCLASS)) {
+ return NULL;
+ }
+
+ if (config_fill && unlikely(opt_zero)) {
+ zero = true;
+ }
+ /*
+ * Copy zero into is_zeroed and pass the copy when allocating the
+ * extent, so that it is possible to make correct junk/zero fill
+ * decisions below, even if is_zeroed ends up true when zero is false.
+ */
+ is_zeroed = zero;
+ if (likely(!tsdn_null(tsdn))) {
+ arena = arena_choose(tsdn_tsd(tsdn), arena);
+ }
+ if (unlikely(arena == NULL) || (extent = arena_extent_alloc_large(tsdn,
+ arena, usize, alignment, &is_zeroed)) == NULL) {
+ return NULL;
+ }
+
+ /* See comments in arena_bin_slabs_full_insert(). */
+ if (!arena_is_auto(arena)) {
+ /* Insert extent into large. */
+ malloc_mutex_lock(tsdn, &arena->large_mtx);
+ extent_list_append(&arena->large, extent);
+ malloc_mutex_unlock(tsdn, &arena->large_mtx);
+ }
+ if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
+ prof_idump(tsdn);
+ }
+
+ if (zero) {
+ assert(is_zeroed);
+ } else if (config_fill && unlikely(opt_junk_alloc)) {
+ memset(extent_addr_get(extent), JEMALLOC_ALLOC_JUNK,
+ extent_usize_get(extent));
+ }
+
+ arena_decay_tick(tsdn, arena);
+ return extent_addr_get(extent);
+}
+
+static void
+large_dalloc_junk_impl(void *ptr, size_t size) {
+ memset(ptr, JEMALLOC_FREE_JUNK, size);
+}
+large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk = large_dalloc_junk_impl;
+
+static void
+large_dalloc_maybe_junk_impl(void *ptr, size_t size) {
+ if (config_fill && have_dss && unlikely(opt_junk_free)) {
+ /*
+ * Only bother junk filling if the extent isn't about to be
+ * unmapped.
+ */
+ if (opt_retain || (have_dss && extent_in_dss(ptr))) {
+ large_dalloc_junk(ptr, size);
+ }
+ }
+}
+large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk =
+ large_dalloc_maybe_junk_impl;
+
+static bool
+large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) {
+ arena_t *arena = extent_arena_get(extent);
+ size_t oldusize = extent_usize_get(extent);
+ extent_hooks_t *extent_hooks = extent_hooks_get(arena);
+ size_t diff = extent_size_get(extent) - (usize + sz_large_pad);
+
+ assert(oldusize > usize);
+
+ if (extent_hooks->split == NULL) {
+ return true;
+ }
+
+ /* Split excess pages. */
+ if (diff != 0) {
+ extent_t *trail = extent_split_wrapper(tsdn, arena,
+ &extent_hooks, extent, usize + sz_large_pad,
+ sz_size2index(usize), false, diff, NSIZES, false);
+ if (trail == NULL) {
+ return true;
+ }
+
+ if (config_fill && unlikely(opt_junk_free)) {
+ large_dalloc_maybe_junk(extent_addr_get(trail),
+ extent_size_get(trail));
+ }
+
+ arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, trail);
+ }
+
+ arena_extent_ralloc_large_shrink(tsdn, arena, extent, oldusize);
+
+ return false;
+}
+
+static bool
+large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
+ bool zero) {
+ arena_t *arena = extent_arena_get(extent);
+ size_t oldusize = extent_usize_get(extent);
+ extent_hooks_t *extent_hooks = extent_hooks_get(arena);
+ size_t trailsize = usize - oldusize;
+
+ if (extent_hooks->merge == NULL) {
+ return true;
+ }
+
+ if (config_fill && unlikely(opt_zero)) {
+ zero = true;
+ }
+ /*
+ * Copy zero into is_zeroed_trail and pass the copy when allocating the
+ * extent, so that it is possible to make correct junk/zero fill
+ * decisions below, even if is_zeroed_trail ends up true when zero is
+ * false.
+ */
+ bool is_zeroed_trail = zero;
+ bool commit = true;
+ extent_t *trail;
+ bool new_mapping;
+ if ((trail = extents_alloc(tsdn, arena, &extent_hooks,
+ &arena->extents_dirty, extent_past_get(extent), trailsize, 0,
+ CACHELINE, false, NSIZES, &is_zeroed_trail, &commit)) != NULL
+ || (trail = extents_alloc(tsdn, arena, &extent_hooks,
+ &arena->extents_muzzy, extent_past_get(extent), trailsize, 0,
+ CACHELINE, false, NSIZES, &is_zeroed_trail, &commit)) != NULL) {
+ if (config_stats) {
+ new_mapping = false;
+ }
+ } else {
+ if ((trail = extent_alloc_wrapper(tsdn, arena, &extent_hooks,
+ extent_past_get(extent), trailsize, 0, CACHELINE, false,
+ NSIZES, &is_zeroed_trail, &commit)) == NULL) {
+ return true;
+ }
+ if (config_stats) {
+ new_mapping = true;
+ }
+ }
+
+ if (extent_merge_wrapper(tsdn, arena, &extent_hooks, extent, trail)) {
+ extent_dalloc_wrapper(tsdn, arena, &extent_hooks, trail);
+ return true;
+ }
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+ szind_t szind = sz_size2index(usize);
+ extent_szind_set(extent, szind);
+ rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)extent_addr_get(extent), szind, false);
+
+ if (config_stats && new_mapping) {
+ arena_stats_mapped_add(tsdn, &arena->stats, trailsize);
+ }
+
+ if (zero) {
+ if (config_cache_oblivious) {
+ /*
+ * Zero the trailing bytes of the original allocation's
+ * last page, since they are in an indeterminate state.
+ * There will always be trailing bytes, because ptr's
+ * offset from the beginning of the extent is a multiple
+ * of CACHELINE in [0 .. PAGE).
+ */
+ void *zbase = (void *)
+ ((uintptr_t)extent_addr_get(extent) + oldusize);
+ void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
+ PAGE));
+ size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
+ assert(nzero > 0);
+ memset(zbase, 0, nzero);
+ }
+ assert(is_zeroed_trail);
+ } else if (config_fill && unlikely(opt_junk_alloc)) {
+ memset((void *)((uintptr_t)extent_addr_get(extent) + oldusize),
+ JEMALLOC_ALLOC_JUNK, usize - oldusize);
+ }
+
+ arena_extent_ralloc_large_expand(tsdn, arena, extent, oldusize);
+
+ return false;
+}
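
The zero-fill branch above only needs to clean the tail of the original
allocation's last page, and it computes that range with page-alignment
arithmetic. A standalone sketch of the same computation on a plain integer
address (the 4 KiB PAGE value and the extent address are assumptions for
illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE ((uintptr_t)4096)  /* assumed 4 KiB page size */

    /* Round an address down to the base of its page. */
    static uintptr_t
    page_addr2base(uintptr_t a) {
        return a & ~(PAGE - 1);
    }

    int
    main(void) {
        uintptr_t extent_addr = 0x200000;   /* hypothetical extent address */
        size_t oldusize = 5000;

        /* Zero from the end of the old usable size to the next page edge. */
        uintptr_t zbase = extent_addr + oldusize;
        uintptr_t zpast = page_addr2base(zbase + PAGE);
        printf("zero [%#lx, %#lx): %lu bytes\n", (unsigned long)zbase,
            (unsigned long)zpast, (unsigned long)(zpast - zbase));
        return 0;
    }
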
+
+bool
+large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
+ size_t usize_max, bool zero) {
+ size_t oldusize = extent_usize_get(extent);
+
+ /* The following should have been caught by callers. */
+ assert(usize_min > 0 && usize_max <= LARGE_MAXCLASS);
+ /* Both allocation sizes must be large to avoid a move. */
+ assert(oldusize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS);
+
+ if (usize_max > oldusize) {
+ /* Attempt to expand the allocation in-place. */
+ if (!large_ralloc_no_move_expand(tsdn, extent, usize_max,
+ zero)) {
+ arena_decay_tick(tsdn, extent_arena_get(extent));
+ return false;
+ }
+ /* Try again, this time with usize_min. */
+ if (usize_min < usize_max && usize_min > oldusize &&
+ large_ralloc_no_move_expand(tsdn, extent, usize_min,
+ zero)) {
+ arena_decay_tick(tsdn, extent_arena_get(extent));
+ return false;
+ }
+ }
+
+ /*
+ * Avoid moving the allocation if the existing extent size accommodates
+ * the new size.
+ */
+ if (oldusize >= usize_min && oldusize <= usize_max) {
+ arena_decay_tick(tsdn, extent_arena_get(extent));
+ return false;
+ }
+
+ /* Attempt to shrink the allocation in-place. */
+ if (oldusize > usize_max) {
+ if (!large_ralloc_no_move_shrink(tsdn, extent, usize_max)) {
+ arena_decay_tick(tsdn, extent_arena_get(extent));
+ return false;
+ }
+ }
+ return true;
+}
+
+static void *
+large_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
+ size_t alignment, bool zero) {
+ if (alignment <= CACHELINE) {
+ return large_malloc(tsdn, arena, usize, zero);
+ }
+ return large_palloc(tsdn, arena, usize, alignment, zero);
+}
+
+void *
+large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
+ size_t alignment, bool zero, tcache_t *tcache) {
+ size_t oldusize = extent_usize_get(extent);
+
+ /* The following should have been caught by callers. */
+ assert(usize > 0 && usize <= LARGE_MAXCLASS);
+ /* Both allocation sizes must be large to avoid a move. */
+ assert(oldusize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS);
+
+ /* Try to avoid moving the allocation. */
+ if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero)) {
+ return extent_addr_get(extent);
+ }
+
+ /*
+ * usize and old size are different enough that we need to use a
+ * different size class. In that case, fall back to allocating new
+ * space and copying.
+ */
+ void *ret = large_ralloc_move_helper(tsdn, arena, usize, alignment,
+ zero);
+ if (ret == NULL) {
+ return NULL;
+ }
+
+ size_t copysize = (usize < oldusize) ? usize : oldusize;
+ memcpy(ret, extent_addr_get(extent), copysize);
+ isdalloct(tsdn, extent_addr_get(extent), oldusize, tcache, NULL, true);
+ return ret;
+}
+
+/*
+ * junked_locked indicates whether the extent's data have been junk-filled, and
+ * whether the arena's large_mtx is currently held.
+ */
+static void
+large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+ bool junked_locked) {
+ if (!junked_locked) {
+ /* See comments in arena_bin_slabs_full_insert(). */
+ if (!arena_is_auto(arena)) {
+ malloc_mutex_lock(tsdn, &arena->large_mtx);
+ extent_list_remove(&arena->large, extent);
+ malloc_mutex_unlock(tsdn, &arena->large_mtx);
+ }
+ large_dalloc_maybe_junk(extent_addr_get(extent),
+ extent_usize_get(extent));
+ } else {
+ malloc_mutex_assert_owner(tsdn, &arena->large_mtx);
+ if (!arena_is_auto(arena)) {
+ extent_list_remove(&arena->large, extent);
+ }
+ }
+ arena_extent_dalloc_large_prep(tsdn, arena, extent);
+}
+
+static void
+large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
+ extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
+ arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, extent);
+}
+
+void
+large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent) {
+ large_dalloc_prep_impl(tsdn, extent_arena_get(extent), extent, true);
+}
+
+void
+large_dalloc_finish(tsdn_t *tsdn, extent_t *extent) {
+ large_dalloc_finish_impl(tsdn, extent_arena_get(extent), extent);
+}
+
+void
+large_dalloc(tsdn_t *tsdn, extent_t *extent) {
+ arena_t *arena = extent_arena_get(extent);
+ large_dalloc_prep_impl(tsdn, arena, extent, false);
+ large_dalloc_finish_impl(tsdn, arena, extent);
+ arena_decay_tick(tsdn, arena);
+}
+
+size_t
+large_salloc(tsdn_t *tsdn, const extent_t *extent) {
+ return extent_usize_get(extent);
+}
+
+prof_tctx_t *
+large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent) {
+ return extent_prof_tctx_get(extent);
+}
+
+void
+large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx) {
+ extent_prof_tctx_set(extent, tctx);
+}
+
+void
+large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent) {
+ large_prof_tctx_set(tsdn, extent, (prof_tctx_t *)(uintptr_t)1U);
+}
diff --git a/contrib/jemalloc/src/util.c b/contrib/jemalloc/src/malloc_io.c
index a4ff287c1206..4363cb8350cb 100644
--- a/contrib/jemalloc/src/util.c
+++ b/contrib/jemalloc/src/malloc_io.c
@@ -1,15 +1,35 @@
+#define JEMALLOC_MALLOC_IO_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/malloc_io.h"
+#include "jemalloc/internal/util.h"
+
+#ifdef assert
+# undef assert
+#endif
+#ifdef not_reached
+# undef not_reached
+#endif
+#ifdef not_implemented
+# undef not_implemented
+#endif
+#ifdef assert_not_implemented
+# undef assert_not_implemented
+#endif
+
/*
* Define simple versions of assertion macros that won't recurse in case
* of assertion failures in malloc_*printf().
*/
-#define assert(e) do { \
+#define assert(e) do { \
if (config_debug && !(e)) { \
malloc_write("<jemalloc>: Failed assertion\n"); \
abort(); \
} \
} while (0)
-#define not_reached() do { \
+#define not_reached() do { \
if (config_debug) { \
malloc_write("<jemalloc>: Unreachable code reached\n"); \
abort(); \
@@ -17,38 +37,39 @@
unreachable(); \
} while (0)
-#define not_implemented() do { \
+#define not_implemented() do { \
if (config_debug) { \
malloc_write("<jemalloc>: Not implemented\n"); \
abort(); \
} \
} while (0)
-#define JEMALLOC_UTIL_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define assert_not_implemented(e) do { \
+ if (unlikely(config_debug && !(e))) { \
+ not_implemented(); \
+ } \
+} while (0)
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
-static void wrtmessage(void *cbopaque, const char *s);
-#define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1)
-static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s,
+static void wrtmessage(void *cbopaque, const char *s);
+#define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1)
+static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s,
size_t *slen_p);
-#define D2S_BUFSIZE (1 + U2S_BUFSIZE)
-static char *d2s(intmax_t x, char sign, char *s, size_t *slen_p);
-#define O2S_BUFSIZE (1 + U2S_BUFSIZE)
-static char *o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p);
-#define X2S_BUFSIZE (2 + U2S_BUFSIZE)
-static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s,
+#define D2S_BUFSIZE (1 + U2S_BUFSIZE)
+static char *d2s(intmax_t x, char sign, char *s, size_t *slen_p);
+#define O2S_BUFSIZE (1 + U2S_BUFSIZE)
+static char *o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p);
+#define X2S_BUFSIZE (2 + U2S_BUFSIZE)
+static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s,
size_t *slen_p);
/******************************************************************************/
/* malloc_message() setup. */
static void
-wrtmessage(void *cbopaque, const char *s)
-{
-
+wrtmessage(void *cbopaque, const char *s) {
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write)
/*
* Use syscall(2) rather than write(2) when possible in order to avoid
@@ -69,9 +90,7 @@ JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s);
JEMALLOC_ATTR(visibility("hidden"))
void
-wrtmessage_1_0(const char *s1, const char *s2, const char *s3,
- const char *s4)
-{
+wrtmessage_1_0(const char *s1, const char *s2, const char *s3, const char *s4) {
wrtmessage(NULL, s1);
wrtmessage(NULL, s2);
@@ -88,13 +107,12 @@ __sym_compat(_malloc_message, __malloc_message_1_0, FBSD_1.0);
* je_malloc_message(...) throughout the code.
*/
void
-malloc_write(const char *s)
-{
-
- if (je_malloc_message != NULL)
+malloc_write(const char *s) {
+ if (je_malloc_message != NULL) {
je_malloc_message(NULL, s);
- else
+ } else {
wrtmessage(NULL, s);
+ }
}
/*
@@ -102,28 +120,25 @@ malloc_write(const char *s)
* provide a wrapper.
*/
int
-buferror(int err, char *buf, size_t buflen)
-{
-
+buferror(int err, char *buf, size_t buflen) {
#ifdef _WIN32
FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0,
(LPSTR)buf, (DWORD)buflen, NULL);
- return (0);
+ return 0;
#elif defined(__GLIBC__) && defined(_GNU_SOURCE)
char *b = strerror_r(err, buf, buflen);
if (b != buf) {
strncpy(buf, b, buflen);
buf[buflen-1] = '\0';
}
- return (0);
+ return 0;
#else
- return (strerror_r(err, buf, buflen));
+ return strerror_r(err, buf, buflen);
#endif
}
uintmax_t
-malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base)
-{
+malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) {
uintmax_t ret, digit;
unsigned b;
bool neg;
@@ -168,10 +183,12 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base)
switch (p[1]) {
case '0': case '1': case '2': case '3': case '4': case '5':
case '6': case '7':
- if (b == 0)
+ if (b == 0) {
b = 8;
- if (b == 8)
+ }
+ if (b == 8) {
p++;
+ }
break;
case 'X': case 'x':
switch (p[2]) {
@@ -181,10 +198,12 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base)
case 'F':
case 'a': case 'b': case 'c': case 'd': case 'e':
case 'f':
- if (b == 0)
+ if (b == 0) {
b = 16;
- if (b == 16)
+ }
+ if (b == 16) {
p += 2;
+ }
break;
default:
break;
@@ -196,8 +215,9 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base)
goto label_return;
}
}
- if (b == 0)
+ if (b == 0) {
b = 10;
+ }
/* Convert. */
ret = 0;
@@ -215,8 +235,9 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base)
}
p++;
}
- if (neg)
+ if (neg) {
ret = (uintmax_t)(-((intmax_t)ret));
+ }
if (p == ns) {
/* No conversion performed. */
@@ -230,15 +251,15 @@ label_return:
if (p == ns) {
/* No characters were converted. */
*endptr = (char *)nptr;
- } else
+ } else {
*endptr = (char *)p;
+ }
}
- return (ret);
+ return ret;
}
static char *
-u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p)
-{
+u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) {
unsigned i;
i = U2S_BUFSIZE - 1;
@@ -276,23 +297,25 @@ u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p)
}}
*slen_p = U2S_BUFSIZE - 1 - i;
- return (&s[i]);
+ return &s[i];
}
static char *
-d2s(intmax_t x, char sign, char *s, size_t *slen_p)
-{
+d2s(intmax_t x, char sign, char *s, size_t *slen_p) {
bool neg;
- if ((neg = (x < 0)))
+ if ((neg = (x < 0))) {
x = -x;
+ }
s = u2s(x, 10, false, s, slen_p);
- if (neg)
+ if (neg) {
sign = '-';
+ }
switch (sign) {
case '-':
- if (!neg)
+ if (!neg) {
break;
+ }
/* Fall through. */
case ' ':
case '+':
@@ -302,72 +325,70 @@ d2s(intmax_t x, char sign, char *s, size_t *slen_p)
break;
default: not_reached();
}
- return (s);
+ return s;
}
static char *
-o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p)
-{
-
+o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p) {
s = u2s(x, 8, false, s, slen_p);
if (alt_form && *s != '0') {
s--;
(*slen_p)++;
*s = '0';
}
- return (s);
+ return s;
}
static char *
-x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p)
-{
-
+x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) {
s = u2s(x, 16, uppercase, s, slen_p);
if (alt_form) {
s -= 2;
(*slen_p) += 2;
memcpy(s, uppercase ? "0X" : "0x", 2);
}
- return (s);
+ return s;
}
size_t
-malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
-{
+malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
size_t i;
const char *f;
-#define APPEND_C(c) do { \
- if (i < size) \
+#define APPEND_C(c) do { \
+ if (i < size) { \
str[i] = (c); \
+ } \
i++; \
} while (0)
-#define APPEND_S(s, slen) do { \
+#define APPEND_S(s, slen) do { \
if (i < size) { \
size_t cpylen = (slen <= size - i) ? slen : size - i; \
memcpy(&str[i], s, cpylen); \
} \
i += slen; \
} while (0)
-#define APPEND_PADDED_S(s, slen, width, left_justify) do { \
+#define APPEND_PADDED_S(s, slen, width, left_justify) do { \
/* Left padding. */ \
size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ? \
(size_t)width - slen : 0); \
if (!left_justify && pad_len != 0) { \
size_t j; \
- for (j = 0; j < pad_len; j++) \
+ for (j = 0; j < pad_len; j++) { \
APPEND_C(' '); \
+ } \
} \
/* Value. */ \
APPEND_S(s, slen); \
/* Right padding. */ \
if (left_justify && pad_len != 0) { \
size_t j; \
- for (j = 0; j < pad_len; j++) \
+ for (j = 0; j < pad_len; j++) { \
APPEND_C(' '); \
+ } \
} \
} while (0)
-#define GET_ARG_NUMERIC(val, len) do { \
+#define GET_ARG_NUMERIC(val, len) do { \
switch (len) { \
case '?': \
val = va_arg(ap, int); \
@@ -475,10 +496,11 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
break;
}
/* Width/precision separator. */
- if (*f == '.')
+ if (*f == '.') {
f++;
- else
+ } else {
goto label_length;
+ }
/* Precision. */
switch (*f) {
case '*':
@@ -505,8 +527,9 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
if (*f == 'l') {
len = 'q';
f++;
- } else
+ } else {
len = 'l';
+ }
break;
case 'q': case 'j': case 't': case 'z':
len = *f;
@@ -597,22 +620,22 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
}}
}
label_out:
- if (i < size)
+ if (i < size) {
str[i] = '\0';
- else
+ } else {
str[size - 1] = '\0';
+ }
#undef APPEND_C
#undef APPEND_S
#undef APPEND_PADDED_S
#undef GET_ARG_NUMERIC
- return (i);
+ return i;
}
JEMALLOC_FORMAT_PRINTF(3, 4)
size_t
-malloc_snprintf(char *str, size_t size, const char *format, ...)
-{
+malloc_snprintf(char *str, size_t size, const char *format, ...) {
size_t ret;
va_list ap;
@@ -620,13 +643,12 @@ malloc_snprintf(char *str, size_t size, const char *format, ...)
ret = malloc_vsnprintf(str, size, format, ap);
va_end(ap);
- return (ret);
+ return ret;
}
void
malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *format, va_list ap)
-{
+ const char *format, va_list ap) {
char buf[MALLOC_PRINTF_BUFSIZE];
if (write_cb == NULL) {
@@ -651,8 +673,7 @@ malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
JEMALLOC_FORMAT_PRINTF(3, 4)
void
malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *format, ...)
-{
+ const char *format, ...) {
va_list ap;
va_start(ap, format);
@@ -663,8 +684,7 @@ malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
/* Print to stderr in such a way as to avoid memory allocation. */
JEMALLOC_FORMAT_PRINTF(1, 2)
void
-malloc_printf(const char *format, ...)
-{
+malloc_printf(const char *format, ...) {
va_list ap;
va_start(ap, format);
@@ -679,4 +699,5 @@ malloc_printf(const char *format, ...)
#undef assert
#undef not_reached
#undef not_implemented
+#undef assert_not_implemented
#include "jemalloc/internal/assert.h"
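
u2s() above formats integers by writing digits backwards into the end of a
caller-provided buffer and returning a pointer into it, which avoids a second
pass to reverse the string. A standalone sketch of that technique (the names
are illustrative, not jemalloc's):

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Write the base-`base` digits of x backwards into the end of buf and
     * return a pointer to the first digit; *slen_p receives the length.
     */
    static char *
    u2s_sketch(uintmax_t x, unsigned base, char *buf, size_t buflen,
        size_t *slen_p) {
        static const char digits[] = "0123456789abcdef";
        size_t i = buflen - 1;

        buf[i] = '\0';
        do {
            buf[--i] = digits[x % base];
            x /= base;
        } while (x != 0 && i > 0);
        *slen_p = buflen - 1 - i;
        return &buf[i];
    }

    int
    main(void) {
        char buf[32];
        size_t len;
        char *s = u2s_sketch(255, 16, buf, sizeof(buf), &len);
        printf("%s (%zu digits)\n", s, len);    /* prints "ff (2 digits)" */
        return 0;
    }
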
diff --git a/contrib/jemalloc/src/mb.c b/contrib/jemalloc/src/mb.c
deleted file mode 100644
index dc2c0a256fde..000000000000
--- a/contrib/jemalloc/src/mb.c
+++ /dev/null
@@ -1,2 +0,0 @@
-#define JEMALLOC_MB_C_
-#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/contrib/jemalloc/src/mutex.c b/contrib/jemalloc/src/mutex.c
index 13f8d799de1e..820af6133d9c 100644
--- a/contrib/jemalloc/src/mutex.c
+++ b/contrib/jemalloc/src/mutex.c
@@ -1,12 +1,12 @@
-#define JEMALLOC_MUTEX_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define JEMALLOC_MUTEX_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
-#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
-#include <dlfcn.h>
-#endif
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/malloc_io.h"
#ifndef _CRT_SPINCOUNT
-#define _CRT_SPINCOUNT 4000
+#define _CRT_SPINCOUNT 4000
#endif
/******************************************************************************/
@@ -20,10 +20,6 @@ static bool postpone_init = true;
static malloc_mutex_t *postponed_mutexes = NULL;
#endif
-#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
-static void pthread_create_once(void);
-#endif
-
/******************************************************************************/
/*
* We intercept pthread_create() calls in order to toggle isthreaded if the
@@ -31,33 +27,11 @@ static void pthread_create_once(void);
*/
#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
-static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
- void *(*)(void *), void *__restrict);
-
-static void
-pthread_create_once(void)
-{
-
- pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
- if (pthread_create_fptr == NULL) {
- malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
- "\"pthread_create\")\n");
- abort();
- }
-
- isthreaded = true;
-}
-
JEMALLOC_EXPORT int
pthread_create(pthread_t *__restrict thread,
const pthread_attr_t *__restrict attr, void *(*start_routine)(void *),
- void *__restrict arg)
-{
- static pthread_once_t once_control = PTHREAD_ONCE_INIT;
-
- pthread_once(&once_control, pthread_create_once);
-
- return (pthread_create_fptr(thread, attr, start_routine, arg));
+ void *__restrict arg) {
+ return pthread_create_wrapper(thread, attr, start_routine, arg);
}
#endif
@@ -79,17 +53,115 @@ _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
}
#endif
+void
+malloc_mutex_lock_slow(malloc_mutex_t *mutex) {
+ mutex_prof_data_t *data = &mutex->prof_data;
+ UNUSED nstime_t before = NSTIME_ZERO_INITIALIZER;
+
+ if (ncpus == 1) {
+ goto label_spin_done;
+ }
+
+ int cnt = 0, max_cnt = MALLOC_MUTEX_MAX_SPIN;
+ do {
+ CPU_SPINWAIT;
+ if (!malloc_mutex_trylock_final(mutex)) {
+ data->n_spin_acquired++;
+ return;
+ }
+ } while (cnt++ < max_cnt);
+
+ if (!config_stats) {
+ /* Spinning is the only useful strategy when stats are off. */
+ malloc_mutex_lock_final(mutex);
+ return;
+ }
+label_spin_done:
+ nstime_update(&before);
+ /* Copy before to after to avoid clock skews. */
+ nstime_t after;
+ nstime_copy(&after, &before);
+ uint32_t n_thds = atomic_fetch_add_u32(&data->n_waiting_thds, 1,
+ ATOMIC_RELAXED) + 1;
+ /* One last try, since the two calls above may take quite a few cycles. */
+ if (!malloc_mutex_trylock_final(mutex)) {
+ atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
+ data->n_spin_acquired++;
+ return;
+ }
+
+ /* True slow path. */
+ malloc_mutex_lock_final(mutex);
+ /* Update more slow-path only counters. */
+ atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
+ nstime_update(&after);
+
+ nstime_t delta;
+ nstime_copy(&delta, &after);
+ nstime_subtract(&delta, &before);
+
+ data->n_wait_times++;
+ nstime_add(&data->tot_wait_time, &delta);
+ if (nstime_compare(&data->max_wait_time, &delta) < 0) {
+ nstime_copy(&data->max_wait_time, &delta);
+ }
+ if (n_thds > data->max_n_thds) {
+ data->max_n_thds = n_thds;
+ }
+}
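
malloc_mutex_lock_slow() above spins on trylock a bounded number of times
before committing to a blocking lock, and only the blocking path pays for
timing and waiter accounting. A generic pthread sketch of that spin-then-block
shape (MAX_SPIN, sched_yield() as a portable stand-in for CPU_SPINWAIT, and
the counters are all illustrative and not thread-safe here):

    #include <pthread.h>
    #include <sched.h>
    #include <stdio.h>
    #include <time.h>

    #define MAX_SPIN 250

    static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long n_spin_acquired;
    static long long total_wait_ns;

    static void
    lock_slow(pthread_mutex_t *mtx) {
        /* Optimistic phase: spin briefly, hoping the owner releases soon. */
        for (int cnt = 0; cnt < MAX_SPIN; cnt++) {
            if (pthread_mutex_trylock(mtx) == 0) {
                n_spin_acquired++;
                return;
            }
            sched_yield();
        }

        /* True slow path: block, and account for how long we waited. */
        struct timespec before, after;
        clock_gettime(CLOCK_MONOTONIC, &before);
        pthread_mutex_lock(mtx);
        clock_gettime(CLOCK_MONOTONIC, &after);
        total_wait_ns += (long long)(after.tv_sec - before.tv_sec) *
            1000000000LL + (after.tv_nsec - before.tv_nsec);
    }

    int
    main(void) {
        lock_slow(&m);
        pthread_mutex_unlock(&m);
        printf("spin acquisitions: %lu, blocked for %lld ns\n",
            n_spin_acquired, total_wait_ns);
        return 0;
    }
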
+
+static void
+mutex_prof_data_init(mutex_prof_data_t *data) {
+ memset(data, 0, sizeof(mutex_prof_data_t));
+ nstime_init(&data->max_wait_time, 0);
+ nstime_init(&data->tot_wait_time, 0);
+ data->prev_owner = NULL;
+}
+
+void
+malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex) {
+ malloc_mutex_assert_owner(tsdn, mutex);
+ mutex_prof_data_init(&mutex->prof_data);
+}
+
+static int
+mutex_addr_comp(const witness_t *witness1, void *mutex1,
+ const witness_t *witness2, void *mutex2) {
+ assert(mutex1 != NULL);
+ assert(mutex2 != NULL);
+ uintptr_t mu1int = (uintptr_t)mutex1;
+ uintptr_t mu2int = (uintptr_t)mutex2;
+ if (mu1int < mu2int) {
+ return -1;
+ } else if (mu1int == mu2int) {
+ return 0;
+ } else {
+ return 1;
+ }
+}
+
bool
-malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank)
-{
+malloc_mutex_first_thread(void) {
+
+#ifndef JEMALLOC_MUTEX_INIT_CB
+ return (malloc_mutex_first_thread());
+#else
+ return (false);
+#endif
+}
+bool
+malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
+ witness_rank_t rank, malloc_mutex_lock_order_t lock_order) {
+ mutex_prof_data_init(&mutex->prof_data);
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
InitializeSRWLock(&mutex->lock);
# else
if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
- _CRT_SPINCOUNT))
- return (true);
+ _CRT_SPINCOUNT)) {
+ return true;
+ }
# endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
mutex->lock = OS_UNFAIR_LOCK_INIT;
@@ -101,80 +173,72 @@ malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank)
postponed_mutexes = mutex;
} else {
if (_pthread_mutex_init_calloc_cb(&mutex->lock,
- bootstrap_calloc) != 0)
- return (true);
+ bootstrap_calloc) != 0) {
+ return true;
+ }
}
#else
pthread_mutexattr_t attr;
- if (pthread_mutexattr_init(&attr) != 0)
- return (true);
+ if (pthread_mutexattr_init(&attr) != 0) {
+ return true;
+ }
pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE);
if (pthread_mutex_init(&mutex->lock, &attr) != 0) {
pthread_mutexattr_destroy(&attr);
- return (true);
+ return true;
}
pthread_mutexattr_destroy(&attr);
#endif
- if (config_debug)
- witness_init(&mutex->witness, name, rank, NULL);
- return (false);
+ if (config_debug) {
+ mutex->lock_order = lock_order;
+ if (lock_order == malloc_mutex_address_ordered) {
+ witness_init(&mutex->witness, name, rank,
+ mutex_addr_comp, &mutex);
+ } else {
+ witness_init(&mutex->witness, name, rank, NULL, NULL);
+ }
+ }
+ return false;
}
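
The malloc_mutex_address_ordered case above lets witness order same-rank
mutexes by their addresses via mutex_addr_comp(). The rule it enforces is the
classic deadlock-avoidance discipline: when two locks of the same kind must be
held together, always take the lower-addressed one first. A generic sketch of
that discipline (not jemalloc code):

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Lock two mutexes of equal rank in address order to avoid deadlock. */
    static void
    lock_pair(pthread_mutex_t *a, pthread_mutex_t *b) {
        if (a == b) {
            pthread_mutex_lock(a);
        } else if ((uintptr_t)a < (uintptr_t)b) {
            pthread_mutex_lock(a);
            pthread_mutex_lock(b);
        } else {
            pthread_mutex_lock(b);
            pthread_mutex_lock(a);
        }
    }

    static void
    unlock_pair(pthread_mutex_t *a, pthread_mutex_t *b) {
        pthread_mutex_unlock(a);
        if (a != b) {
            pthread_mutex_unlock(b);
        }
    }

    int
    main(void) {
        static pthread_mutex_t m1 = PTHREAD_MUTEX_INITIALIZER;
        static pthread_mutex_t m2 = PTHREAD_MUTEX_INITIALIZER;

        /* Both call orders resolve to the same global order: no deadlock. */
        lock_pair(&m1, &m2);
        unlock_pair(&m1, &m2);
        lock_pair(&m2, &m1);
        unlock_pair(&m2, &m1);
        puts("ok");
        return 0;
    }
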
void
-malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex)
-{
-
+malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex) {
malloc_mutex_lock(tsdn, mutex);
}
void
-malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex)
-{
-
+malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex) {
malloc_mutex_unlock(tsdn, mutex);
}
void
-malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex)
-{
-
+malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) {
#ifdef JEMALLOC_MUTEX_INIT_CB
malloc_mutex_unlock(tsdn, mutex);
#else
if (malloc_mutex_init(mutex, mutex->witness.name,
- mutex->witness.rank)) {
+ mutex->witness.rank, mutex->lock_order)) {
malloc_printf("<jemalloc>: Error re-initializing mutex in "
"child\n");
- if (opt_abort)
+ if (opt_abort) {
abort();
+ }
}
#endif
}
bool
-malloc_mutex_first_thread(void)
-{
-
+malloc_mutex_boot(void) {
#ifdef JEMALLOC_MUTEX_INIT_CB
postpone_init = false;
while (postponed_mutexes != NULL) {
if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
- bootstrap_calloc) != 0)
- return (true);
+ bootstrap_calloc) != 0) {
+ return true;
+ }
postponed_mutexes = postponed_mutexes->postponed_next;
}
#endif
- return (false);
-}
-
-bool
-malloc_mutex_boot(void)
-{
-
-#ifndef JEMALLOC_MUTEX_INIT_CB
- return (malloc_mutex_first_thread());
-#else
- return (false);
-#endif
+ return false;
}
diff --git a/contrib/jemalloc/src/mutex_pool.c b/contrib/jemalloc/src/mutex_pool.c
new file mode 100644
index 000000000000..f24d10e44a80
--- /dev/null
+++ b/contrib/jemalloc/src/mutex_pool.c
@@ -0,0 +1,18 @@
+#define JEMALLOC_MUTEX_POOL_C_
+
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/mutex_pool.h"
+
+bool
+mutex_pool_init(mutex_pool_t *pool, const char *name, witness_rank_t rank) {
+ for (int i = 0; i < MUTEX_POOL_SIZE; ++i) {
+ if (malloc_mutex_init(&pool->mutexes[i], name, rank,
+ malloc_mutex_address_ordered)) {
+ return true;
+ }
+ }
+ return false;
+}
diff --git a/contrib/jemalloc/src/nstime.c b/contrib/jemalloc/src/nstime.c
index 0948e29faffa..71db353965ff 100644
--- a/contrib/jemalloc/src/nstime.c
+++ b/contrib/jemalloc/src/nstime.c
@@ -1,78 +1,83 @@
-#include "jemalloc/internal/jemalloc_internal.h"
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
-#define BILLION UINT64_C(1000000000)
+#include "jemalloc/internal/nstime.h"
-void
-nstime_init(nstime_t *time, uint64_t ns)
-{
+#include "jemalloc/internal/assert.h"
+
+#define BILLION UINT64_C(1000000000)
+#define MILLION UINT64_C(1000000)
+void
+nstime_init(nstime_t *time, uint64_t ns) {
time->ns = ns;
}
void
-nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec)
-{
-
+nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec) {
time->ns = sec * BILLION + nsec;
}
uint64_t
-nstime_ns(const nstime_t *time)
-{
-
- return (time->ns);
+nstime_ns(const nstime_t *time) {
+ return time->ns;
}
uint64_t
-nstime_sec(const nstime_t *time)
-{
-
- return (time->ns / BILLION);
+nstime_msec(const nstime_t *time) {
+ return time->ns / MILLION;
}
uint64_t
-nstime_nsec(const nstime_t *time)
-{
+nstime_sec(const nstime_t *time) {
+ return time->ns / BILLION;
+}
- return (time->ns % BILLION);
+uint64_t
+nstime_nsec(const nstime_t *time) {
+ return time->ns % BILLION;
}
void
-nstime_copy(nstime_t *time, const nstime_t *source)
-{
-
+nstime_copy(nstime_t *time, const nstime_t *source) {
*time = *source;
}
int
-nstime_compare(const nstime_t *a, const nstime_t *b)
-{
-
- return ((a->ns > b->ns) - (a->ns < b->ns));
+nstime_compare(const nstime_t *a, const nstime_t *b) {
+ return (a->ns > b->ns) - (a->ns < b->ns);
}
void
-nstime_add(nstime_t *time, const nstime_t *addend)
-{
-
+nstime_add(nstime_t *time, const nstime_t *addend) {
assert(UINT64_MAX - time->ns >= addend->ns);
time->ns += addend->ns;
}
void
-nstime_subtract(nstime_t *time, const nstime_t *subtrahend)
-{
+nstime_iadd(nstime_t *time, uint64_t addend) {
+ assert(UINT64_MAX - time->ns >= addend);
+
+ time->ns += addend;
+}
+void
+nstime_subtract(nstime_t *time, const nstime_t *subtrahend) {
assert(nstime_compare(time, subtrahend) >= 0);
time->ns -= subtrahend->ns;
}
void
-nstime_imultiply(nstime_t *time, uint64_t multiplier)
-{
+nstime_isubtract(nstime_t *time, uint64_t subtrahend) {
+ assert(time->ns >= subtrahend);
+
+ time->ns -= subtrahend;
+}
+void
+nstime_imultiply(nstime_t *time, uint64_t multiplier) {
assert((((time->ns | multiplier) & (UINT64_MAX << (sizeof(uint64_t) <<
2))) == 0) || ((time->ns * multiplier) / multiplier == time->ns));
@@ -80,28 +85,23 @@ nstime_imultiply(nstime_t *time, uint64_t multiplier)
}
void
-nstime_idivide(nstime_t *time, uint64_t divisor)
-{
-
+nstime_idivide(nstime_t *time, uint64_t divisor) {
assert(divisor != 0);
time->ns /= divisor;
}
uint64_t
-nstime_divide(const nstime_t *time, const nstime_t *divisor)
-{
-
+nstime_divide(const nstime_t *time, const nstime_t *divisor) {
assert(divisor->ns != 0);
- return (time->ns / divisor->ns);
+ return time->ns / divisor->ns;
}
#ifdef _WIN32
# define NSTIME_MONOTONIC true
static void
-nstime_get(nstime_t *time)
-{
+nstime_get(nstime_t *time) {
FILETIME ft;
uint64_t ticks_100ns;
@@ -110,39 +110,34 @@ nstime_get(nstime_t *time)
nstime_init(time, ticks_100ns * 100);
}
-#elif JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
+#elif defined(JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE)
# define NSTIME_MONOTONIC true
static void
-nstime_get(nstime_t *time)
-{
+nstime_get(nstime_t *time) {
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
nstime_init2(time, ts.tv_sec, ts.tv_nsec);
}
-#elif JEMALLOC_HAVE_CLOCK_MONOTONIC
+#elif defined(JEMALLOC_HAVE_CLOCK_MONOTONIC)
# define NSTIME_MONOTONIC true
static void
-nstime_get(nstime_t *time)
-{
+nstime_get(nstime_t *time) {
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
nstime_init2(time, ts.tv_sec, ts.tv_nsec);
}
-#elif JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
+#elif defined(JEMALLOC_HAVE_MACH_ABSOLUTE_TIME)
# define NSTIME_MONOTONIC true
static void
-nstime_get(nstime_t *time)
-{
-
+nstime_get(nstime_t *time) {
nstime_init(time, mach_absolute_time());
}
#else
# define NSTIME_MONOTONIC false
static void
-nstime_get(nstime_t *time)
-{
+nstime_get(nstime_t *time) {
struct timeval tv;
gettimeofday(&tv, NULL);
@@ -150,30 +145,15 @@ nstime_get(nstime_t *time)
}
#endif
-#ifdef JEMALLOC_JET
-#undef nstime_monotonic
-#define nstime_monotonic JEMALLOC_N(n_nstime_monotonic)
-#endif
-bool
-nstime_monotonic(void)
-{
-
- return (NSTIME_MONOTONIC);
+static bool
+nstime_monotonic_impl(void) {
+ return NSTIME_MONOTONIC;
#undef NSTIME_MONOTONIC
}
-#ifdef JEMALLOC_JET
-#undef nstime_monotonic
-#define nstime_monotonic JEMALLOC_N(nstime_monotonic)
-nstime_monotonic_t *nstime_monotonic = JEMALLOC_N(n_nstime_monotonic);
-#endif
+nstime_monotonic_t *JET_MUTABLE nstime_monotonic = nstime_monotonic_impl;
-#ifdef JEMALLOC_JET
-#undef nstime_update
-#define nstime_update JEMALLOC_N(n_nstime_update)
-#endif
-bool
-nstime_update(nstime_t *time)
-{
+static bool
+nstime_update_impl(nstime_t *time) {
nstime_t old_time;
nstime_copy(&old_time, time);
@@ -182,13 +162,9 @@ nstime_update(nstime_t *time)
/* Handle non-monotonic clocks. */
if (unlikely(nstime_compare(&old_time, time) > 0)) {
nstime_copy(time, &old_time);
- return (true);
+ return true;
}
- return (false);
+ return false;
}
-#ifdef JEMALLOC_JET
-#undef nstime_update
-#define nstime_update JEMALLOC_N(nstime_update)
-nstime_update_t *nstime_update = JEMALLOC_N(n_nstime_update);
-#endif
+nstime_update_t *JET_MUTABLE nstime_update = nstime_update_impl;
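
nstime_update_impl() above refuses to let time move backwards: if the freshly
read clock is older than the cached value, the cached value wins and the
caller is told the clock misbehaved. A standalone sketch of the same guard
around clock_gettime(), with a single uint64_t nanosecond counter standing in
for nstime_t:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    static uint64_t
    now_ns(void) {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
    }

    /* Update *cached from the clock; keep the old value if time went backwards. */
    static bool
    time_update(uint64_t *cached) {
        uint64_t old = *cached;
        *cached = now_ns();
        if (*cached < old) {
            *cached = old;
            return true;    /* non-monotonic behavior detected */
        }
        return false;
    }

    int
    main(void) {
        uint64_t t = 0;
        time_update(&t);
        printf("monotonic ns: %llu\n", (unsigned long long)t);
        return 0;
    }
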
diff --git a/contrib/jemalloc/src/pages.c b/contrib/jemalloc/src/pages.c
index 7698e49bff84..fec64dd01d77 100644
--- a/contrib/jemalloc/src/pages.c
+++ b/contrib/jemalloc/src/pages.c
@@ -1,5 +1,12 @@
-#define JEMALLOC_PAGES_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define JEMALLOC_PAGES_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+
+#include "jemalloc/internal/pages.h"
+
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/malloc_io.h"
#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
#include <sys/sysctl.h>
@@ -8,6 +15,9 @@
/******************************************************************************/
/* Data. */
+/* Actual operating system page size, detected during bootstrap, <= PAGE. */
+static size_t os_page;
+
#ifndef _WIN32
# define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE)
# define PAGES_PROT_DECOMMIT (PROT_NONE)
@@ -16,17 +26,26 @@ static int mmap_flags;
static bool os_overcommits;
/******************************************************************************/
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
-void *
-pages_map(void *addr, size_t size, bool *commit)
-{
- void *ret;
+static void os_pages_unmap(void *addr, size_t size);
+
+/******************************************************************************/
+static void *
+os_pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
+ assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr);
+ assert(ALIGNMENT_CEILING(size, os_page) == size);
assert(size != 0);
- if (os_overcommits)
+ if (os_overcommits) {
*commit = true;
+ }
+ void *ret;
#ifdef _WIN32
/*
* If VirtualAlloc can't allocate at the given address when one is
@@ -46,24 +65,54 @@ pages_map(void *addr, size_t size, bool *commit)
}
assert(ret != NULL);
- if (ret == MAP_FAILED)
+ if (ret == MAP_FAILED) {
ret = NULL;
- else if (addr != NULL && ret != addr) {
+ } else if (addr != NULL && ret != addr) {
/*
* We succeeded in mapping memory, but not in the right place.
*/
- pages_unmap(ret, size);
+ os_pages_unmap(ret, size);
ret = NULL;
}
#endif
- assert(ret == NULL || (addr == NULL && ret != addr)
- || (addr != NULL && ret == addr));
- return (ret);
+ assert(ret == NULL || (addr == NULL && ret != addr) || (addr != NULL &&
+ ret == addr));
+ return ret;
}
-void
-pages_unmap(void *addr, size_t size)
-{
+static void *
+os_pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
+ bool *commit) {
+ void *ret = (void *)((uintptr_t)addr + leadsize);
+
+ assert(alloc_size >= leadsize + size);
+#ifdef _WIN32
+ os_pages_unmap(addr, alloc_size);
+ void *new_addr = os_pages_map(ret, size, PAGE, commit);
+ if (new_addr == ret) {
+ return ret;
+ }
+ if (new_addr != NULL) {
+ os_pages_unmap(new_addr, size);
+ }
+ return NULL;
+#else
+ size_t trailsize = alloc_size - leadsize - size;
+
+ if (leadsize != 0) {
+ os_pages_unmap(addr, leadsize);
+ }
+ if (trailsize != 0) {
+ os_pages_unmap((void *)((uintptr_t)ret + size), trailsize);
+ }
+ return ret;
+#endif
+}
+
+static void
+os_pages_unmap(void *addr, size_t size) {
+ assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr);
+ assert(ALIGNMENT_CEILING(size, os_page) == size);
#ifdef _WIN32
if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
@@ -76,54 +125,90 @@ pages_unmap(void *addr, size_t size)
buferror(get_errno(), buf, sizeof(buf));
malloc_printf("<jemalloc>: Error in "
#ifdef _WIN32
- "VirtualFree"
+ "VirtualFree"
#else
- "munmap"
+ "munmap"
#endif
- "(): %s\n", buf);
- if (opt_abort)
+ "(): %s\n", buf);
+ if (opt_abort) {
abort();
+ }
+ }
+}
+
+static void *
+pages_map_slow(size_t size, size_t alignment, bool *commit) {
+ size_t alloc_size = size + alignment - os_page;
+ /* Beware size_t wrap-around. */
+ if (alloc_size < size) {
+ return NULL;
}
+
+ void *ret;
+ do {
+ void *pages = os_pages_map(NULL, alloc_size, alignment, commit);
+ if (pages == NULL) {
+ return NULL;
+ }
+ size_t leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment)
+ - (uintptr_t)pages;
+ ret = os_pages_trim(pages, alloc_size, leadsize, size, commit);
+ } while (ret == NULL);
+
+ assert(ret != NULL);
+ assert(PAGE_ADDR2BASE(ret) == ret);
+ return ret;
}
void *
-pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
- bool *commit)
-{
- void *ret = (void *)((uintptr_t)addr + leadsize);
+pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
+ assert(alignment >= PAGE);
+ assert(ALIGNMENT_ADDR2BASE(addr, alignment) == addr);
- assert(alloc_size >= leadsize + size);
-#ifdef _WIN32
- {
- void *new_addr;
-
- pages_unmap(addr, alloc_size);
- new_addr = pages_map(ret, size, commit);
- if (new_addr == ret)
- return (ret);
- if (new_addr)
- pages_unmap(new_addr, size);
- return (NULL);
- }
-#else
- {
- size_t trailsize = alloc_size - leadsize - size;
+ /*
+ * Ideally, there would be a way to specify alignment to mmap() (like
+ * NetBSD has), but in the absence of such a feature, we have to work
+ * hard to efficiently create aligned mappings. The reliable, but
+ * slow method is to create a mapping that is over-sized, then trim the
+ * excess. However, that always results in one or two calls to
+ * os_pages_unmap(), and it can leave holes in the process's virtual
+ * memory map if memory grows downward.
+ *
+ * Optimistically try mapping precisely the right amount before falling
+ * back to the slow method, with the expectation that the optimistic
+ * approach works most of the time.
+ */
- if (leadsize != 0)
- pages_unmap(addr, leadsize);
- if (trailsize != 0)
- pages_unmap((void *)((uintptr_t)ret + size), trailsize);
- return (ret);
+ void *ret = os_pages_map(addr, size, os_page, commit);
+ if (ret == NULL || ret == addr) {
+ return ret;
}
-#endif
+ assert(addr == NULL);
+ if (ALIGNMENT_ADDR2OFFSET(ret, alignment) != 0) {
+ os_pages_unmap(ret, size);
+ return pages_map_slow(size, alignment, commit);
+ }
+
+ assert(PAGE_ADDR2BASE(ret) == ret);
+ return ret;
+}
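
pages_map_slow() and os_pages_trim() above implement the fallback described in
the comment: over-allocate by alignment minus one page, then unmap the leading
and trailing slop so an aligned region of exactly the requested size remains.
A minimal standalone sketch of that trim technique (POSIX mmap; alignment is
assumed to be a power of two no smaller than the page size, and error handling
is reduced to NULL returns):

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Map `size` bytes aligned to `alignment`; both are page-size multiples. */
    static void *
    map_aligned(size_t size, size_t alignment) {
        size_t page = (size_t)sysconf(_SC_PAGESIZE);
        size_t alloc_size = size + alignment - page;
        if (alloc_size < size) {    /* size_t wrap-around */
            return NULL;
        }

        void *p = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            return NULL;
        }

        uintptr_t base = ((uintptr_t)p + (alignment - 1)) & ~(alignment - 1);
        size_t lead = base - (uintptr_t)p;
        size_t trail = alloc_size - lead - size;

        /* Give the slop on both sides back to the kernel. */
        if (lead != 0) {
            munmap(p, lead);
        }
        if (trail != 0) {
            munmap((void *)(base + size), trail);
        }
        return (void *)base;
    }

    int
    main(void) {
        void *p = map_aligned(1 << 20, 2 << 20);    /* 1 MiB, 2 MiB aligned */
        printf("%p\n", p);
        return p == NULL;
    }
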
+
+void
+pages_unmap(void *addr, size_t size) {
+ assert(PAGE_ADDR2BASE(addr) == addr);
+ assert(PAGE_CEILING(size) == size);
+
+ os_pages_unmap(addr, size);
}
static bool
-pages_commit_impl(void *addr, size_t size, bool commit)
-{
+pages_commit_impl(void *addr, size_t size, bool commit) {
+ assert(PAGE_ADDR2BASE(addr) == addr);
+ assert(PAGE_CEILING(size) == size);
- if (os_overcommits)
- return (true);
+ if (os_overcommits) {
+ return true;
+ }
#ifdef _WIN32
return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT,
@@ -133,103 +218,123 @@ pages_commit_impl(void *addr, size_t size, bool commit)
int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED,
-1, 0);
- if (result == MAP_FAILED)
- return (true);
+ if (result == MAP_FAILED) {
+ return true;
+ }
if (result != addr) {
/*
* We succeeded in mapping memory, but not in the right
* place.
*/
- pages_unmap(result, size);
- return (true);
+ os_pages_unmap(result, size);
+ return true;
}
- return (false);
+ return false;
}
#endif
}
bool
-pages_commit(void *addr, size_t size)
-{
-
- return (pages_commit_impl(addr, size, true));
+pages_commit(void *addr, size_t size) {
+ return pages_commit_impl(addr, size, true);
}
bool
-pages_decommit(void *addr, size_t size)
-{
-
- return (pages_commit_impl(addr, size, false));
+pages_decommit(void *addr, size_t size) {
+ return pages_commit_impl(addr, size, false);
}
bool
-pages_purge(void *addr, size_t size)
-{
- bool unzeroed;
+pages_purge_lazy(void *addr, size_t size) {
+ assert(PAGE_ADDR2BASE(addr) == addr);
+ assert(PAGE_CEILING(size) == size);
+
+ if (!pages_can_purge_lazy) {
+ return true;
+ }
#ifdef _WIN32
VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
- unzeroed = true;
-#elif (defined(JEMALLOC_PURGE_MADVISE_FREE) || \
- defined(JEMALLOC_PURGE_MADVISE_DONTNEED))
-# if defined(JEMALLOC_PURGE_MADVISE_FREE)
-# define JEMALLOC_MADV_PURGE MADV_FREE
-# define JEMALLOC_MADV_ZEROS false
-# elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED)
-# define JEMALLOC_MADV_PURGE MADV_DONTNEED
-# define JEMALLOC_MADV_ZEROS true
-# else
-# error No madvise(2) flag defined for purging unused dirty pages
-# endif
- int err = madvise(addr, size, JEMALLOC_MADV_PURGE);
- unzeroed = (!JEMALLOC_MADV_ZEROS || err != 0);
-# undef JEMALLOC_MADV_PURGE
-# undef JEMALLOC_MADV_ZEROS
+ return false;
+#elif defined(JEMALLOC_PURGE_MADVISE_FREE)
+ return (madvise(addr, size, MADV_FREE) != 0);
+#elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
+ !defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)
+ return (madvise(addr, size, MADV_DONTNEED) != 0);
#else
- /* Last resort no-op. */
- unzeroed = true;
+ not_reached();
#endif
- return (unzeroed);
}
bool
-pages_huge(void *addr, size_t size)
-{
-
+pages_purge_forced(void *addr, size_t size) {
assert(PAGE_ADDR2BASE(addr) == addr);
assert(PAGE_CEILING(size) == size);
-#ifdef JEMALLOC_HAVE_MADVISE_HUGE
- return (madvise(addr, size, MADV_HUGEPAGE) != 0);
+ if (!pages_can_purge_forced) {
+ return true;
+ }
+
+#if defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
+ defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)
+ return (madvise(addr, size, MADV_DONTNEED) != 0);
+#elif defined(JEMALLOC_MAPS_COALESCE)
+ /* Try to overlay a new demand-zeroed mapping. */
+ return pages_commit(addr, size);
#else
- return (false);
+ not_reached();
#endif
}
bool
-pages_nohuge(void *addr, size_t size)
-{
+pages_huge(void *addr, size_t size) {
+ assert(HUGEPAGE_ADDR2BASE(addr) == addr);
+ assert(HUGEPAGE_CEILING(size) == size);
- assert(PAGE_ADDR2BASE(addr) == addr);
- assert(PAGE_CEILING(size) == size);
+#ifdef JEMALLOC_THP
+ return (madvise(addr, size, MADV_HUGEPAGE) != 0);
+#else
+ return true;
+#endif
+}
+
+bool
+pages_nohuge(void *addr, size_t size) {
+ assert(HUGEPAGE_ADDR2BASE(addr) == addr);
+ assert(HUGEPAGE_CEILING(size) == size);
-#ifdef JEMALLOC_HAVE_MADVISE_HUGE
+#ifdef JEMALLOC_THP
return (madvise(addr, size, MADV_NOHUGEPAGE) != 0);
#else
- return (false);
+ return false;
+#endif
+}
+
+static size_t
+os_page_detect(void) {
+#ifdef _WIN32
+ SYSTEM_INFO si;
+ GetSystemInfo(&si);
+ return si.dwPageSize;
+#else
+ long result = sysconf(_SC_PAGESIZE);
+ if (result == -1) {
+ return LG_PAGE;
+ }
+ return (size_t)result;
#endif
}
#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
static bool
-os_overcommits_sysctl(void)
-{
+os_overcommits_sysctl(void) {
int vm_overcommit;
size_t sz;
sz = sizeof(vm_overcommit);
- if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0)
- return (false); /* Error. */
+ if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0) {
+ return false; /* Error. */
+ }
return ((vm_overcommit & 0x3) == 0);
}
@@ -242,19 +347,23 @@ os_overcommits_sysctl(void)
* wrappers.
*/
static bool
-os_overcommits_proc(void)
-{
+os_overcommits_proc(void) {
int fd;
char buf[1];
ssize_t nread;
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
- fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY);
+ fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY |
+ O_CLOEXEC);
+#elif defined(JEMALLOC_USE_SYSCALL) && defined(SYS_openat)
+ fd = (int)syscall(SYS_openat,
+ AT_FDCWD, "/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC);
#else
- fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
+ fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC);
#endif
- if (fd == -1)
- return (false); /* Error. */
+ if (fd == -1) {
+ return false; /* Error. */
+ }
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
nread = (ssize_t)syscall(SYS_read, fd, &buf, sizeof(buf));
@@ -268,8 +377,9 @@ os_overcommits_proc(void)
close(fd);
#endif
- if (nread < 1)
- return (false); /* Error. */
+ if (nread < 1) {
+ return false; /* Error. */
+ }
/*
* /proc/sys/vm/overcommit_memory meanings:
* 0: Heuristic overcommit.
@@ -280,9 +390,16 @@ os_overcommits_proc(void)
}
#endif
-void
-pages_boot(void)
-{
+bool
+pages_boot(void) {
+ os_page = os_page_detect();
+ if (os_page > PAGE) {
+ malloc_write("<jemalloc>: Unsupported system page size\n");
+ if (opt_abort) {
+ abort();
+ }
+ return true;
+ }
#ifndef _WIN32
mmap_flags = MAP_PRIVATE | MAP_ANON;
@@ -293,10 +410,13 @@ pages_boot(void)
#elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY)
os_overcommits = os_overcommits_proc();
# ifdef MAP_NORESERVE
- if (os_overcommits)
+ if (os_overcommits) {
mmap_flags |= MAP_NORESERVE;
+ }
# endif
#else
os_overcommits = false;
#endif
+
+ return false;
}
diff --git a/contrib/jemalloc/src/prng.c b/contrib/jemalloc/src/prng.c
index 76646a2a4c34..83c04bf9b5dd 100644
--- a/contrib/jemalloc/src/prng.c
+++ b/contrib/jemalloc/src/prng.c
@@ -1,2 +1,3 @@
-#define JEMALLOC_PRNG_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define JEMALLOC_PRNG_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
diff --git a/contrib/jemalloc/src/prof.c b/contrib/jemalloc/src/prof.c
index c89dade1f192..61dfa2cee588 100644
--- a/contrib/jemalloc/src/prof.c
+++ b/contrib/jemalloc/src/prof.c
@@ -1,14 +1,29 @@
-#define JEMALLOC_PROF_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define JEMALLOC_PROF_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/ckh.h"
+#include "jemalloc/internal/hash.h"
+#include "jemalloc/internal/malloc_io.h"
+#include "jemalloc/internal/mutex.h"
+
/******************************************************************************/
#ifdef JEMALLOC_PROF_LIBUNWIND
-#define UNW_LOCAL_ONLY
+#define UNW_LOCAL_ONLY
#include <libunwind.h>
#endif
#ifdef JEMALLOC_PROF_LIBGCC
+/*
+ * We have a circular dependency -- jemalloc_internal.h tells us if we should
+ * use libgcc's unwinding functionality, but after we've included that, we've
+ * already hooked _Unwind_Backtrace. We'll temporarily disable hooking.
+ */
+#undef _Unwind_Backtrace
#include <unwind.h>
+#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, hooks_libc_hook)
#endif
/******************************************************************************/
@@ -63,7 +78,7 @@ size_t lg_prof_sample;
* creating/destroying mutexes.
*/
static malloc_mutex_t *gctx_locks;
-static unsigned cum_gctxs; /* Atomic counter. */
+static atomic_u_t cum_gctxs; /* Atomic counter. */
/*
* Table of mutexes that are shared among tdata's. No operations require
@@ -78,7 +93,8 @@ static malloc_mutex_t *tdata_locks;
* structure that knows about all backtraces currently captured.
*/
static ckh_t bt2gctx;
-static malloc_mutex_t bt2gctx_mtx;
+/* Non static to enable profiling. */
+malloc_mutex_t bt2gctx_mtx;
/*
* Tree of all extant prof_tdata_t structures, regardless of state,
@@ -132,9 +148,8 @@ static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name);
/******************************************************************************/
/* Red-black trees. */
-JEMALLOC_INLINE_C int
-prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b)
-{
+static int
+prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) {
uint64_t a_thr_uid = a->thr_uid;
uint64_t b_thr_uid = b->thr_uid;
int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid);
@@ -150,30 +165,29 @@ prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b)
b_tctx_uid);
}
}
- return (ret);
+ return ret;
}
rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t,
tctx_link, prof_tctx_comp)
-JEMALLOC_INLINE_C int
-prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b)
-{
+static int
+prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) {
unsigned a_len = a->bt.len;
unsigned b_len = b->bt.len;
unsigned comp_len = (a_len < b_len) ? a_len : b_len;
int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *));
- if (ret == 0)
+ if (ret == 0) {
ret = (a_len > b_len) - (a_len < b_len);
- return (ret);
+ }
+ return ret;
}
rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link,
prof_gctx_comp)
-JEMALLOC_INLINE_C int
-prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b)
-{
+static int
+prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) {
int ret;
uint64_t a_uid = a->thr_uid;
uint64_t b_uid = b->thr_uid;
@@ -185,7 +199,7 @@ prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b)
ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim));
}
- return (ret);
+ return ret;
}
rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link,
@@ -194,8 +208,7 @@ rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link,
/******************************************************************************/
void
-prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated)
-{
+prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) {
prof_tdata_t *tdata;
cassert(config_prof);
@@ -208,26 +221,26 @@ prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated)
* programs.
*/
tdata = prof_tdata_get(tsd, true);
- if (tdata != NULL)
+ if (tdata != NULL) {
prof_sample_threshold_update(tdata);
+ }
}
if ((uintptr_t)tctx > (uintptr_t)1U) {
malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
tctx->prepared = false;
- if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx))
+ if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) {
prof_tctx_destroy(tsd, tctx);
- else
+ } else {
malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
+ }
}
}
void
prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
- prof_tctx_t *tctx)
-{
-
- prof_tctx_set(tsdn, ptr, usize, tctx);
+ prof_tctx_t *tctx) {
+ prof_tctx_set(tsdn, ptr, usize, NULL, tctx);
malloc_mutex_lock(tsdn, tctx->tdata->lock);
tctx->cnts.curobjs++;
@@ -241,35 +254,30 @@ prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
}
void
-prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
-{
-
+prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx) {
malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
assert(tctx->cnts.curobjs > 0);
assert(tctx->cnts.curbytes >= usize);
tctx->cnts.curobjs--;
tctx->cnts.curbytes -= usize;
- if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx))
+ if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) {
prof_tctx_destroy(tsd, tctx);
- else
+ } else {
malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
+ }
}
void
-bt_init(prof_bt_t *bt, void **vec)
-{
-
+bt_init(prof_bt_t *bt, void **vec) {
cassert(config_prof);
bt->vec = vec;
bt->len = 0;
}
-JEMALLOC_INLINE_C void
-prof_enter(tsd_t *tsd, prof_tdata_t *tdata)
-{
-
+static void
+prof_enter(tsd_t *tsd, prof_tdata_t *tdata) {
cassert(config_prof);
assert(tdata == prof_tdata_get(tsd, false));
@@ -281,10 +289,8 @@ prof_enter(tsd_t *tsd, prof_tdata_t *tdata)
malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
}
-JEMALLOC_INLINE_C void
-prof_leave(tsd_t *tsd, prof_tdata_t *tdata)
-{
-
+static void
+prof_leave(tsd_t *tsd, prof_tdata_t *tdata) {
cassert(config_prof);
assert(tdata == prof_tdata_get(tsd, false));
@@ -300,17 +306,18 @@ prof_leave(tsd_t *tsd, prof_tdata_t *tdata)
gdump = tdata->enq_gdump;
tdata->enq_gdump = false;
- if (idump)
+ if (idump) {
prof_idump(tsd_tsdn(tsd));
- if (gdump)
+ }
+ if (gdump) {
prof_gdump(tsd_tsdn(tsd));
+ }
}
}
#ifdef JEMALLOC_PROF_LIBUNWIND
void
-prof_backtrace(prof_bt_t *bt)
-{
+prof_backtrace(prof_bt_t *bt) {
int nframes;
cassert(config_prof);
@@ -318,42 +325,41 @@ prof_backtrace(prof_bt_t *bt)
assert(bt->vec != NULL);
nframes = unw_backtrace(bt->vec, PROF_BT_MAX);
- if (nframes <= 0)
+ if (nframes <= 0) {
return;
+ }
bt->len = nframes;
}
#elif (defined(JEMALLOC_PROF_LIBGCC))
static _Unwind_Reason_Code
-prof_unwind_init_callback(struct _Unwind_Context *context, void *arg)
-{
-
+prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) {
cassert(config_prof);
- return (_URC_NO_REASON);
+ return _URC_NO_REASON;
}
static _Unwind_Reason_Code
-prof_unwind_callback(struct _Unwind_Context *context, void *arg)
-{
+prof_unwind_callback(struct _Unwind_Context *context, void *arg) {
prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
void *ip;
cassert(config_prof);
ip = (void *)_Unwind_GetIP(context);
- if (ip == NULL)
- return (_URC_END_OF_STACK);
+ if (ip == NULL) {
+ return _URC_END_OF_STACK;
+ }
data->bt->vec[data->bt->len] = ip;
data->bt->len++;
- if (data->bt->len == data->max)
- return (_URC_END_OF_STACK);
+ if (data->bt->len == data->max) {
+ return _URC_END_OF_STACK;
+ }
- return (_URC_NO_REASON);
+ return _URC_NO_REASON;
}
void
-prof_backtrace(prof_bt_t *bt)
-{
+prof_backtrace(prof_bt_t *bt) {
prof_unwind_data_t data = {bt, PROF_BT_MAX};
cassert(config_prof);
@@ -362,20 +368,22 @@ prof_backtrace(prof_bt_t *bt)
}
#elif (defined(JEMALLOC_PROF_GCC))
void
-prof_backtrace(prof_bt_t *bt)
-{
-#define BT_FRAME(i) \
+prof_backtrace(prof_bt_t *bt) {
+#define BT_FRAME(i) \
if ((i) < PROF_BT_MAX) { \
void *p; \
- if (__builtin_frame_address(i) == 0) \
+ if (__builtin_frame_address(i) == 0) { \
return; \
+ } \
p = __builtin_return_address(i); \
- if (p == NULL) \
+ if (p == NULL) { \
return; \
+ } \
bt->vec[(i)] = p; \
bt->len = (i) + 1; \
- } else \
- return;
+ } else { \
+ return; \
+ }
cassert(config_prof);
@@ -523,41 +531,36 @@ prof_backtrace(prof_bt_t *bt)
}
#else
void
-prof_backtrace(prof_bt_t *bt)
-{
-
+prof_backtrace(prof_bt_t *bt) {
cassert(config_prof);
not_reached();
}
#endif
static malloc_mutex_t *
-prof_gctx_mutex_choose(void)
-{
- unsigned ngctxs = atomic_add_u(&cum_gctxs, 1);
+prof_gctx_mutex_choose(void) {
+ unsigned ngctxs = atomic_fetch_add_u(&cum_gctxs, 1, ATOMIC_RELAXED);
- return (&gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]);
+ return &gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS];
}
static malloc_mutex_t *
-prof_tdata_mutex_choose(uint64_t thr_uid)
-{
-
- return (&tdata_locks[thr_uid % PROF_NTDATA_LOCKS]);
+prof_tdata_mutex_choose(uint64_t thr_uid) {
+ return &tdata_locks[thr_uid % PROF_NTDATA_LOCKS];
}
static prof_gctx_t *
-prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt)
-{
+prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) {
/*
* Create a single allocation that has space for vec of length bt->len.
*/
size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *));
prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size,
- size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true),
+ sz_size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true),
true);
- if (gctx == NULL)
- return (NULL);
+ if (gctx == NULL) {
+ return NULL;
+ }
gctx->lock = prof_gctx_mutex_choose();
/*
* Set nlimbo to 1, in order to avoid a race condition with
@@ -569,14 +572,12 @@ prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt)
memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *));
gctx->bt.vec = gctx->vec;
gctx->bt.len = bt->len;
- return (gctx);
+ return gctx;
}
static void
prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
- prof_tdata_t *tdata)
-{
-
+ prof_tdata_t *tdata) {
cassert(config_prof);
/*
@@ -591,12 +592,13 @@ prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
assert(gctx->nlimbo != 0);
if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
/* Remove gctx from bt2gctx. */
- if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL))
+ if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) {
not_reached();
+ }
prof_leave(tsd, tdata_self);
/* Destroy gctx. */
malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
- idalloctm(tsd_tsdn(tsd), gctx, NULL, true, true);
+ idalloctm(tsd_tsdn(tsd), gctx, NULL, NULL, true, true);
} else {
/*
* Compensate for increment in prof_tctx_destroy() or
@@ -609,36 +611,37 @@ prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
}
static bool
-prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx)
-{
-
+prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx) {
malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
- if (opt_prof_accum)
- return (false);
- if (tctx->cnts.curobjs != 0)
- return (false);
- if (tctx->prepared)
- return (false);
- return (true);
+ if (opt_prof_accum) {
+ return false;
+ }
+ if (tctx->cnts.curobjs != 0) {
+ return false;
+ }
+ if (tctx->prepared) {
+ return false;
+ }
+ return true;
}
static bool
-prof_gctx_should_destroy(prof_gctx_t *gctx)
-{
-
- if (opt_prof_accum)
- return (false);
- if (!tctx_tree_empty(&gctx->tctxs))
- return (false);
- if (gctx->nlimbo != 0)
- return (false);
- return (true);
+prof_gctx_should_destroy(prof_gctx_t *gctx) {
+ if (opt_prof_accum) {
+ return false;
+ }
+ if (!tctx_tree_empty(&gctx->tctxs)) {
+ return false;
+ }
+ if (gctx->nlimbo != 0) {
+ return false;
+ }
+ return true;
}
static void
-prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
-{
+prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) {
prof_tdata_t *tdata = tctx->tdata;
prof_gctx_t *gctx = tctx->gctx;
bool destroy_tdata, destroy_tctx, destroy_gctx;
@@ -677,8 +680,9 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
*/
gctx->nlimbo++;
destroy_gctx = true;
- } else
+ } else {
destroy_gctx = false;
+ }
break;
case prof_tctx_state_dumping:
/*
@@ -703,21 +707,22 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock);
- if (destroy_tdata)
+ if (destroy_tdata) {
prof_tdata_destroy(tsd, tdata, false);
+ }
- if (destroy_tctx)
- idalloctm(tsd_tsdn(tsd), tctx, NULL, true, true);
+ if (destroy_tctx) {
+ idalloctm(tsd_tsdn(tsd), tctx, NULL, NULL, true, true);
+ }
}
static bool
prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
- void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx)
-{
+ void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) {
union {
prof_gctx_t *p;
void *v;
- } gctx;
+ } gctx, tgctx;
union {
prof_bt_t *p;
void *v;
@@ -727,20 +732,32 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
prof_enter(tsd, tdata);
if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
/* bt has never been seen before. Insert it. */
- gctx.p = prof_gctx_create(tsd_tsdn(tsd), bt);
- if (gctx.v == NULL) {
- prof_leave(tsd, tdata);
- return (true);
+ prof_leave(tsd, tdata);
+ tgctx.p = prof_gctx_create(tsd_tsdn(tsd), bt);
+ if (tgctx.v == NULL) {
+ return true;
}
- btkey.p = &gctx.p->bt;
- if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
- /* OOM. */
- prof_leave(tsd, tdata);
- idalloctm(tsd_tsdn(tsd), gctx.v, NULL, true, true);
- return (true);
+ prof_enter(tsd, tdata);
+ if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
+ gctx.p = tgctx.p;
+ btkey.p = &gctx.p->bt;
+ if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
+ /* OOM. */
+ prof_leave(tsd, tdata);
+ idalloctm(tsd_tsdn(tsd), gctx.v, NULL, NULL,
+ true, true);
+ return true;
+ }
+ new_gctx = true;
+ } else {
+ new_gctx = false;
}
- new_gctx = true;
} else {
+ tgctx.v = NULL;
+ new_gctx = false;
+ }
+
+ if (!new_gctx) {
/*
* Increment nlimbo, in order to avoid a race condition with
* prof_tctx_destroy()/prof_gctx_try_destroy().
@@ -749,18 +766,23 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
gctx.p->nlimbo++;
malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock);
new_gctx = false;
+
+ if (tgctx.v != NULL) {
+ /* Lost race to insert. */
+ idalloctm(tsd_tsdn(tsd), tgctx.v, NULL, NULL, true,
+ true);
+ }
}
prof_leave(tsd, tdata);
*p_btkey = btkey.v;
*p_gctx = gctx.p;
*p_new_gctx = new_gctx;
- return (false);
+ return false;
}
prof_tctx_t *
-prof_lookup(tsd_t *tsd, prof_bt_t *bt)
-{
+prof_lookup(tsd_t *tsd, prof_bt_t *bt) {
union {
prof_tctx_t *p;
void *v;
@@ -771,13 +793,15 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
cassert(config_prof);
tdata = prof_tdata_get(tsd, false);
- if (tdata == NULL)
- return (NULL);
+ if (tdata == NULL) {
+ return NULL;
+ }
malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v);
- if (!not_found) /* Note double negative! */
+ if (!not_found) { /* Note double negative! */
ret.p->prepared = true;
+ }
malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
if (not_found) {
void *btkey;
@@ -789,17 +813,19 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
* cache.
*/
if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx,
- &new_gctx))
- return (NULL);
+ &new_gctx)) {
+ return NULL;
+ }
/* Link a prof_tctx_t into gctx for this thread. */
ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t),
- size2index(sizeof(prof_tctx_t)), false, NULL, true,
+ sz_size2index(sizeof(prof_tctx_t)), false, NULL, true,
arena_ichoose(tsd, NULL), true);
if (ret.p == NULL) {
- if (new_gctx)
+ if (new_gctx) {
prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
- return (NULL);
+ }
+ return NULL;
}
ret.p->tdata = tdata;
ret.p->thr_uid = tdata->thr_uid;
@@ -813,10 +839,11 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v);
malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
if (error) {
- if (new_gctx)
+ if (new_gctx) {
prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
- idalloctm(tsd_tsdn(tsd), ret.v, NULL, true, true);
- return (NULL);
+ }
+ idalloctm(tsd_tsdn(tsd), ret.v, NULL, NULL, true, true);
+ return NULL;
}
malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
ret.p->state = prof_tctx_state_nominal;
@@ -825,7 +852,7 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
}
- return (ret.p);
+ return ret.p;
}
/*
@@ -842,14 +869,14 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
* -mno-sse) in order for the workaround to be complete.
*/
void
-prof_sample_threshold_update(prof_tdata_t *tdata)
-{
+prof_sample_threshold_update(prof_tdata_t *tdata) {
#ifdef JEMALLOC_PROF
uint64_t r;
double u;
- if (!config_prof)
+ if (!config_prof) {
return;
+ }
if (lg_prof_sample == 0) {
tdata->bytes_until_sample = 0;
@@ -884,18 +911,17 @@ prof_sample_threshold_update(prof_tdata_t *tdata)
#ifdef JEMALLOC_JET
static prof_tdata_t *
-prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
-{
+prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
+ void *arg) {
size_t *tdata_count = (size_t *)arg;
(*tdata_count)++;
- return (NULL);
+ return NULL;
}
size_t
-prof_tdata_count(void)
-{
+prof_tdata_count(void) {
size_t tdata_count = 0;
tsdn_t *tsdn;
@@ -905,59 +931,48 @@ prof_tdata_count(void)
(void *)&tdata_count);
malloc_mutex_unlock(tsdn, &tdatas_mtx);
- return (tdata_count);
+ return tdata_count;
}
-#endif
-#ifdef JEMALLOC_JET
size_t
-prof_bt_count(void)
-{
+prof_bt_count(void) {
size_t bt_count;
tsd_t *tsd;
prof_tdata_t *tdata;
tsd = tsd_fetch();
tdata = prof_tdata_get(tsd, false);
- if (tdata == NULL)
- return (0);
+ if (tdata == NULL) {
+ return 0;
+ }
malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
bt_count = ckh_count(&bt2gctx);
malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
- return (bt_count);
+ return bt_count;
}
#endif
-#ifdef JEMALLOC_JET
-#undef prof_dump_open
-#define prof_dump_open JEMALLOC_N(prof_dump_open_impl)
-#endif
static int
-prof_dump_open(bool propagate_err, const char *filename)
-{
+prof_dump_open_impl(bool propagate_err, const char *filename) {
int fd;
fd = creat(filename, 0644);
if (fd == -1 && !propagate_err) {
malloc_printf("<jemalloc>: creat(\"%s\"), 0644) failed\n",
filename);
- if (opt_abort)
+ if (opt_abort) {
abort();
+ }
}
- return (fd);
+ return fd;
}
-#ifdef JEMALLOC_JET
-#undef prof_dump_open
-#define prof_dump_open JEMALLOC_N(prof_dump_open)
-prof_dump_open_t *prof_dump_open = JEMALLOC_N(prof_dump_open_impl);
-#endif
+prof_dump_open_t *JET_MUTABLE prof_dump_open = prof_dump_open_impl;
static bool
-prof_dump_flush(bool propagate_err)
-{
+prof_dump_flush(bool propagate_err) {
bool ret = false;
ssize_t err;
@@ -968,19 +983,19 @@ prof_dump_flush(bool propagate_err)
if (!propagate_err) {
malloc_write("<jemalloc>: write() failed during heap "
"profile flush\n");
- if (opt_abort)
+ if (opt_abort) {
abort();
+ }
}
ret = true;
}
prof_dump_buf_end = 0;
- return (ret);
+ return ret;
}
static bool
-prof_dump_close(bool propagate_err)
-{
+prof_dump_close(bool propagate_err) {
bool ret;
assert(prof_dump_fd != -1);
@@ -988,12 +1003,11 @@ prof_dump_close(bool propagate_err)
close(prof_dump_fd);
prof_dump_fd = -1;
- return (ret);
+ return ret;
}
static bool
-prof_dump_write(bool propagate_err, const char *s)
-{
+prof_dump_write(bool propagate_err, const char *s) {
size_t i, slen, n;
cassert(config_prof);
@@ -1002,9 +1016,11 @@ prof_dump_write(bool propagate_err, const char *s)
slen = strlen(s);
while (i < slen) {
/* Flush the buffer if it is full. */
- if (prof_dump_buf_end == PROF_DUMP_BUFSIZE)
- if (prof_dump_flush(propagate_err) && propagate_err)
- return (true);
+ if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
+ if (prof_dump_flush(propagate_err) && propagate_err) {
+ return true;
+ }
+ }
if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) {
/* Finish writing. */
@@ -1018,13 +1034,12 @@ prof_dump_write(bool propagate_err, const char *s)
i += n;
}
- return (false);
+ return false;
}
JEMALLOC_FORMAT_PRINTF(2, 3)
static bool
-prof_dump_printf(bool propagate_err, const char *format, ...)
-{
+prof_dump_printf(bool propagate_err, const char *format, ...) {
bool ret;
va_list ap;
char buf[PROF_PRINTF_BUFSIZE];
@@ -1034,13 +1049,11 @@ prof_dump_printf(bool propagate_err, const char *format, ...)
va_end(ap);
ret = prof_dump_write(propagate_err, buf);
- return (ret);
+ return ret;
}
static void
-prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata)
-{
-
+prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) {
malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
malloc_mutex_lock(tsdn, tctx->gctx->lock);
@@ -1071,9 +1084,7 @@ prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata)
}
static void
-prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx)
-{
-
+prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) {
malloc_mutex_assert_owner(tsdn, gctx->lock);
gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
@@ -1085,8 +1096,7 @@ prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx)
}
static prof_tctx_t *
-prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
-{
+prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) {
tsdn_t *tsdn = (tsdn_t *)arg;
malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
@@ -1103,7 +1113,7 @@ prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
not_reached();
}
- return (NULL);
+ return NULL;
}
struct prof_tctx_dump_iter_arg_s {
@@ -1112,8 +1122,7 @@ struct prof_tctx_dump_iter_arg_s {
};
static prof_tctx_t *
-prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque)
-{
+prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) {
struct prof_tctx_dump_iter_arg_s *arg =
(struct prof_tctx_dump_iter_arg_s *)opaque;
@@ -1130,18 +1139,18 @@ prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque)
" t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": "
"%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs,
tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs,
- tctx->dump_cnts.accumbytes))
- return (tctx);
+ tctx->dump_cnts.accumbytes)) {
+ return tctx;
+ }
break;
default:
not_reached();
}
- return (NULL);
+ return NULL;
}
static prof_tctx_t *
-prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
-{
+prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) {
tsdn_t *tsdn = (tsdn_t *)arg;
prof_tctx_t *ret;
@@ -1163,13 +1172,11 @@ prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
ret = NULL;
label_return:
- return (ret);
+ return ret;
}
static void
-prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs)
-{
-
+prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) {
cassert(config_prof);
malloc_mutex_lock(tsdn, gctx->lock);
@@ -1193,24 +1200,23 @@ struct prof_gctx_merge_iter_arg_s {
};
static prof_gctx_t *
-prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque)
-{
+prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) {
struct prof_gctx_merge_iter_arg_s *arg =
(struct prof_gctx_merge_iter_arg_s *)opaque;
malloc_mutex_lock(arg->tsdn, gctx->lock);
tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter,
(void *)arg->tsdn);
- if (gctx->cnt_summed.curobjs != 0)
+ if (gctx->cnt_summed.curobjs != 0) {
arg->leak_ngctx++;
+ }
malloc_mutex_unlock(arg->tsdn, gctx->lock);
- return (NULL);
+ return NULL;
}
static void
-prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
-{
+prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) {
prof_tdata_t *tdata = prof_tdata_get(tsd, false);
prof_gctx_t *gctx;
@@ -1238,9 +1244,10 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
tctx_tree_remove(&gctx->tctxs,
to_destroy);
idalloctm(tsd_tsdn(tsd), to_destroy,
- NULL, true, true);
- } else
+ NULL, NULL, true, true);
+ } else {
next = NULL;
+ }
} while (next != NULL);
}
gctx->nlimbo--;
@@ -1248,8 +1255,9 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
gctx->nlimbo++;
malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
- } else
+ } else {
malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ }
}
}
@@ -1260,8 +1268,7 @@ struct prof_tdata_merge_iter_arg_s {
static prof_tdata_t *
prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
- void *opaque)
-{
+ void *opaque) {
struct prof_tdata_merge_iter_arg_s *arg =
(struct prof_tdata_merge_iter_arg_s *)opaque;
@@ -1276,8 +1283,9 @@ prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
tdata->dumping = true;
memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
- &tctx.v);)
+ &tctx.v);) {
prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata);
+ }
arg->cnt_all.curobjs += tdata->cnt_summed.curobjs;
arg->cnt_all.curbytes += tdata->cnt_summed.curbytes;
@@ -1285,20 +1293,22 @@ prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs;
arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes;
}
- } else
+ } else {
tdata->dumping = false;
+ }
malloc_mutex_unlock(arg->tsdn, tdata->lock);
- return (NULL);
+ return NULL;
}
static prof_tdata_t *
-prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
-{
+prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
+ void *arg) {
bool propagate_err = *(bool *)arg;
- if (!tdata->dumping)
- return (NULL);
+ if (!tdata->dumping) {
+ return NULL;
+ }
if (prof_dump_printf(propagate_err,
" t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n",
@@ -1306,43 +1316,36 @@ prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs,
tdata->cnt_summed.accumbytes,
(tdata->thread_name != NULL) ? " " : "",
- (tdata->thread_name != NULL) ? tdata->thread_name : ""))
- return (tdata);
- return (NULL);
+ (tdata->thread_name != NULL) ? tdata->thread_name : "")) {
+ return tdata;
+ }
+ return NULL;
}
-#ifdef JEMALLOC_JET
-#undef prof_dump_header
-#define prof_dump_header JEMALLOC_N(prof_dump_header_impl)
-#endif
static bool
-prof_dump_header(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all)
-{
+prof_dump_header_impl(tsdn_t *tsdn, bool propagate_err,
+ const prof_cnt_t *cnt_all) {
bool ret;
if (prof_dump_printf(propagate_err,
"heap_v2/%"FMTu64"\n"
" t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
((uint64_t)1U << lg_prof_sample), cnt_all->curobjs,
- cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes))
- return (true);
+ cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes)) {
+ return true;
+ }
malloc_mutex_lock(tsdn, &tdatas_mtx);
ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter,
(void *)&propagate_err) != NULL);
malloc_mutex_unlock(tsdn, &tdatas_mtx);
- return (ret);
+ return ret;
}
-#ifdef JEMALLOC_JET
-#undef prof_dump_header
-#define prof_dump_header JEMALLOC_N(prof_dump_header)
-prof_dump_header_t *prof_dump_header = JEMALLOC_N(prof_dump_header_impl);
-#endif
+prof_dump_header_t *JET_MUTABLE prof_dump_header = prof_dump_header_impl;
static bool
prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx,
- const prof_bt_t *bt, prof_gctx_tree_t *gctxs)
-{
+ const prof_bt_t *bt, prof_gctx_tree_t *gctxs) {
bool ret;
unsigned i;
struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg;
@@ -1392,14 +1395,13 @@ prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx,
ret = false;
label_return:
- return (ret);
+ return ret;
}
#ifndef _WIN32
JEMALLOC_FORMAT_PRINTF(1, 2)
static int
-prof_open_maps(const char *format, ...)
-{
+prof_open_maps(const char *format, ...) {
int mfd;
va_list ap;
char filename[PATH_MAX + 1];
@@ -1407,26 +1409,23 @@ prof_open_maps(const char *format, ...)
va_start(ap, format);
malloc_vsnprintf(filename, sizeof(filename), format, ap);
va_end(ap);
- mfd = open(filename, O_RDONLY);
+ mfd = open(filename, O_RDONLY | O_CLOEXEC);
- return (mfd);
+ return mfd;
}
#endif
static int
-prof_getpid(void)
-{
-
+prof_getpid(void) {
#ifdef _WIN32
- return (GetCurrentProcessId());
+ return GetCurrentProcessId();
#else
- return (getpid());
+ return getpid();
#endif
}
static bool
-prof_dump_maps(bool propagate_err)
-{
+prof_dump_maps(bool propagate_err) {
bool ret;
int mfd;
@@ -1440,8 +1439,9 @@ prof_dump_maps(bool propagate_err)
int pid = prof_getpid();
mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid);
- if (mfd == -1)
+ if (mfd == -1) {
mfd = prof_open_maps("/proc/%d/maps", pid);
+ }
}
#endif
if (mfd != -1) {
@@ -1473,9 +1473,10 @@ prof_dump_maps(bool propagate_err)
ret = false;
label_return:
- if (mfd != -1)
+ if (mfd != -1) {
close(mfd);
- return (ret);
+ }
+ return ret;
}
/*
@@ -1484,9 +1485,7 @@ label_return:
*/
static void
prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx,
- const char *filename)
-{
-
+ const char *filename) {
#ifdef JEMALLOC_PROF
/*
* Scaling is equivalent AdjustSamples() in jeprof, but the result may
@@ -1521,8 +1520,7 @@ struct prof_gctx_dump_iter_arg_s {
};
static prof_gctx_t *
-prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque)
-{
+prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) {
prof_gctx_t *ret;
struct prof_gctx_dump_iter_arg_s *arg =
(struct prof_gctx_dump_iter_arg_s *)opaque;
@@ -1538,104 +1536,182 @@ prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque)
ret = NULL;
label_return:
malloc_mutex_unlock(arg->tsdn, gctx->lock);
- return (ret);
+ return ret;
}
-static bool
-prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
-{
- prof_tdata_t *tdata;
- struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
+static void
+prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata,
+ struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg,
+ struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg,
+ prof_gctx_tree_t *gctxs) {
size_t tabind;
union {
prof_gctx_t *p;
void *v;
} gctx;
- struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
- struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg;
- prof_gctx_tree_t gctxs;
-
- cassert(config_prof);
-
- tdata = prof_tdata_get(tsd, true);
- if (tdata == NULL)
- return (true);
- malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
prof_enter(tsd, tdata);
/*
* Put gctx's in limbo and clear their counters in preparation for
* summing.
*/
- gctx_tree_new(&gctxs);
- for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);)
- prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, &gctxs);
+ gctx_tree_new(gctxs);
+ for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) {
+ prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, gctxs);
+ }
/*
* Iterate over tdatas, and for the non-expired ones snapshot their tctx
* stats and merge them into the associated gctx's.
*/
- prof_tdata_merge_iter_arg.tsdn = tsd_tsdn(tsd);
- memset(&prof_tdata_merge_iter_arg.cnt_all, 0, sizeof(prof_cnt_t));
+ prof_tdata_merge_iter_arg->tsdn = tsd_tsdn(tsd);
+ memset(&prof_tdata_merge_iter_arg->cnt_all, 0, sizeof(prof_cnt_t));
malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter,
- (void *)&prof_tdata_merge_iter_arg);
+ (void *)prof_tdata_merge_iter_arg);
malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
/* Merge tctx stats into gctx's. */
- prof_gctx_merge_iter_arg.tsdn = tsd_tsdn(tsd);
- prof_gctx_merge_iter_arg.leak_ngctx = 0;
- gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter,
- (void *)&prof_gctx_merge_iter_arg);
+ prof_gctx_merge_iter_arg->tsdn = tsd_tsdn(tsd);
+ prof_gctx_merge_iter_arg->leak_ngctx = 0;
+ gctx_tree_iter(gctxs, NULL, prof_gctx_merge_iter,
+ (void *)prof_gctx_merge_iter_arg);
prof_leave(tsd, tdata);
+}
+static bool
+prof_dump_file(tsd_t *tsd, bool propagate_err, const char *filename,
+ bool leakcheck, prof_tdata_t *tdata,
+ struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg,
+ struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg,
+ struct prof_gctx_dump_iter_arg_s *prof_gctx_dump_iter_arg,
+ prof_gctx_tree_t *gctxs) {
/* Create dump file. */
- if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1)
- goto label_open_close_error;
+ if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) {
+ return true;
+ }
/* Dump profile header. */
if (prof_dump_header(tsd_tsdn(tsd), propagate_err,
- &prof_tdata_merge_iter_arg.cnt_all))
+ &prof_tdata_merge_iter_arg->cnt_all)) {
goto label_write_error;
+ }
/* Dump per gctx profile stats. */
- prof_gctx_dump_iter_arg.tsdn = tsd_tsdn(tsd);
- prof_gctx_dump_iter_arg.propagate_err = propagate_err;
- if (gctx_tree_iter(&gctxs, NULL, prof_gctx_dump_iter,
- (void *)&prof_gctx_dump_iter_arg) != NULL)
+ prof_gctx_dump_iter_arg->tsdn = tsd_tsdn(tsd);
+ prof_gctx_dump_iter_arg->propagate_err = propagate_err;
+ if (gctx_tree_iter(gctxs, NULL, prof_gctx_dump_iter,
+ (void *)prof_gctx_dump_iter_arg) != NULL) {
goto label_write_error;
+ }
/* Dump /proc/<pid>/maps if possible. */
- if (prof_dump_maps(propagate_err))
+ if (prof_dump_maps(propagate_err)) {
goto label_write_error;
+ }
+
+ if (prof_dump_close(propagate_err)) {
+ return true;
+ }
+
+ return false;
+label_write_error:
+ prof_dump_close(propagate_err);
+ return true;
+}
+
+static bool
+prof_dump(tsd_t *tsd, bool propagate_err, const char *filename,
+ bool leakcheck) {
+ cassert(config_prof);
+ assert(tsd_reentrancy_level_get(tsd) == 0);
+
+ prof_tdata_t * tdata = prof_tdata_get(tsd, true);
+ if (tdata == NULL) {
+ return true;
+ }
- if (prof_dump_close(propagate_err))
- goto label_open_close_error;
+ pre_reentrancy(tsd);
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
+ prof_gctx_tree_t gctxs;
+ struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
+ struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
+ struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg;
+ prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg,
+ &prof_gctx_merge_iter_arg, &gctxs);
+ bool err = prof_dump_file(tsd, propagate_err, filename, leakcheck, tdata,
+ &prof_tdata_merge_iter_arg, &prof_gctx_merge_iter_arg,
+ &prof_gctx_dump_iter_arg, &gctxs);
prof_gctx_finish(tsd, &gctxs);
+
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
+ post_reentrancy(tsd);
+
+ if (err) {
+ return true;
+ }
if (leakcheck) {
prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all,
prof_gctx_merge_iter_arg.leak_ngctx, filename);
}
- return (false);
-label_write_error:
- prof_dump_close(propagate_err);
-label_open_close_error:
+ return false;
+}
+
+#ifdef JEMALLOC_JET
+void
+prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs,
+ uint64_t *accumbytes) {
+ tsd_t *tsd;
+ prof_tdata_t *tdata;
+ struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
+ struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
+ prof_gctx_tree_t gctxs;
+
+ tsd = tsd_fetch();
+ tdata = prof_tdata_get(tsd, false);
+ if (tdata == NULL) {
+ if (curobjs != NULL) {
+ *curobjs = 0;
+ }
+ if (curbytes != NULL) {
+ *curbytes = 0;
+ }
+ if (accumobjs != NULL) {
+ *accumobjs = 0;
+ }
+ if (accumbytes != NULL) {
+ *accumbytes = 0;
+ }
+ return;
+ }
+
+ prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg,
+ &prof_gctx_merge_iter_arg, &gctxs);
prof_gctx_finish(tsd, &gctxs);
- malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
- return (true);
+
+ if (curobjs != NULL) {
+ *curobjs = prof_tdata_merge_iter_arg.cnt_all.curobjs;
+ }
+ if (curbytes != NULL) {
+ *curbytes = prof_tdata_merge_iter_arg.cnt_all.curbytes;
+ }
+ if (accumobjs != NULL) {
+ *accumobjs = prof_tdata_merge_iter_arg.cnt_all.accumobjs;
+ }
+ if (accumbytes != NULL) {
+ *accumbytes = prof_tdata_merge_iter_arg.cnt_all.accumbytes;
+ }
}
+#endif
-#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1)
-#define VSEQ_INVALID UINT64_C(0xffffffffffffffff)
+#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1)
+#define VSEQ_INVALID UINT64_C(0xffffffffffffffff)
static void
-prof_dump_filename(char *filename, char v, uint64_t vseq)
-{
-
+prof_dump_filename(char *filename, char v, uint64_t vseq) {
cassert(config_prof);
if (vseq != VSEQ_INVALID) {
@@ -1653,8 +1729,7 @@ prof_dump_filename(char *filename, char v, uint64_t vseq)
}
static void
-prof_fdump(void)
-{
+prof_fdump(void) {
tsd_t *tsd;
char filename[DUMP_FILENAME_BUFSIZE];
@@ -1662,9 +1737,11 @@ prof_fdump(void)
assert(opt_prof_final);
assert(opt_prof_prefix[0] != '\0');
- if (!prof_booted)
+ if (!prof_booted) {
return;
+ }
tsd = tsd_fetch();
+ assert(tsd_reentrancy_level_get(tsd) == 0);
malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
prof_dump_filename(filename, 'f', VSEQ_INVALID);
@@ -1672,20 +1749,41 @@ prof_fdump(void)
prof_dump(tsd, false, filename, opt_prof_leak);
}
+bool
+prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum) {
+ cassert(config_prof);
+
+#ifndef JEMALLOC_ATOMIC_U64
+ if (malloc_mutex_init(&prof_accum->mtx, "prof_accum",
+ WITNESS_RANK_PROF_ACCUM, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ prof_accum->accumbytes = 0;
+#else
+ atomic_store_u64(&prof_accum->accumbytes, 0, ATOMIC_RELAXED);
+#endif
+ return false;
+}
+
void
-prof_idump(tsdn_t *tsdn)
-{
+prof_idump(tsdn_t *tsdn) {
tsd_t *tsd;
prof_tdata_t *tdata;
cassert(config_prof);
- if (!prof_booted || tsdn_null(tsdn))
+ if (!prof_booted || tsdn_null(tsdn)) {
return;
+ }
tsd = tsdn_tsd(tsdn);
+ if (tsd_reentrancy_level_get(tsd) > 0) {
+ return;
+ }
+
tdata = prof_tdata_get(tsd, false);
- if (tdata == NULL)
+ if (tdata == NULL) {
return;
+ }
if (tdata->enq) {
tdata->enq_idump = true;
return;
@@ -1702,42 +1800,47 @@ prof_idump(tsdn_t *tsdn)
}
bool
-prof_mdump(tsd_t *tsd, const char *filename)
-{
- char filename_buf[DUMP_FILENAME_BUFSIZE];
-
+prof_mdump(tsd_t *tsd, const char *filename) {
cassert(config_prof);
+ assert(tsd_reentrancy_level_get(tsd) == 0);
- if (!opt_prof || !prof_booted)
- return (true);
-
+ if (!opt_prof || !prof_booted) {
+ return true;
+ }
+ char filename_buf[DUMP_FILENAME_BUFSIZE];
if (filename == NULL) {
/* No filename specified, so automatically generate one. */
- if (opt_prof_prefix[0] == '\0')
- return (true);
+ if (opt_prof_prefix[0] == '\0') {
+ return true;
+ }
malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
prof_dump_mseq++;
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
filename = filename_buf;
}
- return (prof_dump(tsd, true, filename, false));
+ return prof_dump(tsd, true, filename, false);
}
void
-prof_gdump(tsdn_t *tsdn)
-{
+prof_gdump(tsdn_t *tsdn) {
tsd_t *tsd;
prof_tdata_t *tdata;
cassert(config_prof);
- if (!prof_booted || tsdn_null(tsdn))
+ if (!prof_booted || tsdn_null(tsdn)) {
return;
+ }
tsd = tsdn_tsd(tsdn);
+ if (tsd_reentrancy_level_get(tsd) > 0) {
+ return;
+ }
+
tdata = prof_tdata_get(tsd, false);
- if (tdata == NULL)
+ if (tdata == NULL) {
return;
+ }
if (tdata->enq) {
tdata->enq_gdump = true;
return;
@@ -1754,8 +1857,7 @@ prof_gdump(tsdn_t *tsdn)
}
static void
-prof_bt_hash(const void *key, size_t r_hash[2])
-{
+prof_bt_hash(const void *key, size_t r_hash[2]) {
prof_bt_t *bt = (prof_bt_t *)key;
cassert(config_prof);
@@ -1764,21 +1866,20 @@ prof_bt_hash(const void *key, size_t r_hash[2])
}
static bool
-prof_bt_keycomp(const void *k1, const void *k2)
-{
+prof_bt_keycomp(const void *k1, const void *k2) {
const prof_bt_t *bt1 = (prof_bt_t *)k1;
const prof_bt_t *bt2 = (prof_bt_t *)k2;
cassert(config_prof);
- if (bt1->len != bt2->len)
- return (false);
+ if (bt1->len != bt2->len) {
+ return false;
+ }
return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
}
-JEMALLOC_INLINE_C uint64_t
-prof_thr_uid_alloc(tsdn_t *tsdn)
-{
+static uint64_t
+prof_thr_uid_alloc(tsdn_t *tsdn) {
uint64_t thr_uid;
malloc_mutex_lock(tsdn, &next_thr_uid_mtx);
@@ -1786,23 +1887,23 @@ prof_thr_uid_alloc(tsdn_t *tsdn)
next_thr_uid++;
malloc_mutex_unlock(tsdn, &next_thr_uid_mtx);
- return (thr_uid);
+ return thr_uid;
}
static prof_tdata_t *
prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
- char *thread_name, bool active)
-{
+ char *thread_name, bool active) {
prof_tdata_t *tdata;
cassert(config_prof);
/* Initialize an empty cache for this thread. */
tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t),
- size2index(sizeof(prof_tdata_t)), false, NULL, true,
+ sz_size2index(sizeof(prof_tdata_t)), false, NULL, true,
arena_get(TSDN_NULL, 0, true), true);
- if (tdata == NULL)
- return (NULL);
+ if (tdata == NULL) {
+ return NULL;
+ }
tdata->lock = prof_tdata_mutex_choose(thr_uid);
tdata->thr_uid = thr_uid;
@@ -1814,8 +1915,8 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash,
prof_bt_keycomp)) {
- idalloctm(tsd_tsdn(tsd), tdata, NULL, true, true);
- return (NULL);
+ idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true);
+ return NULL;
}
tdata->prng_state = (uint64_t)(uintptr_t)tdata;
@@ -1832,67 +1933,60 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
tdata_tree_insert(&tdatas, tdata);
malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
- return (tdata);
+ return tdata;
}
prof_tdata_t *
-prof_tdata_init(tsd_t *tsd)
-{
-
- return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0,
- NULL, prof_thread_active_init_get(tsd_tsdn(tsd))));
+prof_tdata_init(tsd_t *tsd) {
+ return prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0,
+ NULL, prof_thread_active_init_get(tsd_tsdn(tsd)));
}
static bool
-prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached)
-{
-
- if (tdata->attached && !even_if_attached)
- return (false);
- if (ckh_count(&tdata->bt2tctx) != 0)
- return (false);
- return (true);
+prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) {
+ if (tdata->attached && !even_if_attached) {
+ return false;
+ }
+ if (ckh_count(&tdata->bt2tctx) != 0) {
+ return false;
+ }
+ return true;
}
static bool
prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
- bool even_if_attached)
-{
-
+ bool even_if_attached) {
malloc_mutex_assert_owner(tsdn, tdata->lock);
- return (prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
+ return prof_tdata_should_destroy_unlocked(tdata, even_if_attached);
}
static void
prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
- bool even_if_attached)
-{
-
+ bool even_if_attached) {
malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx);
tdata_tree_remove(&tdatas, tdata);
assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
- if (tdata->thread_name != NULL)
- idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, true, true);
+ if (tdata->thread_name != NULL) {
+ idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true,
+ true);
+ }
ckh_delete(tsd, &tdata->bt2tctx);
- idalloctm(tsd_tsdn(tsd), tdata, NULL, true, true);
+ idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true);
}
static void
-prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached)
-{
-
+prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) {
malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
prof_tdata_destroy_locked(tsd, tdata, even_if_attached);
malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
}
static void
-prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata)
-{
+prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) {
bool destroy_tdata;
malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
@@ -1903,19 +1997,21 @@ prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata)
* Only detach if !destroy_tdata, because detaching would allow
* another thread to win the race to destroy tdata.
*/
- if (!destroy_tdata)
+ if (!destroy_tdata) {
tdata->attached = false;
+ }
tsd_prof_tdata_set(tsd, NULL);
- } else
+ } else {
destroy_tdata = false;
+ }
malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
- if (destroy_tdata)
+ if (destroy_tdata) {
prof_tdata_destroy(tsd, tdata, true);
+ }
}
prof_tdata_t *
-prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata)
-{
+prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) {
uint64_t thr_uid = tdata->thr_uid;
uint64_t thr_discrim = tdata->thr_discrim + 1;
char *thread_name = (tdata->thread_name != NULL) ?
@@ -1923,13 +2019,12 @@ prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata)
bool active = tdata->active;
prof_tdata_detach(tsd, tdata);
- return (prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name,
- active));
+ return prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name,
+ active);
}
static bool
-prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata)
-{
+prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) {
bool destroy_tdata;
malloc_mutex_lock(tsdn, tdata->lock);
@@ -1937,24 +2032,24 @@ prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata)
tdata->expired = true;
destroy_tdata = tdata->attached ? false :
prof_tdata_should_destroy(tsdn, tdata, false);
- } else
+ } else {
destroy_tdata = false;
+ }
malloc_mutex_unlock(tsdn, tdata->lock);
- return (destroy_tdata);
+ return destroy_tdata;
}
static prof_tdata_t *
-prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
-{
+prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
+ void *arg) {
tsdn_t *tsdn = (tsdn_t *)arg;
return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL);
}
void
-prof_reset(tsd_t *tsd, size_t lg_sample)
-{
+prof_reset(tsd_t *tsd, size_t lg_sample) {
prof_tdata_t *next;
assert(lg_sample < (sizeof(uint64_t) << 3));
@@ -1971,8 +2066,9 @@ prof_reset(tsd_t *tsd, size_t lg_sample)
if (to_destroy != NULL) {
next = tdata_tree_next(&tdatas, to_destroy);
prof_tdata_destroy_locked(tsd, to_destroy, false);
- } else
+ } else {
next = NULL;
+ }
} while (next != NULL);
malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
@@ -1980,179 +2076,179 @@ prof_reset(tsd_t *tsd, size_t lg_sample)
}
void
-prof_tdata_cleanup(tsd_t *tsd)
-{
+prof_tdata_cleanup(tsd_t *tsd) {
prof_tdata_t *tdata;
- if (!config_prof)
+ if (!config_prof) {
return;
+ }
tdata = tsd_prof_tdata_get(tsd);
- if (tdata != NULL)
+ if (tdata != NULL) {
prof_tdata_detach(tsd, tdata);
+ }
}
bool
-prof_active_get(tsdn_t *tsdn)
-{
+prof_active_get(tsdn_t *tsdn) {
bool prof_active_current;
malloc_mutex_lock(tsdn, &prof_active_mtx);
prof_active_current = prof_active;
malloc_mutex_unlock(tsdn, &prof_active_mtx);
- return (prof_active_current);
+ return prof_active_current;
}
bool
-prof_active_set(tsdn_t *tsdn, bool active)
-{
+prof_active_set(tsdn_t *tsdn, bool active) {
bool prof_active_old;
malloc_mutex_lock(tsdn, &prof_active_mtx);
prof_active_old = prof_active;
prof_active = active;
malloc_mutex_unlock(tsdn, &prof_active_mtx);
- return (prof_active_old);
+ return prof_active_old;
}
const char *
-prof_thread_name_get(tsd_t *tsd)
-{
+prof_thread_name_get(tsd_t *tsd) {
prof_tdata_t *tdata;
tdata = prof_tdata_get(tsd, true);
- if (tdata == NULL)
- return ("");
+ if (tdata == NULL) {
+ return "";
+ }
return (tdata->thread_name != NULL ? tdata->thread_name : "");
}
static char *
-prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name)
-{
+prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name) {
char *ret;
size_t size;
- if (thread_name == NULL)
- return (NULL);
+ if (thread_name == NULL) {
+ return NULL;
+ }
size = strlen(thread_name) + 1;
- if (size == 1)
- return ("");
+ if (size == 1) {
+ return "";
+ }
- ret = iallocztm(tsdn, size, size2index(size), false, NULL, true,
+ ret = iallocztm(tsdn, size, sz_size2index(size), false, NULL, true,
arena_get(TSDN_NULL, 0, true), true);
- if (ret == NULL)
- return (NULL);
+ if (ret == NULL) {
+ return NULL;
+ }
memcpy(ret, thread_name, size);
- return (ret);
+ return ret;
}
int
-prof_thread_name_set(tsd_t *tsd, const char *thread_name)
-{
+prof_thread_name_set(tsd_t *tsd, const char *thread_name) {
prof_tdata_t *tdata;
unsigned i;
char *s;
tdata = prof_tdata_get(tsd, true);
- if (tdata == NULL)
- return (EAGAIN);
+ if (tdata == NULL) {
+ return EAGAIN;
+ }
/* Validate input. */
- if (thread_name == NULL)
- return (EFAULT);
+ if (thread_name == NULL) {
+ return EFAULT;
+ }
for (i = 0; thread_name[i] != '\0'; i++) {
char c = thread_name[i];
- if (!isgraph(c) && !isblank(c))
- return (EFAULT);
+ if (!isgraph(c) && !isblank(c)) {
+ return EFAULT;
+ }
}
s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name);
- if (s == NULL)
- return (EAGAIN);
+ if (s == NULL) {
+ return EAGAIN;
+ }
if (tdata->thread_name != NULL) {
- idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, true, true);
+ idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true,
+ true);
tdata->thread_name = NULL;
}
- if (strlen(s) > 0)
+ if (strlen(s) > 0) {
tdata->thread_name = s;
- return (0);
+ }
+ return 0;
}
bool
-prof_thread_active_get(tsd_t *tsd)
-{
+prof_thread_active_get(tsd_t *tsd) {
prof_tdata_t *tdata;
tdata = prof_tdata_get(tsd, true);
- if (tdata == NULL)
- return (false);
- return (tdata->active);
+ if (tdata == NULL) {
+ return false;
+ }
+ return tdata->active;
}
bool
-prof_thread_active_set(tsd_t *tsd, bool active)
-{
+prof_thread_active_set(tsd_t *tsd, bool active) {
prof_tdata_t *tdata;
tdata = prof_tdata_get(tsd, true);
- if (tdata == NULL)
- return (true);
+ if (tdata == NULL) {
+ return true;
+ }
tdata->active = active;
- return (false);
+ return false;
}
bool
-prof_thread_active_init_get(tsdn_t *tsdn)
-{
+prof_thread_active_init_get(tsdn_t *tsdn) {
bool active_init;
malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
active_init = prof_thread_active_init;
malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
- return (active_init);
+ return active_init;
}
bool
-prof_thread_active_init_set(tsdn_t *tsdn, bool active_init)
-{
+prof_thread_active_init_set(tsdn_t *tsdn, bool active_init) {
bool active_init_old;
malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
active_init_old = prof_thread_active_init;
prof_thread_active_init = active_init;
malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
- return (active_init_old);
+ return active_init_old;
}
bool
-prof_gdump_get(tsdn_t *tsdn)
-{
+prof_gdump_get(tsdn_t *tsdn) {
bool prof_gdump_current;
malloc_mutex_lock(tsdn, &prof_gdump_mtx);
prof_gdump_current = prof_gdump_val;
malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
- return (prof_gdump_current);
+ return prof_gdump_current;
}
bool
-prof_gdump_set(tsdn_t *tsdn, bool gdump)
-{
+prof_gdump_set(tsdn_t *tsdn, bool gdump) {
bool prof_gdump_old;
malloc_mutex_lock(tsdn, &prof_gdump_mtx);
prof_gdump_old = prof_gdump_val;
prof_gdump_val = gdump;
malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
- return (prof_gdump_old);
+ return prof_gdump_old;
}
void
-prof_boot0(void)
-{
-
+prof_boot0(void) {
cassert(config_prof);
memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
@@ -2160,9 +2256,7 @@ prof_boot0(void)
}
void
-prof_boot1(void)
-{
-
+prof_boot1(void) {
cassert(config_prof);
/*
@@ -2186,9 +2280,7 @@ prof_boot1(void)
}
bool
-prof_boot2(tsd_t *tsd)
-{
-
+prof_boot2(tsd_t *tsd) {
cassert(config_prof);
if (opt_prof) {
@@ -2198,69 +2290,88 @@ prof_boot2(tsd_t *tsd)
prof_active = opt_prof_active;
if (malloc_mutex_init(&prof_active_mtx, "prof_active",
- WITNESS_RANK_PROF_ACTIVE))
- return (true);
+ WITNESS_RANK_PROF_ACTIVE, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
prof_gdump_val = opt_prof_gdump;
if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump",
- WITNESS_RANK_PROF_GDUMP))
- return (true);
+ WITNESS_RANK_PROF_GDUMP, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
prof_thread_active_init = opt_prof_thread_active_init;
if (malloc_mutex_init(&prof_thread_active_init_mtx,
"prof_thread_active_init",
- WITNESS_RANK_PROF_THREAD_ACTIVE_INIT))
- return (true);
+ WITNESS_RANK_PROF_THREAD_ACTIVE_INIT,
+ malloc_mutex_rank_exclusive)) {
+ return true;
+ }
if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
- prof_bt_keycomp))
- return (true);
+ prof_bt_keycomp)) {
+ return true;
+ }
if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx",
- WITNESS_RANK_PROF_BT2GCTX))
- return (true);
+ WITNESS_RANK_PROF_BT2GCTX, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
tdata_tree_new(&tdatas);
if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas",
- WITNESS_RANK_PROF_TDATAS))
- return (true);
+ WITNESS_RANK_PROF_TDATAS, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
next_thr_uid = 0;
if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid",
- WITNESS_RANK_PROF_NEXT_THR_UID))
- return (true);
+ WITNESS_RANK_PROF_NEXT_THR_UID, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq",
- WITNESS_RANK_PROF_DUMP_SEQ))
- return (true);
+ WITNESS_RANK_PROF_DUMP_SEQ, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
if (malloc_mutex_init(&prof_dump_mtx, "prof_dump",
- WITNESS_RANK_PROF_DUMP))
- return (true);
+ WITNESS_RANK_PROF_DUMP, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
atexit(prof_fdump) != 0) {
malloc_write("<jemalloc>: Error in atexit()\n");
- if (opt_abort)
+ if (opt_abort) {
abort();
+ }
}
gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
- PROF_NCTX_LOCKS * sizeof(malloc_mutex_t));
- if (gctx_locks == NULL)
- return (true);
+ b0get(), PROF_NCTX_LOCKS * sizeof(malloc_mutex_t),
+ CACHELINE);
+ if (gctx_locks == NULL) {
+ return true;
+ }
for (i = 0; i < PROF_NCTX_LOCKS; i++) {
if (malloc_mutex_init(&gctx_locks[i], "prof_gctx",
- WITNESS_RANK_PROF_GCTX))
- return (true);
+ WITNESS_RANK_PROF_GCTX,
+ malloc_mutex_rank_exclusive)) {
+ return true;
+ }
}
tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
- PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t));
- if (tdata_locks == NULL)
- return (true);
+ b0get(), PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t),
+ CACHELINE);
+ if (tdata_locks == NULL) {
+ return true;
+ }
for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
if (malloc_mutex_init(&tdata_locks[i], "prof_tdata",
- WITNESS_RANK_PROF_TDATA))
- return (true);
+ WITNESS_RANK_PROF_TDATA,
+ malloc_mutex_rank_exclusive)) {
+ return true;
+ }
}
}
@@ -2274,31 +2385,29 @@ prof_boot2(tsd_t *tsd)
prof_booted = true;
- return (false);
+ return false;
}
void
-prof_prefork0(tsdn_t *tsdn)
-{
-
- if (opt_prof) {
+prof_prefork0(tsdn_t *tsdn) {
+ if (config_prof && opt_prof) {
unsigned i;
malloc_mutex_prefork(tsdn, &prof_dump_mtx);
malloc_mutex_prefork(tsdn, &bt2gctx_mtx);
malloc_mutex_prefork(tsdn, &tdatas_mtx);
- for (i = 0; i < PROF_NTDATA_LOCKS; i++)
+ for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
malloc_mutex_prefork(tsdn, &tdata_locks[i]);
- for (i = 0; i < PROF_NCTX_LOCKS; i++)
+ }
+ for (i = 0; i < PROF_NCTX_LOCKS; i++) {
malloc_mutex_prefork(tsdn, &gctx_locks[i]);
+ }
}
}
void
-prof_prefork1(tsdn_t *tsdn)
-{
-
- if (opt_prof) {
+prof_prefork1(tsdn_t *tsdn) {
+ if (config_prof && opt_prof) {
malloc_mutex_prefork(tsdn, &prof_active_mtx);
malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx);
malloc_mutex_prefork(tsdn, &prof_gdump_mtx);
@@ -2308,10 +2417,8 @@ prof_prefork1(tsdn_t *tsdn)
}
void
-prof_postfork_parent(tsdn_t *tsdn)
-{
-
- if (opt_prof) {
+prof_postfork_parent(tsdn_t *tsdn) {
+ if (config_prof && opt_prof) {
unsigned i;
malloc_mutex_postfork_parent(tsdn,
@@ -2320,10 +2427,12 @@ prof_postfork_parent(tsdn_t *tsdn)
malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx);
malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx);
malloc_mutex_postfork_parent(tsdn, &prof_active_mtx);
- for (i = 0; i < PROF_NCTX_LOCKS; i++)
+ for (i = 0; i < PROF_NCTX_LOCKS; i++) {
malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]);
- for (i = 0; i < PROF_NTDATA_LOCKS; i++)
+ }
+ for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]);
+ }
malloc_mutex_postfork_parent(tsdn, &tdatas_mtx);
malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx);
malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx);
@@ -2331,10 +2440,8 @@ prof_postfork_parent(tsdn_t *tsdn)
}
void
-prof_postfork_child(tsdn_t *tsdn)
-{
-
- if (opt_prof) {
+prof_postfork_child(tsdn_t *tsdn) {
+ if (config_prof && opt_prof) {
unsigned i;
malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx);
@@ -2342,10 +2449,12 @@ prof_postfork_child(tsdn_t *tsdn)
malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx);
malloc_mutex_postfork_child(tsdn, &prof_dump_seq_mtx);
malloc_mutex_postfork_child(tsdn, &prof_active_mtx);
- for (i = 0; i < PROF_NCTX_LOCKS; i++)
+ for (i = 0; i < PROF_NCTX_LOCKS; i++) {
malloc_mutex_postfork_child(tsdn, &gctx_locks[i]);
- for (i = 0; i < PROF_NTDATA_LOCKS; i++)
+ }
+ for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
malloc_mutex_postfork_child(tsdn, &tdata_locks[i]);
+ }
malloc_mutex_postfork_child(tsdn, &tdatas_mtx);
malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx);
malloc_mutex_postfork_child(tsdn, &prof_dump_mtx);
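The prof.c hunks above keep jemalloc's fork-safety discipline intact while adding the new mutex-rank argument: prof_prefork0()/prof_prefork1() acquire every profiling mutex (now only when config_prof is set), and the postfork hooks hand them back in roughly reverse acquisition order in both parent and child. A minimal sketch of that discipline, using hypothetical lock names and plain pthreads rather than jemalloc's malloc_mutex API:

/*
 * Illustrative sketch only -- not jemalloc code.  The idea: take every
 * relevant lock before fork() so no lock is left held by a thread that
 * does not exist in the child, then release in reverse order afterward.
 */
#include <pthread.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

static void
prefork(void) {
	/* Acquire in rank order: a before b. */
	pthread_mutex_lock(&lock_a);
	pthread_mutex_lock(&lock_b);
}

static void
postfork(void) {
	/* Release in reverse acquisition order. */
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
}

static void
register_fork_handlers(void) {
	/* Same handler for parent and child in this sketch. */
	pthread_atfork(prefork, postfork, postfork);
}

jemalloc's own postfork_child hooks may reinitialize a mutex instead of simply unlocking it, depending on the platform, which is why separate parent/child paths exist in the real code.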
diff --git a/contrib/jemalloc/src/quarantine.c b/contrib/jemalloc/src/quarantine.c
deleted file mode 100644
index 18903fb5c7d8..000000000000
--- a/contrib/jemalloc/src/quarantine.c
+++ /dev/null
@@ -1,183 +0,0 @@
-#define JEMALLOC_QUARANTINE_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-/*
- * Quarantine pointers close to NULL are used to encode state information that
- * is used for cleaning up during thread shutdown.
- */
-#define QUARANTINE_STATE_REINCARNATED ((quarantine_t *)(uintptr_t)1)
-#define QUARANTINE_STATE_PURGATORY ((quarantine_t *)(uintptr_t)2)
-#define QUARANTINE_STATE_MAX QUARANTINE_STATE_PURGATORY
-
-/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static quarantine_t *quarantine_grow(tsd_t *tsd, quarantine_t *quarantine);
-static void quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine);
-static void quarantine_drain(tsdn_t *tsdn, quarantine_t *quarantine,
- size_t upper_bound);
-
-/******************************************************************************/
-
-static quarantine_t *
-quarantine_init(tsdn_t *tsdn, size_t lg_maxobjs)
-{
- quarantine_t *quarantine;
- size_t size;
-
- size = offsetof(quarantine_t, objs) + ((ZU(1) << lg_maxobjs) *
- sizeof(quarantine_obj_t));
- quarantine = (quarantine_t *)iallocztm(tsdn, size, size2index(size),
- false, NULL, true, arena_get(TSDN_NULL, 0, true), true);
- if (quarantine == NULL)
- return (NULL);
- quarantine->curbytes = 0;
- quarantine->curobjs = 0;
- quarantine->first = 0;
- quarantine->lg_maxobjs = lg_maxobjs;
-
- return (quarantine);
-}
-
-void
-quarantine_alloc_hook_work(tsd_t *tsd)
-{
- quarantine_t *quarantine;
-
- if (!tsd_nominal(tsd))
- return;
-
- quarantine = quarantine_init(tsd_tsdn(tsd), LG_MAXOBJS_INIT);
- /*
- * Check again whether quarantine has been initialized, because
- * quarantine_init() may have triggered recursive initialization.
- */
- if (tsd_quarantine_get(tsd) == NULL)
- tsd_quarantine_set(tsd, quarantine);
- else
- idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true);
-}
-
-static quarantine_t *
-quarantine_grow(tsd_t *tsd, quarantine_t *quarantine)
-{
- quarantine_t *ret;
-
- ret = quarantine_init(tsd_tsdn(tsd), quarantine->lg_maxobjs + 1);
- if (ret == NULL) {
- quarantine_drain_one(tsd_tsdn(tsd), quarantine);
- return (quarantine);
- }
-
- ret->curbytes = quarantine->curbytes;
- ret->curobjs = quarantine->curobjs;
- if (quarantine->first + quarantine->curobjs <= (ZU(1) <<
- quarantine->lg_maxobjs)) {
- /* objs ring buffer data are contiguous. */
- memcpy(ret->objs, &quarantine->objs[quarantine->first],
- quarantine->curobjs * sizeof(quarantine_obj_t));
- } else {
- /* objs ring buffer data wrap around. */
- size_t ncopy_a = (ZU(1) << quarantine->lg_maxobjs) -
- quarantine->first;
- size_t ncopy_b = quarantine->curobjs - ncopy_a;
-
- memcpy(ret->objs, &quarantine->objs[quarantine->first], ncopy_a
- * sizeof(quarantine_obj_t));
- memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b *
- sizeof(quarantine_obj_t));
- }
- idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true);
-
- tsd_quarantine_set(tsd, ret);
- return (ret);
-}
-
-static void
-quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine)
-{
- quarantine_obj_t *obj = &quarantine->objs[quarantine->first];
- assert(obj->usize == isalloc(tsdn, obj->ptr, config_prof));
- idalloctm(tsdn, obj->ptr, NULL, false, true);
- quarantine->curbytes -= obj->usize;
- quarantine->curobjs--;
- quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
- quarantine->lg_maxobjs) - 1);
-}
-
-static void
-quarantine_drain(tsdn_t *tsdn, quarantine_t *quarantine, size_t upper_bound)
-{
-
- while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0)
- quarantine_drain_one(tsdn, quarantine);
-}
-
-void
-quarantine(tsd_t *tsd, void *ptr)
-{
- quarantine_t *quarantine;
- size_t usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
-
- cassert(config_fill);
- assert(opt_quarantine);
-
- if ((quarantine = tsd_quarantine_get(tsd)) == NULL) {
- idalloctm(tsd_tsdn(tsd), ptr, NULL, false, true);
- return;
- }
- /*
- * Drain one or more objects if the quarantine size limit would be
- * exceeded by appending ptr.
- */
- if (quarantine->curbytes + usize > opt_quarantine) {
- size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine
- - usize : 0;
- quarantine_drain(tsd_tsdn(tsd), quarantine, upper_bound);
- }
- /* Grow the quarantine ring buffer if it's full. */
- if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs))
- quarantine = quarantine_grow(tsd, quarantine);
- /* quarantine_grow() must free a slot if it fails to grow. */
- assert(quarantine->curobjs < (ZU(1) << quarantine->lg_maxobjs));
- /* Append ptr if its size doesn't exceed the quarantine size. */
- if (quarantine->curbytes + usize <= opt_quarantine) {
- size_t offset = (quarantine->first + quarantine->curobjs) &
- ((ZU(1) << quarantine->lg_maxobjs) - 1);
- quarantine_obj_t *obj = &quarantine->objs[offset];
- obj->ptr = ptr;
- obj->usize = usize;
- quarantine->curbytes += usize;
- quarantine->curobjs++;
- if (config_fill && unlikely(opt_junk_free)) {
- /*
- * Only do redzone validation if Valgrind isn't in
- * operation.
- */
- if ((!config_valgrind || likely(!in_valgrind))
- && usize <= SMALL_MAXCLASS)
- arena_quarantine_junk_small(ptr, usize);
- else
- memset(ptr, JEMALLOC_FREE_JUNK, usize);
- }
- } else {
- assert(quarantine->curbytes == 0);
- idalloctm(tsd_tsdn(tsd), ptr, NULL, false, true);
- }
-}
-
-void
-quarantine_cleanup(tsd_t *tsd)
-{
- quarantine_t *quarantine;
-
- if (!config_fill)
- return;
-
- quarantine = tsd_quarantine_get(tsd);
- if (quarantine != NULL) {
- quarantine_drain(tsd_tsdn(tsd), quarantine, 0);
- idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true);
- tsd_quarantine_set(tsd, NULL);
- }
-}
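src/quarantine.c is deleted outright in this import; the only subtle logic going away is quarantine_grow()'s ring-buffer copy, which moves the live objects into a larger buffer with either one or two memcpy() calls depending on whether they wrap past the end of the old array. A standalone sketch of that copy step, with hypothetical types and names in place of the original quarantine_obj_t machinery:

/*
 * Illustrative sketch only, restating the ring-buffer copy from the
 * deleted quarantine_grow().  old_cap is the capacity of src.
 */
#include <stddef.h>
#include <string.h>

typedef struct { void *ptr; size_t usize; } obj_t;

static void
ring_copy(obj_t *dst, const obj_t *src, size_t first, size_t count,
    size_t old_cap) {
	if (first + count <= old_cap) {
		/* Live region is contiguous: single copy. */
		memcpy(dst, &src[first], count * sizeof(obj_t));
	} else {
		/* Wrapped: copy the tail of the old array, then its head. */
		size_t ncopy_a = old_cap - first;
		size_t ncopy_b = count - ncopy_a;
		memcpy(dst, &src[first], ncopy_a * sizeof(obj_t));
		memcpy(&dst[ncopy_a], src, ncopy_b * sizeof(obj_t));
	}
}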
diff --git a/contrib/jemalloc/src/rtree.c b/contrib/jemalloc/src/rtree.c
index f2e2997d5535..53702cf7236c 100644
--- a/contrib/jemalloc/src/rtree.c
+++ b/contrib/jemalloc/src/rtree.c
@@ -1,132 +1,320 @@
-#define JEMALLOC_RTREE_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define JEMALLOC_RTREE_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
-static unsigned
-hmin(unsigned ha, unsigned hb)
-{
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/mutex.h"
- return (ha < hb ? ha : hb);
+/*
+ * Only the most significant bits of keys passed to rtree_{read,write}() are
+ * used.
+ */
+bool
+rtree_new(rtree_t *rtree, bool zeroed) {
+#ifdef JEMALLOC_JET
+ if (!zeroed) {
+ memset(rtree, 0, sizeof(rtree_t)); /* Clear root. */
+ }
+#else
+ assert(zeroed);
+#endif
+
+ if (malloc_mutex_init(&rtree->init_lock, "rtree", WITNESS_RANK_RTREE,
+ malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+
+ return false;
}
-/* Only the most significant bits of keys passed to rtree_[gs]et() are used. */
-bool
-rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
- rtree_node_dalloc_t *dalloc)
-{
- unsigned bits_in_leaf, height, i;
-
- assert(RTREE_HEIGHT_MAX == ((ZU(1) << (LG_SIZEOF_PTR+3)) /
- RTREE_BITS_PER_LEVEL));
- assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3));
-
- bits_in_leaf = (bits % RTREE_BITS_PER_LEVEL) == 0 ? RTREE_BITS_PER_LEVEL
- : (bits % RTREE_BITS_PER_LEVEL);
- if (bits > bits_in_leaf) {
- height = 1 + (bits - bits_in_leaf) / RTREE_BITS_PER_LEVEL;
- if ((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf != bits)
- height++;
- } else
- height = 1;
- assert((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf == bits);
-
- rtree->alloc = alloc;
- rtree->dalloc = dalloc;
- rtree->height = height;
-
- /* Root level. */
- rtree->levels[0].subtree = NULL;
- rtree->levels[0].bits = (height > 1) ? RTREE_BITS_PER_LEVEL :
- bits_in_leaf;
- rtree->levels[0].cumbits = rtree->levels[0].bits;
- /* Interior levels. */
- for (i = 1; i < height-1; i++) {
- rtree->levels[i].subtree = NULL;
- rtree->levels[i].bits = RTREE_BITS_PER_LEVEL;
- rtree->levels[i].cumbits = rtree->levels[i-1].cumbits +
- RTREE_BITS_PER_LEVEL;
- }
- /* Leaf level. */
- if (height > 1) {
- rtree->levels[height-1].subtree = NULL;
- rtree->levels[height-1].bits = bits_in_leaf;
- rtree->levels[height-1].cumbits = bits;
- }
-
- /* Compute lookup table to be used by rtree_start_level(). */
- for (i = 0; i < RTREE_HEIGHT_MAX; i++) {
- rtree->start_level[i] = hmin(RTREE_HEIGHT_MAX - 1 - i, height -
- 1);
- }
-
- return (false);
+static rtree_node_elm_t *
+rtree_node_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
+ return (rtree_node_elm_t *)base_alloc(tsdn, b0get(), nelms *
+ sizeof(rtree_node_elm_t), CACHELINE);
}
+rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc = rtree_node_alloc_impl;
static void
-rtree_delete_subtree(rtree_t *rtree, rtree_node_elm_t *node, unsigned level)
-{
+rtree_node_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *node) {
+ /* Nodes are never deleted during normal operation. */
+ not_reached();
+}
+UNUSED rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc =
+ rtree_node_dalloc_impl;
- if (level + 1 < rtree->height) {
- size_t nchildren, i;
+static rtree_leaf_elm_t *
+rtree_leaf_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
+ return (rtree_leaf_elm_t *)base_alloc(tsdn, b0get(), nelms *
+ sizeof(rtree_leaf_elm_t), CACHELINE);
+}
+rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc = rtree_leaf_alloc_impl;
+
+static void
+rtree_leaf_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *leaf) {
+ /* Leaves are never deleted during normal operation. */
+ not_reached();
+}
+UNUSED rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc =
+ rtree_leaf_dalloc_impl;
- nchildren = ZU(1) << rtree->levels[level].bits;
- for (i = 0; i < nchildren; i++) {
- rtree_node_elm_t *child = node[i].child;
- if (child != NULL)
- rtree_delete_subtree(rtree, child, level + 1);
+#ifdef JEMALLOC_JET
+# if RTREE_HEIGHT > 1
+static void
+rtree_delete_subtree(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *subtree,
+ unsigned level) {
+ size_t nchildren = ZU(1) << rtree_levels[level].bits;
+ if (level + 2 < RTREE_HEIGHT) {
+ for (size_t i = 0; i < nchildren; i++) {
+ rtree_node_elm_t *node =
+ (rtree_node_elm_t *)atomic_load_p(&subtree[i].child,
+ ATOMIC_RELAXED);
+ if (node != NULL) {
+ rtree_delete_subtree(tsdn, rtree, node, level +
+ 1);
+ }
+ }
+ } else {
+ for (size_t i = 0; i < nchildren; i++) {
+ rtree_leaf_elm_t *leaf =
+ (rtree_leaf_elm_t *)atomic_load_p(&subtree[i].child,
+ ATOMIC_RELAXED);
+ if (leaf != NULL) {
+ rtree_leaf_dalloc(tsdn, rtree, leaf);
+ }
}
}
- rtree->dalloc(node);
+
+ if (subtree != rtree->root) {
+ rtree_node_dalloc(tsdn, rtree, subtree);
+ }
}
+# endif
void
-rtree_delete(rtree_t *rtree)
-{
- unsigned i;
+rtree_delete(tsdn_t *tsdn, rtree_t *rtree) {
+# if RTREE_HEIGHT > 1
+ rtree_delete_subtree(tsdn, rtree, rtree->root, 0);
+# endif
+}
+#endif
+
+static rtree_node_elm_t *
+rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level,
+ atomic_p_t *elmp) {
+ malloc_mutex_lock(tsdn, &rtree->init_lock);
+ /*
+ * If *elmp is non-null, then it was initialized with the init lock
+ * held, so we can get by with 'relaxed' here.
+ */
+ rtree_node_elm_t *node = atomic_load_p(elmp, ATOMIC_RELAXED);
+ if (node == NULL) {
+ node = rtree_node_alloc(tsdn, rtree, ZU(1) <<
+ rtree_levels[level].bits);
+ if (node == NULL) {
+ malloc_mutex_unlock(tsdn, &rtree->init_lock);
+ return NULL;
+ }
+ /*
+ * Even though we hold the lock, a later reader might not; we
+ * need release semantics.
+ */
+ atomic_store_p(elmp, node, ATOMIC_RELEASE);
+ }
+ malloc_mutex_unlock(tsdn, &rtree->init_lock);
+
+ return node;
+}
+
+static rtree_leaf_elm_t *
+rtree_leaf_init(tsdn_t *tsdn, rtree_t *rtree, atomic_p_t *elmp) {
+ malloc_mutex_lock(tsdn, &rtree->init_lock);
+ /*
+ * If *elmp is non-null, then it was initialized with the init lock
+ * held, so we can get by with 'relaxed' here.
+ */
+ rtree_leaf_elm_t *leaf = atomic_load_p(elmp, ATOMIC_RELAXED);
+ if (leaf == NULL) {
+ leaf = rtree_leaf_alloc(tsdn, rtree, ZU(1) <<
+ rtree_levels[RTREE_HEIGHT-1].bits);
+ if (leaf == NULL) {
+ malloc_mutex_unlock(tsdn, &rtree->init_lock);
+ return NULL;
+ }
+ /*
+ * Even though we hold the lock, a later reader might not; we
+ * need release semantics.
+ */
+ atomic_store_p(elmp, leaf, ATOMIC_RELEASE);
+ }
+ malloc_mutex_unlock(tsdn, &rtree->init_lock);
+
+ return leaf;
+}
+
+static bool
+rtree_node_valid(rtree_node_elm_t *node) {
+ return ((uintptr_t)node != (uintptr_t)0);
+}
+
+static bool
+rtree_leaf_valid(rtree_leaf_elm_t *leaf) {
+ return ((uintptr_t)leaf != (uintptr_t)0);
+}
+
+static rtree_node_elm_t *
+rtree_child_node_tryread(rtree_node_elm_t *elm, bool dependent) {
+ rtree_node_elm_t *node;
- for (i = 0; i < rtree->height; i++) {
- rtree_node_elm_t *subtree = rtree->levels[i].subtree;
- if (subtree != NULL)
- rtree_delete_subtree(rtree, subtree, i);
+ if (dependent) {
+ node = (rtree_node_elm_t *)atomic_load_p(&elm->child,
+ ATOMIC_RELAXED);
+ } else {
+ node = (rtree_node_elm_t *)atomic_load_p(&elm->child,
+ ATOMIC_ACQUIRE);
}
+
+ assert(!dependent || node != NULL);
+ return node;
}
static rtree_node_elm_t *
-rtree_node_init(rtree_t *rtree, unsigned level, rtree_node_elm_t **elmp)
-{
+rtree_child_node_read(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *elm,
+ unsigned level, bool dependent) {
rtree_node_elm_t *node;
- if (atomic_cas_p((void **)elmp, NULL, RTREE_NODE_INITIALIZING)) {
- spin_t spinner;
+ node = rtree_child_node_tryread(elm, dependent);
+ if (!dependent && unlikely(!rtree_node_valid(node))) {
+ node = rtree_node_init(tsdn, rtree, level + 1, &elm->child);
+ }
+ assert(!dependent || node != NULL);
+ return node;
+}
+
+static rtree_leaf_elm_t *
+rtree_child_leaf_tryread(rtree_node_elm_t *elm, bool dependent) {
+ rtree_leaf_elm_t *leaf;
- /*
- * Another thread is already in the process of initializing.
- * Spin-wait until initialization is complete.
- */
- spin_init(&spinner);
- do {
- spin_adaptive(&spinner);
- node = atomic_read_p((void **)elmp);
- } while (node == RTREE_NODE_INITIALIZING);
+ if (dependent) {
+ leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child,
+ ATOMIC_RELAXED);
} else {
- node = rtree->alloc(ZU(1) << rtree->levels[level].bits);
- if (node == NULL)
- return (NULL);
- atomic_write_p((void **)elmp, node);
+ leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child,
+ ATOMIC_ACQUIRE);
}
- return (node);
+ assert(!dependent || leaf != NULL);
+ return leaf;
}
-rtree_node_elm_t *
-rtree_subtree_read_hard(rtree_t *rtree, unsigned level)
-{
+static rtree_leaf_elm_t *
+rtree_child_leaf_read(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *elm,
+ unsigned level, bool dependent) {
+ rtree_leaf_elm_t *leaf;
- return (rtree_node_init(rtree, level, &rtree->levels[level].subtree));
+ leaf = rtree_child_leaf_tryread(elm, dependent);
+ if (!dependent && unlikely(!rtree_leaf_valid(leaf))) {
+ leaf = rtree_leaf_init(tsdn, rtree, &elm->child);
+ }
+ assert(!dependent || leaf != NULL);
+ return leaf;
}
-rtree_node_elm_t *
-rtree_child_read_hard(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level)
-{
+rtree_leaf_elm_t *
+rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+ uintptr_t key, bool dependent, bool init_missing) {
+ rtree_node_elm_t *node;
+ rtree_leaf_elm_t *leaf;
+#if RTREE_HEIGHT > 1
+ node = rtree->root;
+#else
+ leaf = rtree->root;
+#endif
+
+ if (config_debug) {
+ uintptr_t leafkey = rtree_leafkey(key);
+ for (unsigned i = 0; i < RTREE_CTX_NCACHE; i++) {
+ assert(rtree_ctx->cache[i].leafkey != leafkey);
+ }
+ for (unsigned i = 0; i < RTREE_CTX_NCACHE_L2; i++) {
+ assert(rtree_ctx->l2_cache[i].leafkey != leafkey);
+ }
+ }
+
+#define RTREE_GET_CHILD(level) { \
+ assert(level < RTREE_HEIGHT-1); \
+ if (level != 0 && !dependent && \
+ unlikely(!rtree_node_valid(node))) { \
+ return NULL; \
+ } \
+ uintptr_t subkey = rtree_subkey(key, level); \
+ if (level + 2 < RTREE_HEIGHT) { \
+ node = init_missing ? \
+ rtree_child_node_read(tsdn, rtree, \
+ &node[subkey], level, dependent) : \
+ rtree_child_node_tryread(&node[subkey], \
+ dependent); \
+ } else { \
+ leaf = init_missing ? \
+ rtree_child_leaf_read(tsdn, rtree, \
+ &node[subkey], level, dependent) : \
+ rtree_child_leaf_tryread(&node[subkey], \
+ dependent); \
+ } \
+ }
+ /*
+ * Cache replacement upon hard lookup (i.e. L1 & L2 rtree cache miss):
+ * (1) evict last entry in L2 cache; (2) move the collision slot from L1
+ * cache down to L2; and 3) fill L1.
+ */
+#define RTREE_GET_LEAF(level) { \
+ assert(level == RTREE_HEIGHT-1); \
+ if (!dependent && unlikely(!rtree_leaf_valid(leaf))) { \
+ return NULL; \
+ } \
+ if (RTREE_CTX_NCACHE_L2 > 1) { \
+ memmove(&rtree_ctx->l2_cache[1], \
+ &rtree_ctx->l2_cache[0], \
+ sizeof(rtree_ctx_cache_elm_t) * \
+ (RTREE_CTX_NCACHE_L2 - 1)); \
+ } \
+ size_t slot = rtree_cache_direct_map(key); \
+ rtree_ctx->l2_cache[0].leafkey = \
+ rtree_ctx->cache[slot].leafkey; \
+ rtree_ctx->l2_cache[0].leaf = \
+ rtree_ctx->cache[slot].leaf; \
+ uintptr_t leafkey = rtree_leafkey(key); \
+ rtree_ctx->cache[slot].leafkey = leafkey; \
+ rtree_ctx->cache[slot].leaf = leaf; \
+ uintptr_t subkey = rtree_subkey(key, level); \
+ return &leaf[subkey]; \
+ }
+ if (RTREE_HEIGHT > 1) {
+ RTREE_GET_CHILD(0)
+ }
+ if (RTREE_HEIGHT > 2) {
+ RTREE_GET_CHILD(1)
+ }
+ if (RTREE_HEIGHT > 3) {
+ for (unsigned i = 2; i < RTREE_HEIGHT-1; i++) {
+ RTREE_GET_CHILD(i)
+ }
+ }
+ RTREE_GET_LEAF(RTREE_HEIGHT-1)
+#undef RTREE_GET_CHILD
+#undef RTREE_GET_LEAF
+ not_reached();
+}
- return (rtree_node_init(rtree, level+1, &elm->child));
+void
+rtree_ctx_data_init(rtree_ctx_t *ctx) {
+ for (unsigned i = 0; i < RTREE_CTX_NCACHE; i++) {
+ rtree_ctx_cache_elm_t *cache = &ctx->cache[i];
+ cache->leafkey = RTREE_LEAFKEY_INVALID;
+ cache->leaf = NULL;
+ }
+ for (unsigned i = 0; i < RTREE_CTX_NCACHE_L2; i++) {
+ rtree_ctx_cache_elm_t *cache = &ctx->l2_cache[i];
+ cache->leafkey = RTREE_LEAFKEY_INVALID;
+ cache->leaf = NULL;
+ }
}
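The rewritten rtree.c above initializes interior nodes and leaves lazily: rtree_node_init()/rtree_leaf_init() take rtree->init_lock, re-check the slot with a relaxed load (any earlier store happened under the same lock), and publish the new allocation with a release store, while readers such as rtree_child_node_tryread() pair that with an acquire load whenever the element is not already known to exist (the "dependent" case). A minimal sketch of the same double-checked initialization pattern using C11 atomics and a plain pthread mutex, with hypothetical names:

/*
 * Illustrative sketch only.  Readers use an acquire load; the slow path
 * re-checks under a lock and publishes with a release store.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

static _Atomic(void *) slot;
static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;

static void *
slot_get_or_init(size_t sz) {
	void *p = atomic_load_explicit(&slot, memory_order_acquire);
	if (p != NULL) {
		return p;	/* Fast path: already published. */
	}
	pthread_mutex_lock(&init_lock);
	/* Any prior store happened under this lock, so relaxed is enough. */
	p = atomic_load_explicit(&slot, memory_order_relaxed);
	if (p == NULL) {
		p = malloc(sz);
		if (p != NULL) {
			/* Publish so later acquire loads see the contents. */
			atomic_store_explicit(&slot, p, memory_order_release);
		}
	}
	pthread_mutex_unlock(&init_lock);
	return p;
}

That acquire/release pairing is what lets rtree_leaf_elm_lookup_hard() walk freshly published subtrees without holding init_lock itself.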
diff --git a/contrib/jemalloc/src/spin.c b/contrib/jemalloc/src/spin.c
index 5242d95aa10f..24372c26c949 100644
--- a/contrib/jemalloc/src/spin.c
+++ b/contrib/jemalloc/src/spin.c
@@ -1,2 +1,4 @@
-#define JEMALLOC_SPIN_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define JEMALLOC_SPIN_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+
+#include "jemalloc/internal/spin.h"
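The stats.c hunks that follow lean on an X-macro for the new mutex-profiling output: each counter is listed once in MUTEX_PROF_COUNTERS, and every call site defines OP(counter, type) to expand into per-counter reads or prints (the same trick builds global_mutex_names[] and arena_mutex_names[] from the MUTEX_PROF_*_MUTEXES lists). A self-contained sketch of the idiom with a made-up counter list; jemalloc's OP additionally carries each counter's result type:

/*
 * Illustrative sketch of the X-macro idiom used by the stats.c changes
 * below; the counter list here is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define COUNTERS	\
    OP(num_ops)		\
    OP(num_wait)	\
    OP(max_wait_ns)

/* Expand once to build an enum of counter indices... */
enum {
#define OP(c)	counter_##c,
	COUNTERS
#undef OP
	num_counters
};

/* ...and again to build matching name strings. */
static const char *counter_names[num_counters] = {
#define OP(c)	#c,
	COUNTERS
#undef OP
};

static void
print_counters(const uint64_t vals[num_counters]) {
	for (int i = 0; i < num_counters; i++) {
		printf("%s: %llu\n", counter_names[i],
		    (unsigned long long)vals[i]);
	}
}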
diff --git a/contrib/jemalloc/src/stats.c b/contrib/jemalloc/src/stats.c
index b76afc5a756d..087df7676e92 100644
--- a/contrib/jemalloc/src/stats.c
+++ b/contrib/jemalloc/src/stats.c
@@ -1,13 +1,31 @@
-#define JEMALLOC_STATS_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-#define CTL_GET(n, v, t) do { \
+#define JEMALLOC_STATS_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/mutex_prof.h"
+
+const char *global_mutex_names[mutex_prof_num_global_mutexes] = {
+#define OP(mtx) #mtx,
+ MUTEX_PROF_GLOBAL_MUTEXES
+#undef OP
+};
+
+const char *arena_mutex_names[mutex_prof_num_arena_mutexes] = {
+#define OP(mtx) #mtx,
+ MUTEX_PROF_ARENA_MUTEXES
+#undef OP
+};
+
+#define CTL_GET(n, v, t) do { \
size_t sz = sizeof(t); \
xmallctl(n, (void *)v, &sz, NULL, 0); \
} while (0)
-#define CTL_M2_GET(n, i, v, t) do { \
- size_t mib[6]; \
+#define CTL_M2_GET(n, i, v, t) do { \
+ size_t mib[CTL_MAX_DEPTH]; \
size_t miblen = sizeof(mib) / sizeof(size_t); \
size_t sz = sizeof(t); \
xmallctlnametomib(n, mib, &miblen); \
@@ -15,8 +33,8 @@
xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \
} while (0)
-#define CTL_M2_M4_GET(n, i, j, v, t) do { \
- size_t mib[6]; \
+#define CTL_M2_M4_GET(n, i, j, v, t) do { \
+ size_t mib[CTL_MAX_DEPTH]; \
size_t miblen = sizeof(mib) / sizeof(size_t); \
size_t sz = sizeof(t); \
xmallctlnametomib(n, mib, &miblen); \
@@ -28,16 +46,79 @@
/******************************************************************************/
/* Data. */
-bool opt_stats_print = false;
-
-size_t stats_cactive = 0;
+bool opt_stats_print = false;
+char opt_stats_print_opts[stats_print_tot_num_options+1] = "";
/******************************************************************************/
+/* Calculate x.yyy and output a string (takes a fixed sized char array). */
+static bool
+get_rate_str(uint64_t dividend, uint64_t divisor, char str[6]) {
+ if (divisor == 0 || dividend > divisor) {
+ /* The rate is not supposed to be greater than 1. */
+ return true;
+ }
+ if (dividend > 0) {
+ assert(UINT64_MAX / dividend >= 1000);
+ }
+
+ unsigned n = (unsigned)((dividend * 1000) / divisor);
+ if (n < 10) {
+ malloc_snprintf(str, 6, "0.00%u", n);
+ } else if (n < 100) {
+ malloc_snprintf(str, 6, "0.0%u", n);
+ } else if (n < 1000) {
+ malloc_snprintf(str, 6, "0.%u", n);
+ } else {
+ malloc_snprintf(str, 6, "1");
+ }
+
+ return false;
+}
+
+#define MUTEX_CTL_STR_MAX_LENGTH 128
+static void
+gen_mutex_ctl_str(char *str, size_t buf_len, const char *prefix,
+ const char *mutex, const char *counter) {
+ malloc_snprintf(str, buf_len, "stats.%s.%s.%s", prefix, mutex, counter);
+}
+
+static void
+read_arena_bin_mutex_stats(unsigned arena_ind, unsigned bin_ind,
+ uint64_t results[mutex_prof_num_counters]) {
+ char cmd[MUTEX_CTL_STR_MAX_LENGTH];
+#define OP(c, t) \
+ gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \
+ "arenas.0.bins.0","mutex", #c); \
+ CTL_M2_M4_GET(cmd, arena_ind, bin_ind, \
+ (t *)&results[mutex_counter_##c], t);
+MUTEX_PROF_COUNTERS
+#undef OP
+}
+
+static void
+mutex_stats_output_json(void (*write_cb)(void *, const char *), void *cbopaque,
+ const char *name, uint64_t stats[mutex_prof_num_counters],
+ const char *json_indent, bool last) {
+ malloc_cprintf(write_cb, cbopaque, "%s\"%s\": {\n", json_indent, name);
+
+ mutex_prof_counter_ind_t k = 0;
+ char *fmt_str[2] = {"%s\t\"%s\": %"FMTu32"%s\n",
+ "%s\t\"%s\": %"FMTu64"%s\n"};
+#define OP(c, t) \
+ malloc_cprintf(write_cb, cbopaque, \
+ fmt_str[sizeof(t) / sizeof(uint32_t) - 1], \
+ json_indent, #c, (t)stats[mutex_counter_##c], \
+ (++k == mutex_prof_num_counters) ? "" : ",");
+MUTEX_PROF_COUNTERS
+#undef OP
+ malloc_cprintf(write_cb, cbopaque, "%s}%s\n", json_indent,
+ last ? "" : ",");
+}
+
static void
stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
- bool json, bool large, bool huge, unsigned i)
-{
+ bool json, bool large, bool mutex, unsigned i) {
size_t page;
bool in_gap, in_gap_prev;
unsigned nbins, j;
@@ -49,32 +130,26 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"bins\": [\n");
} else {
- if (config_tcache) {
- malloc_cprintf(write_cb, cbopaque,
- "bins: size ind allocated nmalloc"
- " ndalloc nrequests curregs"
- " curruns regs pgs util nfills"
- " nflushes newruns reruns\n");
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "bins: size ind allocated nmalloc"
- " ndalloc nrequests curregs"
- " curruns regs pgs util newruns"
- " reruns\n");
- }
+ char *mutex_counters = " n_lock_ops n_waiting"
+ " n_spin_acq total_wait_ns max_wait_ns\n";
+ malloc_cprintf(write_cb, cbopaque,
+ "bins: size ind allocated nmalloc"
+ " ndalloc nrequests curregs curslabs regs"
+ " pgs util nfills nflushes newslabs"
+ " reslabs%s", mutex ? mutex_counters : "\n");
}
for (j = 0, in_gap = false; j < nbins; j++) {
- uint64_t nruns;
- size_t reg_size, run_size, curregs;
- size_t curruns;
+ uint64_t nslabs;
+ size_t reg_size, slab_size, curregs;
+ size_t curslabs;
uint32_t nregs;
uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
- uint64_t nreruns;
+ uint64_t nreslabs;
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nruns", i, j, &nruns,
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nslabs", i, j, &nslabs,
uint64_t);
in_gap_prev = in_gap;
- in_gap = (nruns == 0);
+ in_gap = (nslabs == 0);
if (!json && in_gap_prev && !in_gap) {
malloc_cprintf(write_cb, cbopaque,
@@ -83,7 +158,7 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
CTL_M2_GET("arenas.bin.0.size", j, &reg_size, size_t);
CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t);
- CTL_M2_GET("arenas.bin.0.run_size", j, &run_size, size_t);
+ CTL_M2_GET("arenas.bin.0.slab_size", j, &slab_size, size_t);
CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j, &nmalloc,
uint64_t);
@@ -93,15 +168,13 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
size_t);
CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j,
&nrequests, uint64_t);
- if (config_tcache) {
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, j,
- &nfills, uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", i, j,
- &nflushes, uint64_t);
- }
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nreruns", i, j, &nreruns,
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, j, &nfills,
+ uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", i, j, &nflushes,
uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.curruns", i, j, &curruns,
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nreslabs", i, j, &nreslabs,
+ uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.curslabs", i, j, &curslabs,
size_t);
if (json) {
@@ -110,80 +183,75 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
"\t\t\t\t\t\t\"nmalloc\": %"FMTu64",\n"
"\t\t\t\t\t\t\"ndalloc\": %"FMTu64",\n"
"\t\t\t\t\t\t\"curregs\": %zu,\n"
- "\t\t\t\t\t\t\"nrequests\": %"FMTu64",\n",
- nmalloc,
- ndalloc,
- curregs,
- nrequests);
- if (config_tcache) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\t\"nfills\": %"FMTu64",\n"
- "\t\t\t\t\t\t\"nflushes\": %"FMTu64",\n",
- nfills,
- nflushes);
+ "\t\t\t\t\t\t\"nrequests\": %"FMTu64",\n"
+ "\t\t\t\t\t\t\"nfills\": %"FMTu64",\n"
+ "\t\t\t\t\t\t\"nflushes\": %"FMTu64",\n"
+ "\t\t\t\t\t\t\"nreslabs\": %"FMTu64",\n"
+ "\t\t\t\t\t\t\"curslabs\": %zu%s\n",
+ nmalloc, ndalloc, curregs, nrequests, nfills,
+ nflushes, nreslabs, curslabs, mutex ? "," : "");
+ if (mutex) {
+ uint64_t mutex_stats[mutex_prof_num_counters];
+ read_arena_bin_mutex_stats(i, j, mutex_stats);
+ mutex_stats_output_json(write_cb, cbopaque,
+ "mutex", mutex_stats, "\t\t\t\t\t\t", true);
}
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\t\"nreruns\": %"FMTu64",\n"
- "\t\t\t\t\t\t\"curruns\": %zu\n"
"\t\t\t\t\t}%s\n",
- nreruns,
- curruns,
(j + 1 < nbins) ? "," : "");
} else if (!in_gap) {
- size_t availregs, milli;
- char util[6]; /* "x.yyy". */
-
- availregs = nregs * curruns;
- milli = (availregs != 0) ? (1000 * curregs) / availregs
- : 1000;
-
- if (milli > 1000) {
- /*
- * Race detected: the counters were read in
- * separate mallctl calls and concurrent
- * operations happened in between. In this case
- * no meaningful utilization can be computed.
- */
- malloc_snprintf(util, sizeof(util), " race");
- } else if (milli < 10) {
- malloc_snprintf(util, sizeof(util),
- "0.00%zu", milli);
- } else if (milli < 100) {
- malloc_snprintf(util, sizeof(util), "0.0%zu",
- milli);
- } else if (milli < 1000) {
- malloc_snprintf(util, sizeof(util), "0.%zu",
- milli);
- } else {
- assert(milli == 1000);
- malloc_snprintf(util, sizeof(util), "1");
+ size_t availregs = nregs * curslabs;
+ char util[6];
+ if (get_rate_str((uint64_t)curregs, (uint64_t)availregs,
+ util)) {
+ if (availregs == 0) {
+ malloc_snprintf(util, sizeof(util),
+ "1");
+ } else if (curregs > availregs) {
+ /*
+ * Race detected: the counters were read
+ * in separate mallctl calls and
+ * concurrent operations happened in
+ * between. In this case no meaningful
+ * utilization can be computed.
+ */
+ malloc_snprintf(util, sizeof(util),
+ " race");
+ } else {
+ not_reached();
+ }
}
+ uint64_t mutex_stats[mutex_prof_num_counters];
+ if (mutex) {
+ read_arena_bin_mutex_stats(i, j, mutex_stats);
+ }
+
+ malloc_cprintf(write_cb, cbopaque, "%20zu %3u %12zu %12"
+ FMTu64" %12"FMTu64" %12"FMTu64" %12zu %12zu %4u"
+ " %3zu %-5s %12"FMTu64" %12"FMTu64" %12"FMTu64
+ " %12"FMTu64, reg_size, j, curregs * reg_size,
+ nmalloc, ndalloc, nrequests, curregs, curslabs,
+ nregs, slab_size / page, util, nfills, nflushes,
+ nslabs, nreslabs);
- if (config_tcache) {
+ /* Output less info for bin mutexes to save space. */
+ if (mutex) {
malloc_cprintf(write_cb, cbopaque,
- "%20zu %3u %12zu %12"FMTu64
- " %12"FMTu64" %12"FMTu64" %12zu"
- " %12zu %4u %3zu %-5s %12"FMTu64
- " %12"FMTu64" %12"FMTu64" %12"FMTu64"\n",
- reg_size, j, curregs * reg_size, nmalloc,
- ndalloc, nrequests, curregs, curruns, nregs,
- run_size / page, util, nfills, nflushes,
- nruns, nreruns);
+ " %12"FMTu64" %12"FMTu64" %12"FMTu64
+ " %14"FMTu64" %12"FMTu64"\n",
+ mutex_stats[mutex_counter_num_ops],
+ mutex_stats[mutex_counter_num_wait],
+ mutex_stats[mutex_counter_num_spin_acq],
+ mutex_stats[mutex_counter_total_wait_time],
+ mutex_stats[mutex_counter_max_wait_time]);
} else {
- malloc_cprintf(write_cb, cbopaque,
- "%20zu %3u %12zu %12"FMTu64
- " %12"FMTu64" %12"FMTu64" %12zu"
- " %12zu %4u %3zu %-5s %12"FMTu64
- " %12"FMTu64"\n",
- reg_size, j, curregs * reg_size, nmalloc,
- ndalloc, nrequests, curregs, curruns, nregs,
- run_size / page, util, nruns, nreruns);
+ malloc_cprintf(write_cb, cbopaque, "\n");
}
}
}
if (json) {
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t]%s\n", (large || huge) ? "," : "");
+ "\t\t\t\t]%s\n", large ? "," : "");
} else {
if (in_gap) {
malloc_cprintf(write_cb, cbopaque,
@@ -193,31 +261,30 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
}
static void
-stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
- bool json, bool huge, unsigned i)
-{
- unsigned nbins, nlruns, j;
+stats_arena_lextents_print(void (*write_cb)(void *, const char *),
+ void *cbopaque, bool json, unsigned i) {
+ unsigned nbins, nlextents, j;
bool in_gap, in_gap_prev;
CTL_GET("arenas.nbins", &nbins, unsigned);
- CTL_GET("arenas.nlruns", &nlruns, unsigned);
+ CTL_GET("arenas.nlextents", &nlextents, unsigned);
if (json) {
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"lruns\": [\n");
+ "\t\t\t\t\"lextents\": [\n");
} else {
malloc_cprintf(write_cb, cbopaque,
"large: size ind allocated nmalloc"
- " ndalloc nrequests curruns\n");
+ " ndalloc nrequests curlextents\n");
}
- for (j = 0, in_gap = false; j < nlruns; j++) {
+ for (j = 0, in_gap = false; j < nlextents; j++) {
uint64_t nmalloc, ndalloc, nrequests;
- size_t run_size, curruns;
+ size_t lextent_size, curlextents;
- CTL_M2_M4_GET("stats.arenas.0.lruns.0.nmalloc", i, j, &nmalloc,
- uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.lruns.0.ndalloc", i, j, &ndalloc,
- uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.lruns.0.nrequests", i, j,
+ CTL_M2_M4_GET("stats.arenas.0.lextents.0.nmalloc", i, j,
+ &nmalloc, uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.lextents.0.ndalloc", i, j,
+ &ndalloc, uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.lextents.0.nrequests", i, j,
&nrequests, uint64_t);
in_gap_prev = in_gap;
in_gap = (nrequests == 0);
@@ -227,27 +294,28 @@ stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
" ---\n");
}
- CTL_M2_GET("arenas.lrun.0.size", j, &run_size, size_t);
- CTL_M2_M4_GET("stats.arenas.0.lruns.0.curruns", i, j, &curruns,
- size_t);
+ CTL_M2_GET("arenas.lextent.0.size", j, &lextent_size, size_t);
+ CTL_M2_M4_GET("stats.arenas.0.lextents.0.curlextents", i, j,
+ &curlextents, size_t);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t{\n"
- "\t\t\t\t\t\t\"curruns\": %zu\n"
+ "\t\t\t\t\t\t\"curlextents\": %zu\n"
"\t\t\t\t\t}%s\n",
- curruns,
- (j + 1 < nlruns) ? "," : "");
+ curlextents,
+ (j + 1 < nlextents) ? "," : "");
} else if (!in_gap) {
malloc_cprintf(write_cb, cbopaque,
"%20zu %3u %12zu %12"FMTu64" %12"FMTu64
" %12"FMTu64" %12zu\n",
- run_size, nbins + j, curruns * run_size, nmalloc,
- ndalloc, nrequests, curruns);
+ lextent_size, nbins + j,
+ curlextents * lextent_size, nmalloc, ndalloc,
+ nrequests, curlextents);
}
}
if (json) {
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t]%s\n", huge ? "," : "");
+ "\t\t\t\t]\n");
} else {
if (in_gap) {
malloc_cprintf(write_cb, cbopaque,
@@ -257,87 +325,91 @@ stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
}
static void
-stats_arena_hchunks_print(void (*write_cb)(void *, const char *),
- void *cbopaque, bool json, unsigned i)
-{
- unsigned nbins, nlruns, nhchunks, j;
- bool in_gap, in_gap_prev;
+read_arena_mutex_stats(unsigned arena_ind,
+ uint64_t results[mutex_prof_num_arena_mutexes][mutex_prof_num_counters]) {
+ char cmd[MUTEX_CTL_STR_MAX_LENGTH];
+
+ mutex_prof_arena_ind_t i;
+ for (i = 0; i < mutex_prof_num_arena_mutexes; i++) {
+#define OP(c, t) \
+ gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \
+ "arenas.0.mutexes", arena_mutex_names[i], #c); \
+ CTL_M2_GET(cmd, arena_ind, \
+ (t *)&results[i][mutex_counter_##c], t);
+MUTEX_PROF_COUNTERS
+#undef OP
+ }
+}
- CTL_GET("arenas.nbins", &nbins, unsigned);
- CTL_GET("arenas.nlruns", &nlruns, unsigned);
- CTL_GET("arenas.nhchunks", &nhchunks, unsigned);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"hchunks\": [\n");
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "huge: size ind allocated nmalloc"
- " ndalloc nrequests curhchunks\n");
+static void
+mutex_stats_output(void (*write_cb)(void *, const char *), void *cbopaque,
+ const char *name, uint64_t stats[mutex_prof_num_counters],
+ bool first_mutex) {
+ if (first_mutex) {
+ /* Print title. */
+ malloc_cprintf(write_cb, cbopaque,
+ " n_lock_ops n_waiting"
+ " n_spin_acq n_owner_switch total_wait_ns"
+ " max_wait_ns max_n_thds\n");
}
- for (j = 0, in_gap = false; j < nhchunks; j++) {
- uint64_t nmalloc, ndalloc, nrequests;
- size_t hchunk_size, curhchunks;
- CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nmalloc", i, j,
- &nmalloc, uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.hchunks.0.ndalloc", i, j,
- &ndalloc, uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nrequests", i, j,
- &nrequests, uint64_t);
- in_gap_prev = in_gap;
- in_gap = (nrequests == 0);
+ malloc_cprintf(write_cb, cbopaque, "%s", name);
+ malloc_cprintf(write_cb, cbopaque, ":%*c",
+ (int)(20 - strlen(name)), ' ');
+
+ char *fmt_str[2] = {"%12"FMTu32, "%16"FMTu64};
+#define OP(c, t) \
+ malloc_cprintf(write_cb, cbopaque, \
+ fmt_str[sizeof(t) / sizeof(uint32_t) - 1], \
+ (t)stats[mutex_counter_##c]);
+MUTEX_PROF_COUNTERS
+#undef OP
+ malloc_cprintf(write_cb, cbopaque, "\n");
+}
- if (!json && in_gap_prev && !in_gap) {
- malloc_cprintf(write_cb, cbopaque,
- " ---\n");
- }
+static void
+stats_arena_mutexes_print(void (*write_cb)(void *, const char *),
+ void *cbopaque, bool json, bool json_end, unsigned arena_ind) {
+ uint64_t mutex_stats[mutex_prof_num_arena_mutexes][mutex_prof_num_counters];
+ read_arena_mutex_stats(arena_ind, mutex_stats);
- CTL_M2_GET("arenas.hchunk.0.size", j, &hchunk_size, size_t);
- CTL_M2_M4_GET("stats.arenas.0.hchunks.0.curhchunks", i, j,
- &curhchunks, size_t);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t{\n"
- "\t\t\t\t\t\t\"curhchunks\": %zu\n"
- "\t\t\t\t\t}%s\n",
- curhchunks,
- (j + 1 < nhchunks) ? "," : "");
- } else if (!in_gap) {
- malloc_cprintf(write_cb, cbopaque,
- "%20zu %3u %12zu %12"FMTu64" %12"FMTu64
- " %12"FMTu64" %12zu\n",
- hchunk_size, nbins + nlruns + j,
- curhchunks * hchunk_size, nmalloc, ndalloc,
- nrequests, curhchunks);
- }
- }
+ /* Output mutex stats. */
if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t]\n");
+ malloc_cprintf(write_cb, cbopaque, "\t\t\t\t\"mutexes\": {\n");
+ mutex_prof_arena_ind_t i, last_mutex;
+ last_mutex = mutex_prof_num_arena_mutexes - 1;
+ for (i = 0; i < mutex_prof_num_arena_mutexes; i++) {
+ mutex_stats_output_json(write_cb, cbopaque,
+ arena_mutex_names[i], mutex_stats[i],
+ "\t\t\t\t\t", (i == last_mutex));
+ }
+ malloc_cprintf(write_cb, cbopaque, "\t\t\t\t}%s\n",
+ json_end ? "" : ",");
} else {
- if (in_gap) {
- malloc_cprintf(write_cb, cbopaque,
- " ---\n");
+ mutex_prof_arena_ind_t i;
+ for (i = 0; i < mutex_prof_num_arena_mutexes; i++) {
+ mutex_stats_output(write_cb, cbopaque,
+ arena_mutex_names[i], mutex_stats[i], i == 0);
}
}
}
static void
stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
- bool json, unsigned i, bool bins, bool large, bool huge)
-{
+ bool json, unsigned i, bool bins, bool large, bool mutex) {
unsigned nthreads;
const char *dss;
- ssize_t lg_dirty_mult, decay_time;
- size_t page, pactive, pdirty, mapped, retained;
- size_t metadata_mapped, metadata_allocated;
- uint64_t npurge, nmadvise, purged;
+ ssize_t dirty_decay_ms, muzzy_decay_ms;
+ size_t page, pactive, pdirty, pmuzzy, mapped, retained;
+ size_t base, internal, resident;
+ uint64_t dirty_npurge, dirty_nmadvise, dirty_purged;
+ uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged;
size_t small_allocated;
uint64_t small_nmalloc, small_ndalloc, small_nrequests;
size_t large_allocated;
uint64_t large_nmalloc, large_ndalloc, large_nrequests;
- size_t huge_allocated;
- uint64_t huge_nmalloc, huge_ndalloc, huge_nrequests;
+ size_t tcache_bytes;
+ uint64_t uptime;
CTL_GET("arenas.page", &page, size_t);
@@ -350,68 +422,88 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
"assigned threads: %u\n", nthreads);
}
- CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *);
+ CTL_M2_GET("stats.arenas.0.uptime", i, &uptime, uint64_t);
if (json) {
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"dss\": \"%s\",\n", dss);
+ "\t\t\t\t\"uptime_ns\": %"FMTu64",\n", uptime);
} else {
malloc_cprintf(write_cb, cbopaque,
- "dss allocation precedence: %s\n", dss);
+ "uptime: %"FMTu64"\n", uptime);
}
- CTL_M2_GET("stats.arenas.0.lg_dirty_mult", i, &lg_dirty_mult, ssize_t);
+ CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *);
if (json) {
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"lg_dirty_mult\": %zd,\n", lg_dirty_mult);
+ "\t\t\t\t\"dss\": \"%s\",\n", dss);
} else {
- if (opt_purge == purge_mode_ratio) {
- if (lg_dirty_mult >= 0) {
- malloc_cprintf(write_cb, cbopaque,
- "min active:dirty page ratio: %u:1\n",
- (1U << lg_dirty_mult));
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "min active:dirty page ratio: N/A\n");
- }
- }
- }
-
- CTL_M2_GET("stats.arenas.0.decay_time", i, &decay_time, ssize_t);
- if (json) {
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"decay_time\": %zd,\n", decay_time);
- } else {
- if (opt_purge == purge_mode_decay) {
- if (decay_time >= 0) {
- malloc_cprintf(write_cb, cbopaque,
- "decay time: %zd\n", decay_time);
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "decay time: N/A\n");
- }
- }
+ "dss allocation precedence: %s\n", dss);
}
+ CTL_M2_GET("stats.arenas.0.dirty_decay_ms", i, &dirty_decay_ms,
+ ssize_t);
+ CTL_M2_GET("stats.arenas.0.muzzy_decay_ms", i, &muzzy_decay_ms,
+ ssize_t);
CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t);
CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t);
- CTL_M2_GET("stats.arenas.0.npurge", i, &npurge, uint64_t);
- CTL_M2_GET("stats.arenas.0.nmadvise", i, &nmadvise, uint64_t);
- CTL_M2_GET("stats.arenas.0.purged", i, &purged, uint64_t);
+ CTL_M2_GET("stats.arenas.0.pmuzzy", i, &pmuzzy, size_t);
+ CTL_M2_GET("stats.arenas.0.dirty_npurge", i, &dirty_npurge, uint64_t);
+ CTL_M2_GET("stats.arenas.0.dirty_nmadvise", i, &dirty_nmadvise,
+ uint64_t);
+ CTL_M2_GET("stats.arenas.0.dirty_purged", i, &dirty_purged, uint64_t);
+ CTL_M2_GET("stats.arenas.0.muzzy_npurge", i, &muzzy_npurge, uint64_t);
+ CTL_M2_GET("stats.arenas.0.muzzy_nmadvise", i, &muzzy_nmadvise,
+ uint64_t);
+ CTL_M2_GET("stats.arenas.0.muzzy_purged", i, &muzzy_purged, uint64_t);
if (json) {
malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"dirty_decay_ms\": %zd,\n", dirty_decay_ms);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"muzzy_decay_ms\": %zd,\n", muzzy_decay_ms);
+ malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"pactive\": %zu,\n", pactive);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"pdirty\": %zu,\n", pdirty);
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"npurge\": %"FMTu64",\n", npurge);
+ "\t\t\t\t\"pmuzzy\": %zu,\n", pmuzzy);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"dirty_npurge\": %"FMTu64",\n", dirty_npurge);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"dirty_nmadvise\": %"FMTu64",\n", dirty_nmadvise);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"dirty_purged\": %"FMTu64",\n", dirty_purged);
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"nmadvise\": %"FMTu64",\n", nmadvise);
+ "\t\t\t\t\"muzzy_npurge\": %"FMTu64",\n", muzzy_npurge);
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"purged\": %"FMTu64",\n", purged);
+ "\t\t\t\t\"muzzy_nmadvise\": %"FMTu64",\n", muzzy_nmadvise);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"muzzy_purged\": %"FMTu64",\n", muzzy_purged);
} else {
malloc_cprintf(write_cb, cbopaque,
- "purging: dirty: %zu, sweeps: %"FMTu64", madvises: %"FMTu64
- ", purged: %"FMTu64"\n", pdirty, npurge, nmadvise, purged);
+ "decaying: time npages sweeps madvises"
+ " purged\n");
+ if (dirty_decay_ms >= 0) {
+ malloc_cprintf(write_cb, cbopaque,
+ " dirty: %5zd %12zu %12"FMTu64" %12"FMTu64" %12"
+ FMTu64"\n", dirty_decay_ms, pdirty, dirty_npurge,
+ dirty_nmadvise, dirty_purged);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ " dirty: N/A %12zu %12"FMTu64" %12"FMTu64" %12"
+ FMTu64"\n", pdirty, dirty_npurge, dirty_nmadvise,
+ dirty_purged);
+ }
+ if (muzzy_decay_ms >= 0) {
+ malloc_cprintf(write_cb, cbopaque,
+ " muzzy: %5zd %12zu %12"FMTu64" %12"FMTu64" %12"
+ FMTu64"\n", muzzy_decay_ms, pmuzzy, muzzy_npurge,
+ muzzy_nmadvise, muzzy_purged);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ " muzzy: N/A %12zu %12"FMTu64" %12"FMTu64" %12"
+ FMTu64"\n", pmuzzy, muzzy_npurge, muzzy_nmadvise,
+ muzzy_purged);
+ }
}
CTL_M2_GET("stats.arenas.0.small.allocated", i, &small_allocated,
@@ -473,40 +565,12 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
" %12"FMTu64"\n",
large_allocated, large_nmalloc, large_ndalloc,
large_nrequests);
- }
-
- CTL_M2_GET("stats.arenas.0.huge.allocated", i, &huge_allocated, size_t);
- CTL_M2_GET("stats.arenas.0.huge.nmalloc", i, &huge_nmalloc, uint64_t);
- CTL_M2_GET("stats.arenas.0.huge.ndalloc", i, &huge_ndalloc, uint64_t);
- CTL_M2_GET("stats.arenas.0.huge.nrequests", i, &huge_nrequests,
- uint64_t);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"huge\": {\n");
-
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"allocated\": %zu,\n", huge_allocated);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", huge_nmalloc);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", huge_ndalloc);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", huge_nrequests);
-
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t},\n");
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "huge: %12zu %12"FMTu64" %12"FMTu64
- " %12"FMTu64"\n",
- huge_allocated, huge_nmalloc, huge_ndalloc, huge_nrequests);
malloc_cprintf(write_cb, cbopaque,
"total: %12zu %12"FMTu64" %12"FMTu64
" %12"FMTu64"\n",
- small_allocated + large_allocated + huge_allocated,
- small_nmalloc + large_nmalloc + huge_nmalloc,
- small_ndalloc + large_ndalloc + huge_ndalloc,
- small_nrequests + large_nrequests + huge_nrequests);
+ small_allocated + large_allocated, small_nmalloc +
+ large_nmalloc, small_ndalloc + large_ndalloc,
+ small_nrequests + large_nrequests);
}
if (!json) {
malloc_cprintf(write_cb, cbopaque,
@@ -531,41 +595,59 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
"retained: %12zu\n", retained);
}
- CTL_M2_GET("stats.arenas.0.metadata.mapped", i, &metadata_mapped,
- size_t);
- CTL_M2_GET("stats.arenas.0.metadata.allocated", i, &metadata_allocated,
- size_t);
+ CTL_M2_GET("stats.arenas.0.base", i, &base, size_t);
if (json) {
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"metadata\": {\n");
+ "\t\t\t\t\"base\": %zu,\n", base);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "base: %12zu\n", base);
+ }
+ CTL_M2_GET("stats.arenas.0.internal", i, &internal, size_t);
+ if (json) {
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"mapped\": %zu,\n", metadata_mapped);
+ "\t\t\t\t\"internal\": %zu,\n", internal);
+ } else {
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"allocated\": %zu\n", metadata_allocated);
+ "internal: %12zu\n", internal);
+ }
+ CTL_M2_GET("stats.arenas.0.tcache_bytes", i, &tcache_bytes, size_t);
+ if (json) {
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t}%s\n", (bins || large || huge) ? "," : "");
+ "\t\t\t\t\"tcache\": %zu,\n", tcache_bytes);
} else {
malloc_cprintf(write_cb, cbopaque,
- "metadata: mapped: %zu, allocated: %zu\n",
- metadata_mapped, metadata_allocated);
+ "tcache: %12zu\n", tcache_bytes);
}
+ CTL_M2_GET("stats.arenas.0.resident", i, &resident, size_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"resident\": %zu%s\n", resident,
+ (bins || large || mutex) ? "," : "");
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "resident: %12zu\n", resident);
+ }
+
+ if (mutex) {
+ stats_arena_mutexes_print(write_cb, cbopaque, json,
+ !(bins || large), i);
+ }
if (bins) {
- stats_arena_bins_print(write_cb, cbopaque, json, large, huge,
+ stats_arena_bins_print(write_cb, cbopaque, json, large, mutex,
i);
}
- if (large)
- stats_arena_lruns_print(write_cb, cbopaque, json, huge, i);
- if (huge)
- stats_arena_hchunks_print(write_cb, cbopaque, json, i);
+ if (large) {
+ stats_arena_lextents_print(write_cb, cbopaque, json, i);
+ }
}
static void
stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
- bool json, bool more)
-{
+ bool json, bool more) {
const char *cpv;
bool bv;
unsigned uv;
@@ -584,11 +666,12 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\"version\": \"%s\",\n", cpv);
- } else
+ } else {
malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv);
+ }
/* config. */
-#define CONFIG_WRITE_BOOL_JSON(n, c) \
+#define CONFIG_WRITE_BOOL_JSON(n, c) \
if (json) { \
CTL_GET("config."#n, &bv, bool); \
malloc_cprintf(write_cb, cbopaque, \
@@ -624,15 +707,12 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
"config.malloc_conf: \"%s\"\n", config_malloc_conf);
}
- CONFIG_WRITE_BOOL_JSON(munmap, ",")
CONFIG_WRITE_BOOL_JSON(prof, ",")
CONFIG_WRITE_BOOL_JSON(prof_libgcc, ",")
CONFIG_WRITE_BOOL_JSON(prof_libunwind, ",")
CONFIG_WRITE_BOOL_JSON(stats, ",")
- CONFIG_WRITE_BOOL_JSON(tcache, ",")
- CONFIG_WRITE_BOOL_JSON(tls, ",")
+ CONFIG_WRITE_BOOL_JSON(thp, ",")
CONFIG_WRITE_BOOL_JSON(utrace, ",")
- CONFIG_WRITE_BOOL_JSON(valgrind, ",")
CONFIG_WRITE_BOOL_JSON(xmalloc, "")
if (json) {
@@ -642,7 +722,7 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
#undef CONFIG_WRITE_BOOL_JSON
/* opt. */
-#define OPT_WRITE_BOOL(n, c) \
+#define OPT_WRITE_BOOL(n, c) \
if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0) { \
if (json) { \
malloc_cprintf(write_cb, cbopaque, \
@@ -653,10 +733,10 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
" opt."#n": %s\n", bv ? "true" : "false"); \
} \
}
-#define OPT_WRITE_BOOL_MUTABLE(n, m, c) { \
+#define OPT_WRITE_BOOL_MUTABLE(n, m, c) { \
bool bv2; \
if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0 && \
- je_mallctl(#m, &bv2, (void *)&bsz, NULL, 0) == 0) { \
+ je_mallctl(#m, (void *)&bv2, &bsz, NULL, 0) == 0) { \
if (json) { \
malloc_cprintf(write_cb, cbopaque, \
"\t\t\t\""#n"\": %s%s\n", bv ? "true" : \
@@ -668,7 +748,7 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
} \
} \
}
-#define OPT_WRITE_UNSIGNED(n, c) \
+#define OPT_WRITE_UNSIGNED(n, c) \
if (je_mallctl("opt."#n, (void *)&uv, &usz, NULL, 0) == 0) { \
if (json) { \
malloc_cprintf(write_cb, cbopaque, \
@@ -678,17 +758,7 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
" opt."#n": %u\n", uv); \
} \
}
-#define OPT_WRITE_SIZE_T(n, c) \
- if (je_mallctl("opt."#n, (void *)&sv, &ssz, NULL, 0) == 0) { \
- if (json) { \
- malloc_cprintf(write_cb, cbopaque, \
- "\t\t\t\""#n"\": %zu%s\n", sv, (c)); \
- } else { \
- malloc_cprintf(write_cb, cbopaque, \
- " opt."#n": %zu\n", sv); \
- } \
- }
-#define OPT_WRITE_SSIZE_T(n, c) \
+#define OPT_WRITE_SSIZE_T(n, c) \
if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0) { \
if (json) { \
malloc_cprintf(write_cb, cbopaque, \
@@ -698,7 +768,7 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
" opt."#n": %zd\n", ssv); \
} \
}
-#define OPT_WRITE_SSIZE_T_MUTABLE(n, m, c) { \
+#define OPT_WRITE_SSIZE_T_MUTABLE(n, m, c) { \
ssize_t ssv2; \
if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0 && \
je_mallctl(#m, (void *)&ssv2, &sssz, NULL, 0) == 0) { \
@@ -712,7 +782,7 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
} \
} \
}
-#define OPT_WRITE_CHAR_P(n, c) \
+#define OPT_WRITE_CHAR_P(n, c) \
if (je_mallctl("opt."#n, (void *)&cpv, &cpsz, NULL, 0) == 0) { \
if (json) { \
malloc_cprintf(write_cb, cbopaque, \
@@ -731,26 +801,20 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
"Run-time option settings:\n");
}
OPT_WRITE_BOOL(abort, ",")
- OPT_WRITE_SIZE_T(lg_chunk, ",")
+ OPT_WRITE_BOOL(abort_conf, ",")
+ OPT_WRITE_BOOL(retain, ",")
OPT_WRITE_CHAR_P(dss, ",")
OPT_WRITE_UNSIGNED(narenas, ",")
- OPT_WRITE_CHAR_P(purge, ",")
- if (json || opt_purge == purge_mode_ratio) {
- OPT_WRITE_SSIZE_T_MUTABLE(lg_dirty_mult,
- arenas.lg_dirty_mult, ",")
- }
- if (json || opt_purge == purge_mode_decay) {
- OPT_WRITE_SSIZE_T_MUTABLE(decay_time, arenas.decay_time, ",")
- }
+ OPT_WRITE_CHAR_P(percpu_arena, ",")
+ OPT_WRITE_BOOL_MUTABLE(background_thread, background_thread, ",")
+ OPT_WRITE_SSIZE_T_MUTABLE(dirty_decay_ms, arenas.dirty_decay_ms, ",")
+ OPT_WRITE_SSIZE_T_MUTABLE(muzzy_decay_ms, arenas.muzzy_decay_ms, ",")
OPT_WRITE_CHAR_P(junk, ",")
- OPT_WRITE_SIZE_T(quarantine, ",")
- OPT_WRITE_BOOL(redzone, ",")
OPT_WRITE_BOOL(zero, ",")
OPT_WRITE_BOOL(utrace, ",")
OPT_WRITE_BOOL(xmalloc, ",")
OPT_WRITE_BOOL(tcache, ",")
OPT_WRITE_SSIZE_T(lg_tcache_max, ",")
- OPT_WRITE_BOOL(thp, ",")
OPT_WRITE_BOOL(prof, ",")
OPT_WRITE_CHAR_P(prof_prefix, ",")
OPT_WRITE_BOOL_MUTABLE(prof_active, prof.active, ",")
@@ -762,12 +826,16 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
OPT_WRITE_BOOL(prof_gdump, ",")
OPT_WRITE_BOOL(prof_final, ",")
OPT_WRITE_BOOL(prof_leak, ",")
- /*
- * stats_print is always emitted, so as long as stats_print comes last
- * it's safe to unconditionally omit the comma here (rather than having
- * to conditionally omit it elsewhere depending on configuration).
- */
- OPT_WRITE_BOOL(stats_print, "")
+ OPT_WRITE_BOOL(stats_print, ",")
+ if (json || opt_stats_print) {
+ /*
+ * stats_print_opts is always emitted for JSON, so as long as it
+ * comes last it's safe to unconditionally omit the comma here
+ * (rather than having to conditionally omit it elsewhere
+ * depending on configuration).
+ */
+ OPT_WRITE_CHAR_P(stats_print_opts, "")
+ }
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t},\n");
@@ -775,7 +843,6 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
#undef OPT_WRITE_BOOL
#undef OPT_WRITE_BOOL_MUTABLE
-#undef OPT_WRITE_SIZE_T
#undef OPT_WRITE_SSIZE_T
#undef OPT_WRITE_CHAR_P
@@ -789,47 +856,35 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"narenas\": %u,\n", uv);
- } else
+ } else {
malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv);
-
- CTL_GET("arenas.lg_dirty_mult", &ssv, ssize_t);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"lg_dirty_mult\": %zd,\n", ssv);
- } else if (opt_purge == purge_mode_ratio) {
- if (ssv >= 0) {
- malloc_cprintf(write_cb, cbopaque,
- "Min active:dirty page ratio per arena: "
- "%u:1\n", (1U << ssv));
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "Min active:dirty page ratio per arena: "
- "N/A\n");
- }
}
- CTL_GET("arenas.decay_time", &ssv, ssize_t);
+
if (json) {
+ CTL_GET("arenas.dirty_decay_ms", &ssv, ssize_t);
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"decay_time\": %zd,\n", ssv);
- } else if (opt_purge == purge_mode_decay) {
+ "\t\t\t\"dirty_decay_ms\": %zd,\n", ssv);
+
+ CTL_GET("arenas.muzzy_decay_ms", &ssv, ssize_t);
malloc_cprintf(write_cb, cbopaque,
- "Unused dirty page decay time: %zd%s\n",
- ssv, (ssv < 0) ? " (no decay)" : "");
+ "\t\t\t\"muzzy_decay_ms\": %zd,\n", ssv);
}
CTL_GET("arenas.quantum", &sv, size_t);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"quantum\": %zu,\n", sv);
- } else
+ } else {
malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv);
+ }
CTL_GET("arenas.page", &sv, size_t);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"page\": %zu,\n", sv);
- } else
+ } else {
malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv);
+ }
if (je_mallctl("arenas.tcache_max", (void *)&sv, &ssz, NULL, 0) == 0) {
if (json) {
@@ -842,17 +897,15 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
}
if (json) {
- unsigned nbins, nlruns, nhchunks, i;
+ unsigned nbins, nlextents, i;
CTL_GET("arenas.nbins", &nbins, unsigned);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"nbins\": %u,\n", nbins);
- if (config_tcache) {
- CTL_GET("arenas.nhbins", &uv, unsigned);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"nhbins\": %u,\n", uv);
- }
+ CTL_GET("arenas.nhbins", &uv, unsigned);
+ malloc_cprintf(write_cb, cbopaque, "\t\t\t\"nhbins\": %u,\n",
+ uv);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"bin\": [\n");
@@ -868,9 +921,9 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\"nregs\": %"FMTu32",\n", u32v);
- CTL_M2_GET("arenas.bin.0.run_size", i, &sv, size_t);
+ CTL_M2_GET("arenas.bin.0.slab_size", i, &sv, size_t);
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"run_size\": %zu\n", sv);
+ "\t\t\t\t\t\"slab_size\": %zu\n", sv);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t}%s\n", (i + 1 < nbins) ? "," : "");
@@ -878,42 +931,22 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
malloc_cprintf(write_cb, cbopaque,
"\t\t\t],\n");
- CTL_GET("arenas.nlruns", &nlruns, unsigned);
+ CTL_GET("arenas.nlextents", &nlextents, unsigned);
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"nlruns\": %u,\n", nlruns);
+ "\t\t\t\"nlextents\": %u,\n", nlextents);
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"lrun\": [\n");
- for (i = 0; i < nlruns; i++) {
+ "\t\t\t\"lextent\": [\n");
+ for (i = 0; i < nlextents; i++) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t{\n");
- CTL_M2_GET("arenas.lrun.0.size", i, &sv, size_t);
+ CTL_M2_GET("arenas.lextent.0.size", i, &sv, size_t);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\t\"size\": %zu\n", sv);
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t}%s\n", (i + 1 < nlruns) ? "," : "");
- }
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t],\n");
-
- CTL_GET("arenas.nhchunks", &nhchunks, unsigned);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"nhchunks\": %u,\n", nhchunks);
-
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"hchunk\": [\n");
- for (i = 0; i < nhchunks; i++) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t{\n");
-
- CTL_M2_GET("arenas.hchunk.0.size", i, &sv, size_t);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"size\": %zu\n", sv);
-
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t}%s\n", (i + 1 < nhchunks) ? "," : "");
+ "\t\t\t\t}%s\n", (i + 1 < nlextents) ? "," : "");
}
malloc_cprintf(write_cb, cbopaque,
"\t\t\t]\n");
@@ -954,26 +987,59 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
}
static void
+read_global_mutex_stats(
+ uint64_t results[mutex_prof_num_global_mutexes][mutex_prof_num_counters]) {
+ char cmd[MUTEX_CTL_STR_MAX_LENGTH];
+
+ mutex_prof_global_ind_t i;
+ for (i = 0; i < mutex_prof_num_global_mutexes; i++) {
+#define OP(c, t) \
+ gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \
+ "mutexes", global_mutex_names[i], #c); \
+ CTL_GET(cmd, (t *)&results[i][mutex_counter_##c], t);
+MUTEX_PROF_COUNTERS
+#undef OP
+ }
+}
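
The OP()/MUTEX_PROF_COUNTERS expansion above is an X-macro: one counter list generates both the counter indices and the per-counter CTL lookups. A minimal, self-contained sketch of the same technique (COUNTERS, counter_##c and read_counter() are hypothetical stand-ins, not jemalloc's names):

#include <stdint.h>
#include <stdio.h>

#define COUNTERS \
    OP(num_ops)  \
    OP(num_wait) \
    OP(max_wait)

enum {
#define OP(c) counter_##c,
COUNTERS
#undef OP
	counter_count
};

static uint64_t read_counter(int which) {
	/* Stand-in for a CTL_GET()-style lookup. */
	return (uint64_t)which * 10;
}

int main(void) {
	uint64_t results[counter_count];
#define OP(c) results[counter_##c] = read_counter(counter_##c);
COUNTERS
#undef OP
	for (int i = 0; i < counter_count; i++) {
		printf("counter %d = %llu\n", i, (unsigned long long)results[i]);
	}
	return 0;
}
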
+
+static void
stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque,
- bool json, bool merged, bool unmerged, bool bins, bool large, bool huge)
-{
- size_t *cactive;
+ bool json, bool merged, bool destroyed, bool unmerged, bool bins,
+ bool large, bool mutex) {
size_t allocated, active, metadata, resident, mapped, retained;
+ size_t num_background_threads;
+ uint64_t background_thread_num_runs, background_thread_run_interval;
- CTL_GET("stats.cactive", &cactive, size_t *);
CTL_GET("stats.allocated", &allocated, size_t);
CTL_GET("stats.active", &active, size_t);
CTL_GET("stats.metadata", &metadata, size_t);
CTL_GET("stats.resident", &resident, size_t);
CTL_GET("stats.mapped", &mapped, size_t);
CTL_GET("stats.retained", &retained, size_t);
+
+ uint64_t mutex_stats[mutex_prof_num_global_mutexes][mutex_prof_num_counters];
+ if (mutex) {
+ read_global_mutex_stats(mutex_stats);
+ }
+
+ if (have_background_thread) {
+ CTL_GET("stats.background_thread.num_threads",
+ &num_background_threads, size_t);
+ CTL_GET("stats.background_thread.num_runs",
+ &background_thread_num_runs, uint64_t);
+ CTL_GET("stats.background_thread.run_interval",
+ &background_thread_run_interval, uint64_t);
+ } else {
+ num_background_threads = 0;
+ background_thread_num_runs = 0;
+ background_thread_run_interval = 0;
+ }
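
The CTL_GET() calls above are the in-tree shorthand for mallctl lookups. From an application the same totals can be read through the public API; a minimal consumer-side sketch, assuming FreeBSD's <malloc_np.h> (a standalone jemalloc exposes the same calls via <jemalloc/jemalloc.h>) and omitting error checking:

#include <stdio.h>
#include <stdint.h>
#include <malloc_np.h>	/* mallctl(); use <jemalloc/jemalloc.h> elsewhere. */

int main(void) {
	/* Refresh the stats snapshot before reading it. */
	uint64_t epoch = 1;
	size_t esz = sizeof(epoch);
	mallctl("epoch", &epoch, &esz, &epoch, sizeof(epoch));

	size_t allocated, active, resident, mapped;
	size_t sz = sizeof(size_t);
	mallctl("stats.allocated", &allocated, &sz, NULL, 0);
	mallctl("stats.active", &active, &sz, NULL, 0);
	mallctl("stats.resident", &resident, &sz, NULL, 0);
	mallctl("stats.mapped", &mapped, &sz, NULL, 0);

	printf("allocated: %zu, active: %zu, resident: %zu, mapped: %zu\n",
	    allocated, active, resident, mapped);
	return 0;
}
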
+
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\"stats\": {\n");
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"cactive\": %zu,\n", atomic_read_z(cactive));
- malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"allocated\": %zu,\n", allocated);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"active\": %zu,\n", active);
@@ -984,21 +1050,60 @@ stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque,
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"mapped\": %zu,\n", mapped);
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"retained\": %zu\n", retained);
+ "\t\t\t\"retained\": %zu,\n", retained);
malloc_cprintf(write_cb, cbopaque,
- "\t\t}%s\n", (merged || unmerged) ? "," : "");
+ "\t\t\t\"background_thread\": {\n");
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"num_threads\": %zu,\n", num_background_threads);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"num_runs\": %"FMTu64",\n",
+ background_thread_num_runs);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"run_interval\": %"FMTu64"\n",
+ background_thread_run_interval);
+ malloc_cprintf(write_cb, cbopaque, "\t\t\t}%s\n",
+ mutex ? "," : "");
+
+ if (mutex) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"mutexes\": {\n");
+ mutex_prof_global_ind_t i;
+ for (i = 0; i < mutex_prof_num_global_mutexes; i++) {
+ mutex_stats_output_json(write_cb, cbopaque,
+ global_mutex_names[i], mutex_stats[i],
+ "\t\t\t\t",
+ i == mutex_prof_num_global_mutexes - 1);
+ }
+ malloc_cprintf(write_cb, cbopaque, "\t\t\t}\n");
+ }
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t}%s\n", (merged || unmerged || destroyed) ? "," : "");
} else {
malloc_cprintf(write_cb, cbopaque,
"Allocated: %zu, active: %zu, metadata: %zu,"
" resident: %zu, mapped: %zu, retained: %zu\n",
allocated, active, metadata, resident, mapped, retained);
- malloc_cprintf(write_cb, cbopaque,
- "Current active ceiling: %zu\n",
- atomic_read_z(cactive));
+
+ if (have_background_thread && num_background_threads > 0) {
+ malloc_cprintf(write_cb, cbopaque,
+ "Background threads: %zu, num_runs: %"FMTu64", "
+ "run_interval: %"FMTu64" ns\n",
+ num_background_threads,
+ background_thread_num_runs,
+ background_thread_run_interval);
+ }
+ if (mutex) {
+ mutex_prof_global_ind_t i;
+ for (i = 0; i < mutex_prof_num_global_mutexes; i++) {
+ mutex_stats_output(write_cb, cbopaque,
+ global_mutex_names[i], mutex_stats[i],
+ i == 0);
+ }
+ }
}
- if (merged || unmerged) {
+ if (merged || destroyed || unmerged) {
unsigned narenas;
if (json) {
@@ -1008,17 +1113,27 @@ stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque,
CTL_GET("arenas.narenas", &narenas, unsigned);
{
+ size_t mib[3];
+ size_t miblen = sizeof(mib) / sizeof(size_t);
+ size_t sz;
VARIABLE_ARRAY(bool, initialized, narenas);
- size_t isz;
+ bool destroyed_initialized;
unsigned i, j, ninitialized;
- isz = sizeof(bool) * narenas;
- xmallctl("arenas.initialized", (void *)initialized,
- &isz, NULL, 0);
+ xmallctlnametomib("arena.0.initialized", mib, &miblen);
for (i = ninitialized = 0; i < narenas; i++) {
- if (initialized[i])
+ mib[1] = i;
+ sz = sizeof(bool);
+ xmallctlbymib(mib, miblen, &initialized[i], &sz,
+ NULL, 0);
+ if (initialized[i]) {
ninitialized++;
+ }
}
+ mib[1] = MALLCTL_ARENAS_DESTROYED;
+ sz = sizeof(bool);
+ xmallctlbymib(mib, miblen, &destroyed_initialized, &sz,
+ NULL, 0);
/* Merged stats. */
if (merged && (ninitialized > 1 || !unmerged)) {
@@ -1031,7 +1146,29 @@ stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque,
"\nMerged arenas stats:\n");
}
stats_arena_print(write_cb, cbopaque, json,
- narenas, bins, large, huge);
+ MALLCTL_ARENAS_ALL, bins, large, mutex);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t}%s\n",
+ ((destroyed_initialized &&
+ destroyed) || unmerged) ? "," :
+ "");
+ }
+ }
+
+ /* Destroyed stats. */
+ if (destroyed_initialized && destroyed) {
+ /* Print destroyed arena stats. */
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"destroyed\": {\n");
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "\nDestroyed arenas stats:\n");
+ }
+ stats_arena_print(write_cb, cbopaque, json,
+ MALLCTL_ARENAS_DESTROYED, bins, large,
+ mutex);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t}%s\n", unmerged ? "," :
@@ -1057,7 +1194,7 @@ stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque,
}
stats_arena_print(write_cb,
cbopaque, json, i, bins,
- large, huge);
+ large, mutex);
if (json) {
malloc_cprintf(write_cb,
cbopaque,
@@ -1079,18 +1216,13 @@ stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque,
void
stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *opts)
-{
+ const char *opts) {
int err;
uint64_t epoch;
size_t u64sz;
- bool json = false;
- bool general = true;
- bool merged = config_stats;
- bool unmerged = config_stats;
- bool bins = true;
- bool large = true;
- bool huge = true;
+#define OPTION(o, v, d, s) bool v = d;
+ STATS_PRINT_OPTIONS
+#undef OPTION
/*
* Refresh stats, in case mallctl() was called by the application.
@@ -1115,31 +1247,11 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
}
if (opts != NULL) {
- unsigned i;
-
- for (i = 0; opts[i] != '\0'; i++) {
+ for (unsigned i = 0; opts[i] != '\0'; i++) {
switch (opts[i]) {
- case 'J':
- json = true;
- break;
- case 'g':
- general = false;
- break;
- case 'm':
- merged = false;
- break;
- case 'a':
- unmerged = false;
- break;
- case 'b':
- bins = false;
- break;
- case 'l':
- large = false;
- break;
- case 'h':
- huge = false;
- break;
+#define OPTION(o, v, d, s) case o: v = s; break;
+ STATS_PRINT_OPTIONS
+#undef OPTION
default:;
}
}
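
The OPTION(o, v, d, s) table replaces the hand-written flag declarations and switch cases with a single X-macro list. A self-contained sketch of the pattern (MY_OPTIONS and parse_opts() are hypothetical, not jemalloc's STATS_PRINT_OPTIONS):

#include <stdbool.h>
#include <stdio.h>

#define MY_OPTIONS \
    OPTION('J', json,     false, true)  \
    OPTION('g', general,  true,  false) \
    OPTION('a', unmerged, true,  false)

static void parse_opts(const char *opts) {
	/* One expansion declares the flags with their defaults... */
#define OPTION(o, v, d, s) bool v = d;
	MY_OPTIONS
#undef OPTION
	/* ...and a second expansion generates the switch cases. */
	for (unsigned i = 0; opts != NULL && opts[i] != '\0'; i++) {
		switch (opts[i]) {
#define OPTION(o, v, d, s) case o: v = s; break;
		MY_OPTIONS
#undef OPTION
		default:;
		}
	}
	printf("json=%d general=%d unmerged=%d\n", json, general, unmerged);
}

int main(void) {
	parse_opts("Jg");
	return 0;
}
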
@@ -1155,13 +1267,13 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
}
if (general) {
- bool more = (merged || unmerged);
- stats_general_print(write_cb, cbopaque, json, more);
+ stats_general_print(write_cb, cbopaque, json, config_stats);
}
if (config_stats) {
- stats_print_helper(write_cb, cbopaque, json, merged, unmerged,
- bins, large, huge);
+ stats_print_helper(write_cb, cbopaque, json, merged, destroyed,
+ unmerged, bins, large, mutex);
}
+
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t}\n"
diff --git a/contrib/jemalloc/src/sz.c b/contrib/jemalloc/src/sz.c
new file mode 100644
index 000000000000..0986615f711b
--- /dev/null
+++ b/contrib/jemalloc/src/sz.c
@@ -0,0 +1,106 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/sz.h"
+
+JEMALLOC_ALIGNED(CACHELINE)
+const size_t sz_pind2sz_tab[NPSIZES+1] = {
+#define PSZ_yes(lg_grp, ndelta, lg_delta) \
+ (((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))),
+#define PSZ_no(lg_grp, ndelta, lg_delta)
+#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) \
+ PSZ_##psz(lg_grp, ndelta, lg_delta)
+ SIZE_CLASSES
+#undef PSZ_yes
+#undef PSZ_no
+#undef SC
+ (LARGE_MAXCLASS + PAGE)
+};
+
+JEMALLOC_ALIGNED(CACHELINE)
+const size_t sz_index2size_tab[NSIZES] = {
+#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) \
+ ((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
+ SIZE_CLASSES
+#undef SC
+};
+
+JEMALLOC_ALIGNED(CACHELINE)
+const uint8_t sz_size2index_tab[] = {
+#if LG_TINY_MIN == 0
+#warning "Dangerous LG_TINY_MIN"
+#define S2B_0(i) i,
+#elif LG_TINY_MIN == 1
+#warning "Dangerous LG_TINY_MIN"
+#define S2B_1(i) i,
+#elif LG_TINY_MIN == 2
+#warning "Dangerous LG_TINY_MIN"
+#define S2B_2(i) i,
+#elif LG_TINY_MIN == 3
+#define S2B_3(i) i,
+#elif LG_TINY_MIN == 4
+#define S2B_4(i) i,
+#elif LG_TINY_MIN == 5
+#define S2B_5(i) i,
+#elif LG_TINY_MIN == 6
+#define S2B_6(i) i,
+#elif LG_TINY_MIN == 7
+#define S2B_7(i) i,
+#elif LG_TINY_MIN == 8
+#define S2B_8(i) i,
+#elif LG_TINY_MIN == 9
+#define S2B_9(i) i,
+#elif LG_TINY_MIN == 10
+#define S2B_10(i) i,
+#elif LG_TINY_MIN == 11
+#define S2B_11(i) i,
+#else
+#error "Unsupported LG_TINY_MIN"
+#endif
+#if LG_TINY_MIN < 1
+#define S2B_1(i) S2B_0(i) S2B_0(i)
+#endif
+#if LG_TINY_MIN < 2
+#define S2B_2(i) S2B_1(i) S2B_1(i)
+#endif
+#if LG_TINY_MIN < 3
+#define S2B_3(i) S2B_2(i) S2B_2(i)
+#endif
+#if LG_TINY_MIN < 4
+#define S2B_4(i) S2B_3(i) S2B_3(i)
+#endif
+#if LG_TINY_MIN < 5
+#define S2B_5(i) S2B_4(i) S2B_4(i)
+#endif
+#if LG_TINY_MIN < 6
+#define S2B_6(i) S2B_5(i) S2B_5(i)
+#endif
+#if LG_TINY_MIN < 7
+#define S2B_7(i) S2B_6(i) S2B_6(i)
+#endif
+#if LG_TINY_MIN < 8
+#define S2B_8(i) S2B_7(i) S2B_7(i)
+#endif
+#if LG_TINY_MIN < 9
+#define S2B_9(i) S2B_8(i) S2B_8(i)
+#endif
+#if LG_TINY_MIN < 10
+#define S2B_10(i) S2B_9(i) S2B_9(i)
+#endif
+#if LG_TINY_MIN < 11
+#define S2B_11(i) S2B_10(i) S2B_10(i)
+#endif
+#define S2B_no(i)
+#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) \
+ S2B_##lg_delta_lookup(index)
+ SIZE_CLASSES
+#undef S2B_3
+#undef S2B_4
+#undef S2B_5
+#undef S2B_6
+#undef S2B_7
+#undef S2B_8
+#undef S2B_9
+#undef S2B_10
+#undef S2B_11
+#undef S2B_no
+#undef SC
+};
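
The new sz.c builds its lookup tables by expanding the SIZE_CLASSES X-macro once per table. A reduced, hypothetical sketch of the same construction (MY_SIZE_CLASSES is not jemalloc's real size-class list):

#include <stddef.h>
#include <stdio.h>

/* SC(index, lg_grp, lg_delta, ndelta): size = (1 << lg_grp) + ndelta << lg_delta. */
#define MY_SIZE_CLASSES \
    SC(0, 3, 3, 0)      \
    SC(1, 3, 3, 1)      \
    SC(2, 4, 4, 1)      \
    SC(3, 5, 5, 1)

static const size_t my_index2size_tab[] = {
#define SC(index, lg_grp, lg_delta, ndelta) \
	(((size_t)1 << (lg_grp)) + ((size_t)(ndelta) << (lg_delta))),
	MY_SIZE_CLASSES
#undef SC
};

int main(void) {
	for (size_t i = 0;
	    i < sizeof(my_index2size_tab) / sizeof(my_index2size_tab[0]); i++) {
		printf("class %zu: %zu bytes\n", i, my_index2size_tab[i]);
	}
	return 0;
}
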
diff --git a/contrib/jemalloc/src/tcache.c b/contrib/jemalloc/src/tcache.c
index e3b04be64070..6355805b196f 100644
--- a/contrib/jemalloc/src/tcache.c
+++ b/contrib/jemalloc/src/tcache.c
@@ -1,5 +1,10 @@
-#define JEMALLOC_TCACHE_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define JEMALLOC_TCACHE_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/size_classes.h"
/******************************************************************************/
/* Data. */
@@ -27,19 +32,20 @@ static malloc_mutex_t tcaches_mtx;
/******************************************************************************/
size_t
-tcache_salloc(tsdn_t *tsdn, const void *ptr)
-{
-
- return (arena_salloc(tsdn, ptr, false));
+tcache_salloc(tsdn_t *tsdn, const void *ptr) {
+ return arena_salloc(tsdn, ptr);
}
void
-tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
-{
+tcache_event_hard(tsd_t *tsd, tcache_t *tcache) {
szind_t binind = tcache->next_gc_bin;
- tcache_bin_t *tbin = &tcache->tbins[binind];
- tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];
+ tcache_bin_t *tbin;
+ if (binind < NBINS) {
+ tbin = tcache_small_bin_get(tcache, binind);
+ } else {
+ tbin = tcache_large_bin_get(tcache, binind);
+ }
if (tbin->low_water > 0) {
/*
* Flush (ceiling) 3/4 of the objects below the low water mark.
@@ -48,71 +54,80 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
tcache_bin_flush_small(tsd, tcache, tbin, binind,
tbin->ncached - tbin->low_water + (tbin->low_water
>> 2));
+ /*
+ * Reduce fill count by 2X. Limit lg_fill_div such that
+ * the fill count is always at least 1.
+ */
+ tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];
+ if ((tbin_info->ncached_max >>
+ (tcache->lg_fill_div[binind] + 1)) >= 1) {
+ tcache->lg_fill_div[binind]++;
+ }
} else {
tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
- tbin->low_water + (tbin->low_water >> 2), tcache);
}
- /*
- * Reduce fill count by 2X. Limit lg_fill_div such that the
- * fill count is always at least 1.
- */
- if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1)
- tbin->lg_fill_div++;
} else if (tbin->low_water < 0) {
/*
- * Increase fill count by 2X. Make sure lg_fill_div stays
- * greater than 0.
+ * Increase fill count by 2X for small bins. Make sure
+ * lg_fill_div stays greater than 0.
*/
- if (tbin->lg_fill_div > 1)
- tbin->lg_fill_div--;
+ if (binind < NBINS && tcache->lg_fill_div[binind] > 1) {
+ tcache->lg_fill_div[binind]--;
+ }
}
tbin->low_water = tbin->ncached;
tcache->next_gc_bin++;
- if (tcache->next_gc_bin == nhbins)
+ if (tcache->next_gc_bin == nhbins) {
tcache->next_gc_bin = 0;
+ }
}
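
tcache_event_hard() now keeps the per-bin fill divisor in the tcache itself: the fill count is ncached_max >> lg_fill_div, halved when a bin keeps unused items over a GC pass and doubled again when it runs dry, with lg_fill_div never dropping below 1. A small stand-alone sketch of that arithmetic, with made-up numbers:

#include <stdio.h>

static unsigned fill_count(unsigned ncached_max, unsigned lg_fill_div) {
	return ncached_max >> lg_fill_div;
}

int main(void) {
	unsigned ncached_max = 200, lg_fill_div = 1;

	/* Bin kept unused items over a GC pass: halve the fill count. */
	if ((ncached_max >> (lg_fill_div + 1)) >= 1) {
		lg_fill_div++;
	}
	printf("after shrink: fill %u\n", fill_count(ncached_max, lg_fill_div));

	/* Bin ran dry: double the fill count, keeping lg_fill_div > 0. */
	if (lg_fill_div > 1) {
		lg_fill_div--;
	}
	printf("after grow: fill %u\n", fill_count(ncached_max, lg_fill_div));
	return 0;
}
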
void *
tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
- tcache_bin_t *tbin, szind_t binind, bool *tcache_success)
-{
+ tcache_bin_t *tbin, szind_t binind, bool *tcache_success) {
void *ret;
- arena_tcache_fill_small(tsdn, arena, tbin, binind, config_prof ?
- tcache->prof_accumbytes : 0);
- if (config_prof)
+ assert(tcache->arena != NULL);
+ arena_tcache_fill_small(tsdn, arena, tcache, tbin, binind,
+ config_prof ? tcache->prof_accumbytes : 0);
+ if (config_prof) {
tcache->prof_accumbytes = 0;
+ }
ret = tcache_alloc_easy(tbin, tcache_success);
- return (ret);
+ return ret;
}
void
tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
- szind_t binind, unsigned rem)
-{
- arena_t *arena;
- void *ptr;
- unsigned i, nflush, ndeferred;
+ szind_t binind, unsigned rem) {
bool merged_stats = false;
assert(binind < NBINS);
assert(rem <= tbin->ncached);
- arena = arena_choose(tsd, NULL);
+ arena_t *arena = tcache->arena;
assert(arena != NULL);
- for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
+ unsigned nflush = tbin->ncached - rem;
+ VARIABLE_ARRAY(extent_t *, item_extent, nflush);
+ /* Look up extent once per item. */
+ for (unsigned i = 0 ; i < nflush; i++) {
+ item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i));
+ }
+
+ while (nflush > 0) {
/* Lock the arena bin associated with the first object. */
- arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
- *(tbin->avail - 1));
- arena_t *bin_arena = extent_node_arena_get(&chunk->node);
+ extent_t *extent = item_extent[0];
+ arena_t *bin_arena = extent_arena_get(extent);
arena_bin_t *bin = &bin_arena->bins[binind];
if (config_prof && bin_arena == arena) {
if (arena_prof_accum(tsd_tsdn(tsd), arena,
- tcache->prof_accumbytes))
+ tcache->prof_accumbytes)) {
prof_idump(tsd_tsdn(tsd));
+ }
tcache->prof_accumbytes = 0;
}
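
The flush loops above were rewritten around a deferral pass: each iteration locks the arena owning the first remaining item, flushes everything owned by that arena, and compacts the rest to the front for the next pass. A self-contained sketch of that control flow (owner_of() and the item array are hypothetical):

#include <stdio.h>

static int owner_of(int item) {
	return item % 3;	/* Pretend arena index. */
}

int main(void) {
	int items[] = {3, 1, 4, 1, 5, 9, 2, 6};
	unsigned nflush = sizeof(items) / sizeof(items[0]);

	while (nflush > 0) {
		int locked_owner = owner_of(items[0]);
		unsigned ndeferred = 0;
		for (unsigned i = 0; i < nflush; i++) {
			if (owner_of(items[i]) == locked_owner) {
				printf("flush %d to arena %d\n", items[i],
				    locked_owner);
			} else {
				/* Owned elsewhere: defer to a later pass. */
				items[ndeferred++] = items[i];
			}
		}
		nflush = ndeferred;
	}
	return 0;
}
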
@@ -124,18 +139,15 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
bin->stats.nrequests += tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
}
- ndeferred = 0;
- for (i = 0; i < nflush; i++) {
- ptr = *(tbin->avail - 1 - i);
- assert(ptr != NULL);
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- if (extent_node_arena_get(&chunk->node) == bin_arena) {
- size_t pageind = ((uintptr_t)ptr -
- (uintptr_t)chunk) >> LG_PAGE;
- arena_chunk_map_bits_t *bitselm =
- arena_bitselm_get_mutable(chunk, pageind);
+ unsigned ndeferred = 0;
+ for (unsigned i = 0; i < nflush; i++) {
+ void *ptr = *(tbin->avail - 1 - i);
+ extent = item_extent[i];
+ assert(ptr != NULL && extent != NULL);
+
+ if (extent_arena_get(extent) == bin_arena) {
arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
- bin_arena, chunk, ptr, bitselm);
+ bin_arena, extent, ptr);
} else {
/*
* This object was allocated via a different
@@ -144,11 +156,13 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
* handled in a future pass.
*/
*(tbin->avail - 1 - ndeferred) = ptr;
+ item_extent[ndeferred] = extent;
ndeferred++;
}
}
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred);
+ nflush = ndeferred;
}
if (config_stats && !merged_stats) {
/*
@@ -166,58 +180,72 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
sizeof(void *));
tbin->ncached = rem;
- if ((int)tbin->ncached < tbin->low_water)
+ if ((low_water_t)tbin->ncached < tbin->low_water) {
tbin->low_water = tbin->ncached;
+ }
}
void
tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
- unsigned rem, tcache_t *tcache)
-{
- arena_t *arena;
- void *ptr;
- unsigned i, nflush, ndeferred;
+ unsigned rem, tcache_t *tcache) {
bool merged_stats = false;
assert(binind < nhbins);
assert(rem <= tbin->ncached);
- arena = arena_choose(tsd, NULL);
+ arena_t *arena = tcache->arena;
assert(arena != NULL);
- for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
+ unsigned nflush = tbin->ncached - rem;
+ VARIABLE_ARRAY(extent_t *, item_extent, nflush);
+ /* Look up extent once per item. */
+ for (unsigned i = 0 ; i < nflush; i++) {
+ item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i));
+ }
+
+ while (nflush > 0) {
/* Lock the arena associated with the first object. */
- arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
- *(tbin->avail - 1));
- arena_t *locked_arena = extent_node_arena_get(&chunk->node);
+ extent_t *extent = item_extent[0];
+ arena_t *locked_arena = extent_arena_get(extent);
UNUSED bool idump;
- if (config_prof)
+ if (config_prof) {
idump = false;
- malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->lock);
+ }
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->large_mtx);
+ for (unsigned i = 0; i < nflush; i++) {
+ void *ptr = *(tbin->avail - 1 - i);
+ assert(ptr != NULL);
+ extent = item_extent[i];
+ if (extent_arena_get(extent) == locked_arena) {
+ large_dalloc_prep_junked_locked(tsd_tsdn(tsd),
+ extent);
+ }
+ }
if ((config_prof || config_stats) && locked_arena == arena) {
if (config_prof) {
- idump = arena_prof_accum_locked(arena,
+ idump = arena_prof_accum(tsd_tsdn(tsd), arena,
tcache->prof_accumbytes);
tcache->prof_accumbytes = 0;
}
if (config_stats) {
merged_stats = true;
- arena->stats.nrequests_large +=
- tbin->tstats.nrequests;
- arena->stats.lstats[binind - NBINS].nrequests +=
- tbin->tstats.nrequests;
+ arena_stats_large_nrequests_add(tsd_tsdn(tsd),
+ &arena->stats, binind,
+ tbin->tstats.nrequests);
tbin->tstats.nrequests = 0;
}
}
- ndeferred = 0;
- for (i = 0; i < nflush; i++) {
- ptr = *(tbin->avail - 1 - i);
- assert(ptr != NULL);
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- if (extent_node_arena_get(&chunk->node) ==
- locked_arena) {
- arena_dalloc_large_junked_locked(tsd_tsdn(tsd),
- locked_arena, chunk, ptr);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->large_mtx);
+
+ unsigned ndeferred = 0;
+ for (unsigned i = 0; i < nflush; i++) {
+ void *ptr = *(tbin->avail - 1 - i);
+ extent = item_extent[i];
+ assert(ptr != NULL && extent != NULL);
+
+ if (extent_arena_get(extent) == locked_arena) {
+ large_dalloc_finish(tsd_tsdn(tsd), extent);
} else {
/*
* This object was allocated via a different
@@ -226,55 +254,56 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
* in a future pass.
*/
*(tbin->avail - 1 - ndeferred) = ptr;
+ item_extent[ndeferred] = extent;
ndeferred++;
}
}
- malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->lock);
- if (config_prof && idump)
+ if (config_prof && idump) {
prof_idump(tsd_tsdn(tsd));
+ }
arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush -
ndeferred);
+ nflush = ndeferred;
}
if (config_stats && !merged_stats) {
/*
* The flush loop didn't happen to flush to this thread's
* arena, so the stats didn't get merged. Manually do so now.
*/
- malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
- arena->stats.nrequests_large += tbin->tstats.nrequests;
- arena->stats.lstats[binind - NBINS].nrequests +=
- tbin->tstats.nrequests;
+ arena_stats_large_nrequests_add(tsd_tsdn(tsd), &arena->stats,
+ binind, tbin->tstats.nrequests);
tbin->tstats.nrequests = 0;
- malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
}
memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
sizeof(void *));
tbin->ncached = rem;
- if ((int)tbin->ncached < tbin->low_water)
+ if ((low_water_t)tbin->ncached < tbin->low_water) {
tbin->low_water = tbin->ncached;
+ }
}
-static void
-tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
-{
+void
+tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
+ assert(tcache->arena == NULL);
+ tcache->arena = arena;
if (config_stats) {
/* Link into list of extant tcaches. */
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
ql_elm_new(tcache, link);
ql_tail_insert(&arena->tcache_ql, tcache, link);
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
}
}
static void
-tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
-{
-
+tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache) {
+ arena_t *arena = tcache->arena;
+ assert(arena != NULL);
if (config_stats) {
/* Unlink from list of extant tcaches. */
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
if (config_debug) {
bool in_ql = false;
tcache_t *iter;
@@ -288,150 +317,216 @@ tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
}
ql_remove(&arena->tcache_ql, tcache, link);
tcache_stats_merge(tsdn, tcache, arena);
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
}
+ tcache->arena = NULL;
}
void
-tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *oldarena,
- arena_t *newarena)
-{
+tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
+ tcache_arena_dissociate(tsdn, tcache);
+ tcache_arena_associate(tsdn, tcache, arena);
+}
+
+bool
+tsd_tcache_enabled_data_init(tsd_t *tsd) {
+ /* Called upon tsd initialization. */
+ tsd_tcache_enabled_set(tsd, opt_tcache);
+ tsd_slow_update(tsd);
+
+ if (opt_tcache) {
+ /* Trigger tcache init. */
+ tsd_tcache_data_init(tsd);
+ }
- tcache_arena_dissociate(tsdn, tcache, oldarena);
- tcache_arena_associate(tsdn, tcache, newarena);
+ return false;
}
-tcache_t *
-tcache_get_hard(tsd_t *tsd)
-{
- arena_t *arena;
+/* Initialize auto tcache (embedded in TSD). */
+static void
+tcache_init(tsd_t *tsd, tcache_t *tcache, void *avail_stack) {
+ memset(&tcache->link, 0, sizeof(ql_elm(tcache_t)));
+ tcache->prof_accumbytes = 0;
+ tcache->next_gc_bin = 0;
+ tcache->arena = NULL;
+
+ ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR);
- if (!tcache_enabled_get()) {
- if (tsd_nominal(tsd))
- tcache_enabled_set(false); /* Memoize. */
- return (NULL);
+ size_t stack_offset = 0;
+ assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
+ memset(tcache->tbins_small, 0, sizeof(tcache_bin_t) * NBINS);
+ memset(tcache->tbins_large, 0, sizeof(tcache_bin_t) * (nhbins - NBINS));
+ unsigned i = 0;
+ for (; i < NBINS; i++) {
+ tcache->lg_fill_div[i] = 1;
+ stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
+ /*
+ * avail points past the available space. Allocations will
+ * access the slots toward higher addresses (for the benefit of
+ * prefetch).
+ */
+ tcache_small_bin_get(tcache, i)->avail =
+ (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset);
+ }
+ for (; i < nhbins; i++) {
+ stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
+ tcache_large_bin_get(tcache, i)->avail =
+ (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset);
}
- arena = arena_choose(tsd, NULL);
- if (unlikely(arena == NULL))
- return (NULL);
- return (tcache_create(tsd_tsdn(tsd), arena));
+ assert(stack_offset == stack_nelms * sizeof(void *));
}
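
tcache_init() carves one contiguous pointer stack into per-bin slices and points each bin's avail one past its slice, so cached objects are pushed at avail[-1], avail[-2], and so on. A stand-alone sketch of that layout with made-up capacities:

#include <stdio.h>
#include <stdlib.h>

int main(void) {
	const unsigned ncached_max[] = {4, 8, 16};	/* Per-bin capacities. */
	const unsigned nbins = 3;

	unsigned total = 0;
	for (unsigned i = 0; i < nbins; i++) {
		total += ncached_max[i];
	}
	void **stack = calloc(total, sizeof(void *));
	if (stack == NULL) {
		return 1;
	}

	/* Each bin's avail points one past its slice of the shared stack. */
	void **avail[3];
	unsigned offset = 0;
	for (unsigned i = 0; i < nbins; i++) {
		offset += ncached_max[i];
		avail[i] = stack + offset;
	}

	/* Caching an object in bin 1 fills slots toward lower addresses. */
	int obj;
	*(avail[1] - 1) = &obj;

	printf("bin 1 avail sits %td slots into the stack\n", avail[1] - stack);
	free(stack);
	return 0;
}
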
+/* Initialize auto tcache (embedded in TSD). */
+bool
+tsd_tcache_data_init(tsd_t *tsd) {
+ tcache_t *tcache = tsd_tcachep_get_unsafe(tsd);
+ assert(tcache_small_bin_get(tcache, 0)->avail == NULL);
+ size_t size = stack_nelms * sizeof(void *);
+ /* Avoid false cacheline sharing. */
+ size = sz_sa2u(size, CACHELINE);
+
+ void *avail_array = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true,
+ NULL, true, arena_get(TSDN_NULL, 0, true));
+ if (avail_array == NULL) {
+ return true;
+ }
+
+ tcache_init(tsd, tcache, avail_array);
+ /*
+ * Initialization is a bit tricky here. After malloc init is done, all
+ * threads can rely on arena_choose and associate tcache accordingly.
+ * However, the thread that does actual malloc bootstrapping relies on
+ * functional tsd, and it can only rely on a0. In that case, we
+ * associate its tcache to a0 temporarily, and later on
+ * arena_choose_hard() will re-associate properly.
+ */
+ tcache->arena = NULL;
+ arena_t *arena;
+ if (!malloc_initialized()) {
+ /* If in initialization, assign to a0. */
+ arena = arena_get(tsd_tsdn(tsd), 0, false);
+ tcache_arena_associate(tsd_tsdn(tsd), tcache, arena);
+ } else {
+ arena = arena_choose(tsd, NULL);
+ /* This may happen if thread.tcache.enabled is used. */
+ if (tcache->arena == NULL) {
+ tcache_arena_associate(tsd_tsdn(tsd), tcache, arena);
+ }
+ }
+ assert(arena == tcache->arena);
+
+ return false;
+}
+
+/* Created manual tcache for tcache.create mallctl. */
tcache_t *
-tcache_create(tsdn_t *tsdn, arena_t *arena)
-{
+tcache_create_explicit(tsd_t *tsd) {
tcache_t *tcache;
size_t size, stack_offset;
- unsigned i;
- size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins);
+ size = sizeof(tcache_t);
/* Naturally align the pointer stacks. */
size = PTR_CEILING(size);
stack_offset = size;
size += stack_nelms * sizeof(void *);
/* Avoid false cacheline sharing. */
- size = sa2u(size, CACHELINE);
+ size = sz_sa2u(size, CACHELINE);
- tcache = ipallocztm(tsdn, size, CACHELINE, true, NULL, true,
+ tcache = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true, NULL, true,
arena_get(TSDN_NULL, 0, true));
- if (tcache == NULL)
- return (NULL);
-
- tcache_arena_associate(tsdn, tcache, arena);
-
- ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR);
-
- assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
- for (i = 0; i < nhbins; i++) {
- tcache->tbins[i].lg_fill_div = 1;
- stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
- /*
- * avail points past the available space. Allocations will
- * access the slots toward higher addresses (for the benefit of
- * prefetch).
- */
- tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
- (uintptr_t)stack_offset);
+ if (tcache == NULL) {
+ return NULL;
}
- return (tcache);
+ tcache_init(tsd, tcache,
+ (void *)((uintptr_t)tcache + (uintptr_t)stack_offset));
+ tcache_arena_associate(tsd_tsdn(tsd), tcache, arena_ichoose(tsd, NULL));
+
+ return tcache;
}
static void
-tcache_destroy(tsd_t *tsd, tcache_t *tcache)
-{
- arena_t *arena;
- unsigned i;
-
- arena = arena_choose(tsd, NULL);
- tcache_arena_dissociate(tsd_tsdn(tsd), tcache, arena);
+tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) {
+ assert(tcache->arena != NULL);
- for (i = 0; i < NBINS; i++) {
- tcache_bin_t *tbin = &tcache->tbins[i];
+ for (unsigned i = 0; i < NBINS; i++) {
+ tcache_bin_t *tbin = tcache_small_bin_get(tcache, i);
tcache_bin_flush_small(tsd, tcache, tbin, i, 0);
- if (config_stats && tbin->tstats.nrequests != 0) {
- arena_bin_t *bin = &arena->bins[i];
- malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
- bin->stats.nrequests += tbin->tstats.nrequests;
- malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+ if (config_stats) {
+ assert(tbin->tstats.nrequests == 0);
}
}
-
- for (; i < nhbins; i++) {
- tcache_bin_t *tbin = &tcache->tbins[i];
+ for (unsigned i = NBINS; i < nhbins; i++) {
+ tcache_bin_t *tbin = tcache_large_bin_get(tcache, i);
tcache_bin_flush_large(tsd, tbin, i, 0, tcache);
- if (config_stats && tbin->tstats.nrequests != 0) {
- malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
- arena->stats.nrequests_large += tbin->tstats.nrequests;
- arena->stats.lstats[i - NBINS].nrequests +=
- tbin->tstats.nrequests;
- malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
+ if (config_stats) {
+ assert(tbin->tstats.nrequests == 0);
}
}
if (config_prof && tcache->prof_accumbytes > 0 &&
- arena_prof_accum(tsd_tsdn(tsd), arena, tcache->prof_accumbytes))
+ arena_prof_accum(tsd_tsdn(tsd), tcache->arena,
+ tcache->prof_accumbytes)) {
prof_idump(tsd_tsdn(tsd));
-
- idalloctm(tsd_tsdn(tsd), tcache, NULL, true, true);
+ }
}
void
-tcache_cleanup(tsd_t *tsd)
-{
- tcache_t *tcache;
-
- if (!config_tcache)
- return;
+tcache_flush(void) {
+ tsd_t *tsd = tsd_fetch();
+ assert(tcache_available(tsd));
+ tcache_flush_cache(tsd, tsd_tcachep_get(tsd));
+}
- if ((tcache = tsd_tcache_get(tsd)) != NULL) {
- tcache_destroy(tsd, tcache);
- tsd_tcache_set(tsd, NULL);
+static void
+tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) {
+ tcache_flush_cache(tsd, tcache);
+ tcache_arena_dissociate(tsd_tsdn(tsd), tcache);
+
+ if (tsd_tcache) {
+ /* Release the avail array for the TSD embedded auto tcache. */
+ void *avail_array =
+ (void *)((uintptr_t)tcache_small_bin_get(tcache, 0)->avail -
+ (uintptr_t)tcache_bin_info[0].ncached_max * sizeof(void *));
+ idalloctm(tsd_tsdn(tsd), avail_array, NULL, NULL, true, true);
+ } else {
+ /* Release both the tcache struct and avail array. */
+ idalloctm(tsd_tsdn(tsd), tcache, NULL, NULL, true, true);
}
}
+/* For auto tcache (embedded in TSD) only. */
void
-tcache_enabled_cleanup(tsd_t *tsd)
-{
+tcache_cleanup(tsd_t *tsd) {
+ tcache_t *tcache = tsd_tcachep_get(tsd);
+ if (!tcache_available(tsd)) {
+ assert(tsd_tcache_enabled_get(tsd) == false);
+ if (config_debug) {
+ assert(tcache_small_bin_get(tcache, 0)->avail == NULL);
+ }
+ return;
+ }
+ assert(tsd_tcache_enabled_get(tsd));
+ assert(tcache_small_bin_get(tcache, 0)->avail != NULL);
- /* Do nothing. */
+ tcache_destroy(tsd, tcache, true);
+ if (config_debug) {
+ tcache_small_bin_get(tcache, 0)->avail = NULL;
+ }
}
void
-tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
-{
+tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
unsigned i;
cassert(config_stats);
- malloc_mutex_assert_owner(tsdn, &arena->lock);
-
/* Merge and reset tcache stats. */
for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i];
- tcache_bin_t *tbin = &tcache->tbins[i];
+ tcache_bin_t *tbin = tcache_small_bin_get(tcache, i);
malloc_mutex_lock(tsdn, &bin->lock);
bin->stats.nrequests += tbin->tstats.nrequests;
malloc_mutex_unlock(tsdn, &bin->lock);
@@ -439,10 +534,9 @@ tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
}
for (; i < nhbins; i++) {
- malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS];
- tcache_bin_t *tbin = &tcache->tbins[i];
- arena->stats.nrequests_large += tbin->tstats.nrequests;
- lstats->nrequests += tbin->tstats.nrequests;
+ tcache_bin_t *tbin = tcache_large_bin_get(tcache, i);
+ arena_stats_large_nrequests_add(tsdn, &arena->stats, i,
+ tbin->tstats.nrequests);
tbin->tstats.nrequests = 0;
}
}
@@ -454,8 +548,8 @@ tcaches_create_prep(tsd_t *tsd) {
malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
if (tcaches == NULL) {
- tcaches = base_alloc(tsd_tsdn(tsd), sizeof(tcache_t *) *
- (MALLOCX_TCACHE_MAX+1));
+ tcaches = base_alloc(tsd_tsdn(tsd), b0get(), sizeof(tcache_t *)
+ * (MALLOCX_TCACHE_MAX+1), CACHELINE);
if (tcaches == NULL) {
err = true;
goto label_return;
@@ -475,27 +569,22 @@ label_return:
bool
tcaches_create(tsd_t *tsd, unsigned *r_ind) {
+ witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0);
+
bool err;
- arena_t *arena;
- tcache_t *tcache;
- tcaches_t *elm;
if (tcaches_create_prep(tsd)) {
err = true;
goto label_return;
}
- arena = arena_ichoose(tsd, NULL);
- if (unlikely(arena == NULL)) {
- err = true;
- goto label_return;
- }
- tcache = tcache_create(tsd_tsdn(tsd), arena);
+ tcache_t *tcache = tcache_create_explicit(tsd);
if (tcache == NULL) {
err = true;
goto label_return;
}
+ tcaches_t *elm;
malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
if (tcaches_avail != NULL) {
elm = tcaches_avail;
@@ -512,69 +601,70 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind) {
err = false;
label_return:
- malloc_mutex_assert_not_owner(tsd_tsdn(tsd), &tcaches_mtx);
+ witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0);
return err;
}
-static void
-tcaches_elm_flush(tsd_t *tsd, tcaches_t *elm) {
+static tcache_t *
+tcaches_elm_remove(tsd_t *tsd, tcaches_t *elm) {
malloc_mutex_assert_owner(tsd_tsdn(tsd), &tcaches_mtx);
if (elm->tcache == NULL) {
- return;
+ return NULL;
}
- tcache_destroy(tsd, elm->tcache);
+ tcache_t *tcache = elm->tcache;
elm->tcache = NULL;
+ return tcache;
}
void
tcaches_flush(tsd_t *tsd, unsigned ind) {
malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
- tcaches_elm_flush(tsd, &tcaches[ind]);
+ tcache_t *tcache = tcaches_elm_remove(tsd, &tcaches[ind]);
malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
+ if (tcache != NULL) {
+ tcache_destroy(tsd, tcache, false);
+ }
}
void
tcaches_destroy(tsd_t *tsd, unsigned ind) {
- tcaches_t *elm;
-
malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
- elm = &tcaches[ind];
- tcaches_elm_flush(tsd, elm);
+ tcaches_t *elm = &tcaches[ind];
+ tcache_t *tcache = tcaches_elm_remove(tsd, elm);
elm->next = tcaches_avail;
tcaches_avail = elm;
malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
+ if (tcache != NULL) {
+ tcache_destroy(tsd, tcache, false);
+ }
}
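
tcaches_flush() and tcaches_destroy() now detach the tcache while holding tcaches_mtx and call tcache_destroy() only after dropping it. A generic sketch of that detach-under-lock, destroy-outside-lock pattern (the registry and pthread mutex here are stand-ins, not jemalloc's internals):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t registry_mtx = PTHREAD_MUTEX_INITIALIZER;
static void *registry[8];

static void destroy(void *obj) {
	/* Heavy work, done without holding registry_mtx. */
	free(obj);
}

static void registry_destroy(unsigned ind) {
	pthread_mutex_lock(&registry_mtx);
	void *obj = registry[ind];	/* Detach under the lock. */
	registry[ind] = NULL;
	pthread_mutex_unlock(&registry_mtx);

	if (obj != NULL) {
		destroy(obj);
	}
}

int main(void) {
	registry[0] = malloc(16);
	registry_destroy(0);
	return 0;
}
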
bool
tcache_boot(tsdn_t *tsdn) {
- unsigned i;
-
- cassert(config_tcache);
-
- /*
- * If necessary, clamp opt_lg_tcache_max, now that large_maxclass is
- * known.
- */
- if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) < SMALL_MAXCLASS)
+ /* If necessary, clamp opt_lg_tcache_max. */
+ if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) <
+ SMALL_MAXCLASS) {
tcache_maxclass = SMALL_MAXCLASS;
- else if ((ZU(1) << opt_lg_tcache_max) > large_maxclass)
- tcache_maxclass = large_maxclass;
- else
+ } else {
tcache_maxclass = (ZU(1) << opt_lg_tcache_max);
+ }
- if (malloc_mutex_init(&tcaches_mtx, "tcaches", WITNESS_RANK_TCACHES)) {
+ if (malloc_mutex_init(&tcaches_mtx, "tcaches", WITNESS_RANK_TCACHES,
+ malloc_mutex_rank_exclusive)) {
return true;
}
- nhbins = size2index(tcache_maxclass) + 1;
+ nhbins = sz_size2index(tcache_maxclass) + 1;
/* Initialize tcache_bin_info. */
- tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsdn, nhbins *
- sizeof(tcache_bin_info_t));
- if (tcache_bin_info == NULL)
- return (true);
+ tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsdn, b0get(), nhbins
+ * sizeof(tcache_bin_info_t), CACHELINE);
+ if (tcache_bin_info == NULL) {
+ return true;
+ }
stack_nelms = 0;
+ unsigned i;
for (i = 0; i < NBINS; i++) {
if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
tcache_bin_info[i].ncached_max =
@@ -594,7 +684,7 @@ tcache_boot(tsdn_t *tsdn) {
stack_nelms += tcache_bin_info[i].ncached_max;
}
- return (false);
+ return false;
}
void
diff --git a/contrib/jemalloc/src/ticker.c b/contrib/jemalloc/src/ticker.c
index db0902404ef2..d7b8cd26c068 100644
--- a/contrib/jemalloc/src/ticker.c
+++ b/contrib/jemalloc/src/ticker.c
@@ -1,2 +1,3 @@
-#define JEMALLOC_TICKER_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define JEMALLOC_TICKER_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
diff --git a/contrib/jemalloc/src/tsd.c b/contrib/jemalloc/src/tsd.c
index ec69a51c3d36..97330332d119 100644
--- a/contrib/jemalloc/src/tsd.c
+++ b/contrib/jemalloc/src/tsd.c
@@ -1,5 +1,10 @@
-#define JEMALLOC_TSD_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define JEMALLOC_TSD_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/rtree.h"
/******************************************************************************/
/* Data. */
@@ -7,29 +12,137 @@
static unsigned ncleanups;
static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX];
-malloc_tsd_data(, , tsd_t, TSD_INITIALIZER)
+#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
+__thread tsd_t JEMALLOC_TLS_MODEL tsd_tls = TSD_INITIALIZER;
+__thread bool JEMALLOC_TLS_MODEL tsd_initialized = false;
+bool tsd_booted = false;
+#elif (defined(JEMALLOC_TLS))
+__thread tsd_t JEMALLOC_TLS_MODEL tsd_tls = TSD_INITIALIZER;
+pthread_key_t tsd_tsd;
+bool tsd_booted = false;
+#elif (defined(_WIN32))
+DWORD tsd_tsd;
+tsd_wrapper_t tsd_boot_wrapper = {false, TSD_INITIALIZER};
+bool tsd_booted = false;
+#else
+
+/*
+ * This contains a mutex, but it's pretty convenient to allow the mutex code to
+ * have a dependency on tsd. So we define the struct here, and only refer to it
+ * by pointer in the header.
+ */
+struct tsd_init_head_s {
+ ql_head(tsd_init_block_t) blocks;
+ malloc_mutex_t lock;
+};
+
+pthread_key_t tsd_tsd;
+tsd_init_head_t tsd_init_head = {
+ ql_head_initializer(blocks),
+ MALLOC_MUTEX_INITIALIZER
+};
+tsd_wrapper_t tsd_boot_wrapper = {
+ false,
+ TSD_INITIALIZER
+};
+bool tsd_booted = false;
+#endif
+
/******************************************************************************/
-void *
-malloc_tsd_malloc(size_t size)
-{
+void
+tsd_slow_update(tsd_t *tsd) {
+ if (tsd_nominal(tsd)) {
+ if (malloc_slow || !tsd_tcache_enabled_get(tsd) ||
+ tsd_reentrancy_level_get(tsd) > 0) {
+ tsd->state = tsd_state_nominal_slow;
+ } else {
+ tsd->state = tsd_state_nominal;
+ }
+ }
+}
- return (a0malloc(CACHELINE_CEILING(size)));
+static bool
+tsd_data_init(tsd_t *tsd) {
+ /*
+ * We initialize the rtree context first (before the tcache), since the
+ * tcache initialization depends on it.
+ */
+ rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd));
+
+ return tsd_tcache_enabled_data_init(tsd);
}
-void
-malloc_tsd_dalloc(void *wrapper)
-{
+static void
+assert_tsd_data_cleanup_done(tsd_t *tsd) {
+ assert(!tsd_nominal(tsd));
+ assert(*tsd_arenap_get_unsafe(tsd) == NULL);
+ assert(*tsd_iarenap_get_unsafe(tsd) == NULL);
+ assert(*tsd_arenas_tdata_bypassp_get_unsafe(tsd) == true);
+ assert(*tsd_arenas_tdatap_get_unsafe(tsd) == NULL);
+ assert(*tsd_tcache_enabledp_get_unsafe(tsd) == false);
+ assert(*tsd_prof_tdatap_get_unsafe(tsd) == NULL);
+}
- a0dalloc(wrapper);
+static bool
+tsd_data_init_nocleanup(tsd_t *tsd) {
+ assert(tsd->state == tsd_state_reincarnated);
+ /*
+ * During reincarnation, there is no guarantee that the cleanup function
+ * will be called (deallocation may happen after all tsd destructors).
+ * We set up tsd in a way that no cleanup is needed.
+ */
+ rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd));
+ *tsd_arenas_tdata_bypassp_get(tsd) = true;
+ *tsd_tcache_enabledp_get_unsafe(tsd) = false;
+ *tsd_reentrancy_levelp_get(tsd) = 1;
+ assert_tsd_data_cleanup_done(tsd);
+
+ return false;
}
-void
-malloc_tsd_no_cleanup(void *arg)
-{
+tsd_t *
+tsd_fetch_slow(tsd_t *tsd, bool internal) {
+ if (internal) {
+ /* For internal background threads use only. */
+ assert(tsd->state == tsd_state_uninitialized);
+ tsd->state = tsd_state_reincarnated;
+ tsd_set(tsd);
+ tsd_data_init_nocleanup(tsd);
+ return tsd;
+ }
+
+ if (tsd->state == tsd_state_nominal_slow) {
+ /* On slow path but no work needed. */
+ assert(malloc_slow || !tsd_tcache_enabled_get(tsd) ||
+ tsd_reentrancy_level_get(tsd) > 0 ||
+ *tsd_arenas_tdata_bypassp_get(tsd));
+ } else if (tsd->state == tsd_state_uninitialized) {
+ tsd->state = tsd_state_nominal;
+ tsd_slow_update(tsd);
+ /* Trigger cleanup handler registration. */
+ tsd_set(tsd);
+ tsd_data_init(tsd);
+ } else if (tsd->state == tsd_state_purgatory) {
+ tsd->state = tsd_state_reincarnated;
+ tsd_set(tsd);
+ tsd_data_init_nocleanup(tsd);
+ } else {
+ assert(tsd->state == tsd_state_reincarnated);
+ }
+
+ return tsd;
+}
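
tsd_fetch_slow() implements the slow-path transitions of the TSD state machine: uninitialized threads become nominal, and threads whose destructor already ran come back as reincarnated with no-cleanup data. A loose, hypothetical simplification of those transitions (not jemalloc's actual types):

#include <stdio.h>

typedef enum {
	STATE_UNINITIALIZED,
	STATE_NOMINAL,
	STATE_NOMINAL_SLOW,
	STATE_PURGATORY,
	STATE_REINCARNATED
} tsd_state;

static tsd_state fetch_slow(tsd_state state) {
	switch (state) {
	case STATE_UNINITIALIZED:
		return STATE_NOMINAL;		/* Full data init + cleanup hook. */
	case STATE_PURGATORY:
		return STATE_REINCARNATED;	/* Minimal, no-cleanup init. */
	case STATE_NOMINAL:
	case STATE_NOMINAL_SLOW:
	case STATE_REINCARNATED:
		return state;			/* Nothing to do. */
	}
	return state;
}

int main(void) {
	printf("%d\n", fetch_slow(STATE_UNINITIALIZED));
	printf("%d\n", fetch_slow(STATE_PURGATORY));
	return 0;
}
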
- not_reached();
+void *
+malloc_tsd_malloc(size_t size) {
+ return a0malloc(CACHELINE_CEILING(size));
+}
+
+void
+malloc_tsd_dalloc(void *wrapper) {
+ a0dalloc(wrapper);
}
#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32)
@@ -37,21 +150,22 @@ malloc_tsd_no_cleanup(void *arg)
JEMALLOC_EXPORT
#endif
void
-_malloc_thread_cleanup(void)
-{
+_malloc_thread_cleanup(void) {
bool pending[MALLOC_TSD_CLEANUPS_MAX], again;
unsigned i;
- for (i = 0; i < ncleanups; i++)
+ for (i = 0; i < ncleanups; i++) {
pending[i] = true;
+ }
do {
again = false;
for (i = 0; i < ncleanups; i++) {
if (pending[i]) {
pending[i] = cleanups[i]();
- if (pending[i])
+ if (pending[i]) {
again = true;
+ }
}
}
} while (again);
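
_malloc_thread_cleanup() re-runs the registered cleanup handlers until none of them reports pending work. A self-contained sketch of that run-to-fixpoint loop with hypothetical handlers:

#include <stdbool.h>
#include <stdio.h>

#define NCLEANUPS 2

static int budget = 3;

static bool cleanup_a(void) {
	printf("cleanup_a\n");
	return --budget > 0;	/* "Still pending" while budget remains. */
}

static bool cleanup_b(void) {
	printf("cleanup_b\n");
	return false;		/* Done after one pass. */
}

static bool (*cleanups[NCLEANUPS])(void) = {cleanup_a, cleanup_b};

int main(void) {
	bool pending[NCLEANUPS], again;
	for (unsigned i = 0; i < NCLEANUPS; i++) {
		pending[i] = true;
	}
	do {
		again = false;
		for (unsigned i = 0; i < NCLEANUPS; i++) {
			if (pending[i]) {
				pending[i] = cleanups[i]();
				if (pending[i]) {
					again = true;
				}
			}
		}
	} while (again);
	return 0;
}
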
@@ -59,28 +173,41 @@ _malloc_thread_cleanup(void)
#endif
void
-malloc_tsd_cleanup_register(bool (*f)(void))
-{
-
+malloc_tsd_cleanup_register(bool (*f)(void)) {
assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX);
cleanups[ncleanups] = f;
ncleanups++;
}
+static void
+tsd_do_data_cleanup(tsd_t *tsd) {
+ prof_tdata_cleanup(tsd);
+ iarena_cleanup(tsd);
+ arena_cleanup(tsd);
+ arenas_tdata_cleanup(tsd);
+ tcache_cleanup(tsd);
+ witnesses_cleanup(tsd_witness_tsdp_get_unsafe(tsd));
+}
+
void
-tsd_cleanup(void *arg)
-{
+tsd_cleanup(void *arg) {
tsd_t *tsd = (tsd_t *)arg;
switch (tsd->state) {
case tsd_state_uninitialized:
/* Do nothing. */
break;
+ case tsd_state_reincarnated:
+ /*
+ * Reincarnated means another destructor deallocated memory
+ * after the destructor was called. Cleanup isn't required but
+ * is still called for testing and completeness.
+ */
+ assert_tsd_data_cleanup_done(tsd);
+ /* Fall through. */
case tsd_state_nominal:
-#define O(n, t) \
- n##_cleanup(tsd);
-MALLOC_TSD
-#undef O
+ case tsd_state_nominal_slow:
+ tsd_do_data_cleanup(tsd);
tsd->state = tsd_state_purgatory;
tsd_set(tsd);
break;
@@ -92,46 +219,43 @@ MALLOC_TSD
* nothing, and do not request another callback.
*/
break;
- case tsd_state_reincarnated:
- /*
- * Another destructor deallocated memory after this destructor
- * was called. Reset state to tsd_state_purgatory and request
- * another callback.
- */
- tsd->state = tsd_state_purgatory;
- tsd_set(tsd);
- break;
default:
not_reached();
}
+#ifdef JEMALLOC_JET
+ test_callback_t test_callback = *tsd_test_callbackp_get_unsafe(tsd);
+ int *data = tsd_test_datap_get_unsafe(tsd);
+ if (test_callback != NULL) {
+ test_callback(data);
+ }
+#endif
}
tsd_t *
-malloc_tsd_boot0(void)
-{
+malloc_tsd_boot0(void) {
tsd_t *tsd;
ncleanups = 0;
- if (tsd_boot0())
- return (NULL);
+ if (tsd_boot0()) {
+ return NULL;
+ }
tsd = tsd_fetch();
*tsd_arenas_tdata_bypassp_get(tsd) = true;
- return (tsd);
+ return tsd;
}
void
-malloc_tsd_boot1(void)
-{
-
+malloc_tsd_boot1(void) {
tsd_boot1();
- *tsd_arenas_tdata_bypassp_get(tsd_fetch()) = false;
+ tsd_t *tsd = tsd_fetch();
+ /* malloc_slow has been set properly. Update tsd_slow. */
+ tsd_slow_update(tsd);
+ *tsd_arenas_tdata_bypassp_get(tsd) = false;
}
#ifdef _WIN32
static BOOL WINAPI
-_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved)
-{
-
+_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) {
switch (fdwReason) {
#ifdef JEMALLOC_LAZY_LOCK
case DLL_THREAD_ATTACH:
@@ -144,9 +268,18 @@ _tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved)
default:
break;
}
- return (true);
+ return true;
}
+/*
+ * We need to be able to say "read" here (in the "pragma section"), but have
+ * hooked "read". We won't read for the rest of the file, so we can get away
+ * with unhooking.
+ */
+#ifdef read
+# undef read
+#endif
+
#ifdef _MSC_VER
# ifdef _M_IX86
# pragma comment(linker, "/INCLUDE:__tls_used")
@@ -165,8 +298,7 @@ BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL,
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
!defined(_WIN32))
void *
-tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block)
-{
+tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) {
pthread_t self = pthread_self();
tsd_init_block_t *iter;
@@ -175,7 +307,7 @@ tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block)
ql_foreach(iter, &head->blocks, link) {
if (iter->thread == self) {
malloc_mutex_unlock(TSDN_NULL, &head->lock);
- return (iter->data);
+ return iter->data;
}
}
/* Insert block into list. */
@@ -183,13 +315,11 @@ tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block)
block->thread = self;
ql_tail_insert(&head->blocks, block, link);
malloc_mutex_unlock(TSDN_NULL, &head->lock);
- return (NULL);
+ return NULL;
}
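
tsd_init_check_recursion() guards against re-entrant TSD initialization by recording an in-progress block per thread on a shared list. A simplified, hypothetical sketch of the same guard using plain pthreads:

#include <pthread.h>
#include <stdio.h>

typedef struct init_block {
	struct init_block *next;
	pthread_t thread;
	void *data;
} init_block_t;

static pthread_mutex_t head_lock = PTHREAD_MUTEX_INITIALIZER;
static init_block_t *head;

static void *init_check_recursion(init_block_t *block, void *data) {
	pthread_t self = pthread_self();

	pthread_mutex_lock(&head_lock);
	for (init_block_t *iter = head; iter != NULL; iter = iter->next) {
		if (pthread_equal(iter->thread, self)) {
			pthread_mutex_unlock(&head_lock);
			return iter->data;	/* Recursive call: reuse. */
		}
	}
	block->thread = self;
	block->data = data;
	block->next = head;
	head = block;
	pthread_mutex_unlock(&head_lock);
	return NULL;
}

int main(void) {
	init_block_t block;
	int payload;
	if (init_check_recursion(&block, &payload) == NULL) {
		printf("first entry on this thread\n");
	}
	if (init_check_recursion(&block, &payload) != NULL) {
		printf("recursion detected\n");
	}
	return 0;
}
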
void
-tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block)
-{
-
+tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block) {
malloc_mutex_lock(TSDN_NULL, &head->lock);
ql_remove(&head->blocks, block, link);
malloc_mutex_unlock(TSDN_NULL, &head->lock);
diff --git a/contrib/jemalloc/src/witness.c b/contrib/jemalloc/src/witness.c
index c3a65f7cdcd1..f42b72ad1a2c 100644
--- a/contrib/jemalloc/src/witness.c
+++ b/contrib/jemalloc/src/witness.c
@@ -1,23 +1,22 @@
-#define JEMALLOC_WITNESS_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define JEMALLOC_WITNESS_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/malloc_io.h"
void
witness_init(witness_t *witness, const char *name, witness_rank_t rank,
- witness_comp_t *comp)
-{
-
+ witness_comp_t *comp, void *opaque) {
witness->name = name;
witness->rank = rank;
witness->comp = comp;
+ witness->opaque = opaque;
}
-#ifdef JEMALLOC_JET
-#undef witness_lock_error
-#define witness_lock_error JEMALLOC_N(n_witness_lock_error)
-#endif
-void
-witness_lock_error(const witness_list_t *witnesses, const witness_t *witness)
-{
+static void
+witness_lock_error_impl(const witness_list_t *witnesses,
+ const witness_t *witness) {
witness_t *w;
malloc_printf("<jemalloc>: Lock rank order reversal:");
@@ -27,55 +26,28 @@ witness_lock_error(const witness_list_t *witnesses, const witness_t *witness)
malloc_printf(" %s(%u)\n", witness->name, witness->rank);
abort();
}
-#ifdef JEMALLOC_JET
-#undef witness_lock_error
-#define witness_lock_error JEMALLOC_N(witness_lock_error)
-witness_lock_error_t *witness_lock_error = JEMALLOC_N(n_witness_lock_error);
-#endif
-
-#ifdef JEMALLOC_JET
-#undef witness_owner_error
-#define witness_owner_error JEMALLOC_N(n_witness_owner_error)
-#endif
-void
-witness_owner_error(const witness_t *witness)
-{
+witness_lock_error_t *JET_MUTABLE witness_lock_error = witness_lock_error_impl;
+static void
+witness_owner_error_impl(const witness_t *witness) {
malloc_printf("<jemalloc>: Should own %s(%u)\n", witness->name,
witness->rank);
abort();
}
-#ifdef JEMALLOC_JET
-#undef witness_owner_error
-#define witness_owner_error JEMALLOC_N(witness_owner_error)
-witness_owner_error_t *witness_owner_error = JEMALLOC_N(n_witness_owner_error);
-#endif
-
-#ifdef JEMALLOC_JET
-#undef witness_not_owner_error
-#define witness_not_owner_error JEMALLOC_N(n_witness_not_owner_error)
-#endif
-void
-witness_not_owner_error(const witness_t *witness)
-{
+witness_owner_error_t *JET_MUTABLE witness_owner_error =
+ witness_owner_error_impl;
+static void
+witness_not_owner_error_impl(const witness_t *witness) {
malloc_printf("<jemalloc>: Should not own %s(%u)\n", witness->name,
witness->rank);
abort();
}
-#ifdef JEMALLOC_JET
-#undef witness_not_owner_error
-#define witness_not_owner_error JEMALLOC_N(witness_not_owner_error)
-witness_not_owner_error_t *witness_not_owner_error =
- JEMALLOC_N(n_witness_not_owner_error);
-#endif
+witness_not_owner_error_t *JET_MUTABLE witness_not_owner_error =
+ witness_not_owner_error_impl;
-#ifdef JEMALLOC_JET
-#undef witness_depth_error
-#define witness_depth_error JEMALLOC_N(n_witness_depth_error)
-#endif
-void
-witness_depth_error(const witness_list_t *witnesses,
+static void
+witness_depth_error_impl(const witness_list_t *witnesses,
witness_rank_t rank_inclusive, unsigned depth) {
witness_t *w;
@@ -87,50 +59,42 @@ witness_depth_error(const witness_list_t *witnesses,
malloc_printf("\n");
abort();
}
-#ifdef JEMALLOC_JET
-#undef witness_depth_error
-#define witness_depth_error JEMALLOC_N(witness_depth_error)
-witness_depth_error_t *witness_depth_error = JEMALLOC_N(n_witness_depth_error);
-#endif
+witness_depth_error_t *JET_MUTABLE witness_depth_error =
+ witness_depth_error_impl;
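
The JEMALLOC_JET rename dance is replaced by JET_MUTABLE function pointers: the default handler stays static, and test code overrides the pointer instead of relinking a renamed symbol. A stand-alone sketch of that hook style (the names here are hypothetical):

#include <stdio.h>
#include <stdlib.h>

typedef void (lock_error_t)(const char *name);

static void lock_error_impl(const char *name) {
	printf("<sketch>: lock order problem on %s\n", name);
	abort();
}

/* In a test build this pointer can be repointed at a non-aborting stub. */
lock_error_t *lock_error = lock_error_impl;

static void test_stub(const char *name) {
	printf("caught error on %s without aborting\n", name);
}

int main(void) {
	lock_error = test_stub;	/* What a unit test would do. */
	lock_error("demo_mutex");
	return 0;
}
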
void
-witnesses_cleanup(tsd_t *tsd)
-{
-
- witness_assert_lockless(tsd_tsdn(tsd));
+witnesses_cleanup(witness_tsd_t *witness_tsd) {
+ witness_assert_lockless(witness_tsd_tsdn(witness_tsd));
/* Do nothing. */
}
void
-witness_fork_cleanup(tsd_t *tsd)
-{
-
- /* Do nothing. */
-}
-
-void
-witness_prefork(tsd_t *tsd)
-{
-
- tsd_witness_fork_set(tsd, true);
+witness_prefork(witness_tsd_t *witness_tsd) {
+ if (!config_debug) {
+ return;
+ }
+ witness_tsd->forking = true;
}
void
-witness_postfork_parent(tsd_t *tsd)
-{
-
- tsd_witness_fork_set(tsd, false);
+witness_postfork_parent(witness_tsd_t *witness_tsd) {
+ if (!config_debug) {
+ return;
+ }
+ witness_tsd->forking = false;
}
void
-witness_postfork_child(tsd_t *tsd)
-{
+witness_postfork_child(witness_tsd_t *witness_tsd) {
+ if (!config_debug) {
+ return;
+ }
#ifndef JEMALLOC_MUTEX_INIT_CB
witness_list_t *witnesses;
- witnesses = tsd_witnessesp_get(tsd);
+ witnesses = &witness_tsd->witnesses;
ql_new(witnesses);
#endif
- tsd_witness_fork_set(tsd, false);
+ witness_tsd->forking = false;
}