Diffstat (limited to 'compiler-rt/lib/scudo')
-rw-r--r--  compiler-rt/lib/scudo/scudo_allocator.cpp | 820
-rw-r--r--  compiler-rt/lib/scudo/scudo_allocator.h | 125
-rw-r--r--  compiler-rt/lib/scudo/scudo_allocator_combined.h | 75
-rw-r--r--  compiler-rt/lib/scudo/scudo_allocator_secondary.h | 192
-rw-r--r--  compiler-rt/lib/scudo/scudo_crc32.cpp | 24
-rw-r--r--  compiler-rt/lib/scudo/scudo_crc32.h | 100
-rw-r--r--  compiler-rt/lib/scudo/scudo_errors.cpp | 77
-rw-r--r--  compiler-rt/lib/scudo/scudo_errors.h | 34
-rw-r--r--  compiler-rt/lib/scudo/scudo_flags.cpp | 136
-rw-r--r--  compiler-rt/lib/scudo/scudo_flags.h | 32
-rw-r--r--  compiler-rt/lib/scudo/scudo_flags.inc | 48
-rw-r--r--  compiler-rt/lib/scudo/scudo_interface_internal.h | 32
-rw-r--r--  compiler-rt/lib/scudo/scudo_malloc.cpp | 84
-rw-r--r--  compiler-rt/lib/scudo/scudo_new_delete.cpp | 107
-rw-r--r--  compiler-rt/lib/scudo/scudo_platform.h | 93
-rw-r--r--  compiler-rt/lib/scudo/scudo_termination.cpp | 41
-rw-r--r--  compiler-rt/lib/scudo/scudo_tsd.h | 65
-rw-r--r--  compiler-rt/lib/scudo/scudo_tsd_exclusive.cpp | 67
-rw-r--r--  compiler-rt/lib/scudo/scudo_tsd_exclusive.inc | 47
-rw-r--r--  compiler-rt/lib/scudo/scudo_tsd_shared.cpp | 107
-rw-r--r--  compiler-rt/lib/scudo/scudo_tsd_shared.inc | 55
-rw-r--r--  compiler-rt/lib/scudo/scudo_utils.cpp | 134
-rw-r--r--  compiler-rt/lib/scudo/scudo_utils.h | 36
-rw-r--r--  compiler-rt/lib/scudo/standalone/allocator_config.h | 80
-rw-r--r--  compiler-rt/lib/scudo/standalone/atomic_helpers.h | 139
-rw-r--r--  compiler-rt/lib/scudo/standalone/bytemap.h | 111
-rw-r--r--  compiler-rt/lib/scudo/standalone/checksum.cpp | 70
-rw-r--r--  compiler-rt/lib/scudo/standalone/checksum.h | 54
-rw-r--r--  compiler-rt/lib/scudo/standalone/chunk.h | 156
-rw-r--r--  compiler-rt/lib/scudo/standalone/combined.h | 596
-rw-r--r--  compiler-rt/lib/scudo/standalone/common.cpp | 32
-rw-r--r--  compiler-rt/lib/scudo/standalone/common.h | 176
-rw-r--r--  compiler-rt/lib/scudo/standalone/crc32_hw.cpp | 19
-rw-r--r--  compiler-rt/lib/scudo/standalone/flags.cpp | 57
-rw-r--r--  compiler-rt/lib/scudo/standalone/flags.h | 30
-rw-r--r--  compiler-rt/lib/scudo/standalone/flags.inc | 50
-rw-r--r--  compiler-rt/lib/scudo/standalone/flags_parser.cpp | 164
-rw-r--r--  compiler-rt/lib/scudo/standalone/flags_parser.h | 55
-rw-r--r--  compiler-rt/lib/scudo/standalone/fuchsia.cpp | 189
-rw-r--r--  compiler-rt/lib/scudo/standalone/fuchsia.h | 31
-rw-r--r--  compiler-rt/lib/scudo/standalone/interface.h | 29
-rw-r--r--  compiler-rt/lib/scudo/standalone/internal_defs.h | 133
-rw-r--r--  compiler-rt/lib/scudo/standalone/linux.cpp | 171
-rw-r--r--  compiler-rt/lib/scudo/standalone/linux.h | 70
-rw-r--r--  compiler-rt/lib/scudo/standalone/list.h | 156
-rw-r--r--  compiler-rt/lib/scudo/standalone/local_cache.h | 181
-rw-r--r--  compiler-rt/lib/scudo/standalone/mutex.h | 73
-rw-r--r--  compiler-rt/lib/scudo/standalone/platform.h | 70
-rw-r--r--  compiler-rt/lib/scudo/standalone/primary32.h | 411
-rw-r--r--  compiler-rt/lib/scudo/standalone/primary64.h | 392
-rw-r--r--  compiler-rt/lib/scudo/standalone/quarantine.h | 289
-rw-r--r--  compiler-rt/lib/scudo/standalone/release.h | 262
-rw-r--r--  compiler-rt/lib/scudo/standalone/report.cpp | 192
-rw-r--r--  compiler-rt/lib/scudo/standalone/report.h | 57
-rw-r--r--  compiler-rt/lib/scudo/standalone/secondary.cpp | 135
-rw-r--r--  compiler-rt/lib/scudo/standalone/secondary.h | 98
-rw-r--r--  compiler-rt/lib/scudo/standalone/size_class_map.h | 151
-rw-r--r--  compiler-rt/lib/scudo/standalone/stats.h | 105
-rw-r--r--  compiler-rt/lib/scudo/standalone/string_utils.cpp | 244
-rw-r--r--  compiler-rt/lib/scudo/standalone/string_utils.h | 43
-rw-r--r--  compiler-rt/lib/scudo/standalone/tsd.h | 66
-rw-r--r--  compiler-rt/lib/scudo/standalone/tsd_exclusive.h | 118
-rw-r--r--  compiler-rt/lib/scudo/standalone/tsd_shared.h | 168
-rw-r--r--  compiler-rt/lib/scudo/standalone/vector.h | 118
-rw-r--r--  compiler-rt/lib/scudo/standalone/wrappers_c.cpp | 39
-rw-r--r--  compiler-rt/lib/scudo/standalone/wrappers_c.h | 52
-rw-r--r--  compiler-rt/lib/scudo/standalone/wrappers_c.inc | 186
-rw-r--r--  compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp | 49
-rw-r--r--  compiler-rt/lib/scudo/standalone/wrappers_c_checks.h | 67
-rw-r--r--  compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp | 107
70 files changed, 8772 insertions, 0 deletions
diff --git a/compiler-rt/lib/scudo/scudo_allocator.cpp b/compiler-rt/lib/scudo/scudo_allocator.cpp
new file mode 100644
index 000000000000..b2ebc9705930
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_allocator.cpp
@@ -0,0 +1,820 @@
+//===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo Hardened Allocator implementation.
+/// It uses the sanitizer_common allocator as a base and aims at mitigating
+/// heap corruption vulnerabilities. It provides a checksum-guarded chunk
+/// header, a delayed free list, and additional sanity checks.
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_allocator.h"
+#include "scudo_crc32.h"
+#include "scudo_errors.h"
+#include "scudo_flags.h"
+#include "scudo_interface_internal.h"
+#include "scudo_tsd.h"
+#include "scudo_utils.h"
+
+#include "sanitizer_common/sanitizer_allocator_checks.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_quarantine.h"
+
+#ifdef GWP_ASAN_HOOKS
+# include "gwp_asan/guarded_pool_allocator.h"
+# include "gwp_asan/optional/backtrace.h"
+# include "gwp_asan/optional/options_parser.h"
+#endif // GWP_ASAN_HOOKS
+
+#include <errno.h>
+#include <string.h>
+
+namespace __scudo {
+
+// Global static cookie, initialized at start-up.
+static u32 Cookie;
+
+// We default to software CRC32 if the alternatives are not supported, either
+// at compilation or at runtime.
+static atomic_uint8_t HashAlgorithm = { CRC32Software };
+
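+// Helper computing a CRC32 checksum over a value and an array of words,
+// seeded with the provided Crc. It uses the hardware instructions if they
+// were enabled at compile time or detected at runtime, and the software
+// table otherwise.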
+INLINE u32 computeCRC32(u32 Crc, uptr Value, uptr *Array, uptr ArraySize) {
+ // If the hardware CRC32 feature is defined here, it was enabled everywhere,
+ // as opposed to only for scudo_crc32.cpp. This means that other hardware
+ // specific instructions were likely emitted at other places, and as a
+ // result there is no reason to not use it here.
+#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+ Crc = CRC32_INTRINSIC(Crc, Value);
+ for (uptr i = 0; i < ArraySize; i++)
+ Crc = CRC32_INTRINSIC(Crc, Array[i]);
+ return Crc;
+#else
+ if (atomic_load_relaxed(&HashAlgorithm) == CRC32Hardware) {
+ Crc = computeHardwareCRC32(Crc, Value);
+ for (uptr i = 0; i < ArraySize; i++)
+ Crc = computeHardwareCRC32(Crc, Array[i]);
+ return Crc;
+ }
+ Crc = computeSoftwareCRC32(Crc, Value);
+ for (uptr i = 0; i < ArraySize; i++)
+ Crc = computeSoftwareCRC32(Crc, Array[i]);
+ return Crc;
+#endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+}
+
+static BackendT &getBackend();
+
+namespace Chunk {
+ static INLINE AtomicPackedHeader *getAtomicHeader(void *Ptr) {
+ return reinterpret_cast<AtomicPackedHeader *>(reinterpret_cast<uptr>(Ptr) -
+ getHeaderSize());
+ }
+ static INLINE
+ const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) {
+ return reinterpret_cast<const AtomicPackedHeader *>(
+ reinterpret_cast<uptr>(Ptr) - getHeaderSize());
+ }
+
+ static INLINE bool isAligned(const void *Ptr) {
+ return IsAligned(reinterpret_cast<uptr>(Ptr), MinAlignment);
+ }
+
+  // We can't use the offset member of the chunk itself, as we would double
+  // fetch it without any guarantee that it wouldn't have been tampered with.
+  // To prevent this, we work with a local copy of the header.
+ static INLINE void *getBackendPtr(const void *Ptr, UnpackedHeader *Header) {
+ return reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
+ getHeaderSize() - (Header->Offset << MinAlignmentLog));
+ }
+
+  // Returns the usable size for a chunk, meaning the number of bytes from the
+  // beginning of the user data to the end of the backend-allocated chunk.
+ static INLINE uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) {
+ const uptr ClassId = Header->ClassId;
+ if (ClassId)
+ return PrimaryT::ClassIdToSize(ClassId) - getHeaderSize() -
+ (Header->Offset << MinAlignmentLog);
+ return SecondaryT::GetActuallyAllocatedSize(
+ getBackendPtr(Ptr, Header)) - getHeaderSize();
+ }
+
+ // Returns the size the user requested when allocating the chunk.
+ static INLINE uptr getSize(const void *Ptr, UnpackedHeader *Header) {
+ const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
+ if (Header->ClassId)
+ return SizeOrUnusedBytes;
+ return SecondaryT::GetActuallyAllocatedSize(
+ getBackendPtr(Ptr, Header)) - getHeaderSize() - SizeOrUnusedBytes;
+ }
+
+ // Compute the checksum of the chunk pointer and its header.
+ static INLINE u16 computeChecksum(const void *Ptr, UnpackedHeader *Header) {
+ UnpackedHeader ZeroChecksumHeader = *Header;
+ ZeroChecksumHeader.Checksum = 0;
+ uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
+ memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder));
+ const u32 Crc = computeCRC32(Cookie, reinterpret_cast<uptr>(Ptr),
+ HeaderHolder, ARRAY_SIZE(HeaderHolder));
+ return static_cast<u16>(Crc);
+ }
+
+ // Checks the validity of a chunk by verifying its checksum. It doesn't
+ // incur termination in the event of an invalid chunk.
+ static INLINE bool isValid(const void *Ptr) {
+ PackedHeader NewPackedHeader =
+ atomic_load_relaxed(getConstAtomicHeader(Ptr));
+ UnpackedHeader NewUnpackedHeader =
+ bit_cast<UnpackedHeader>(NewPackedHeader);
+ return (NewUnpackedHeader.Checksum ==
+ computeChecksum(Ptr, &NewUnpackedHeader));
+ }
+
+ // Ensure that ChunkAvailable is 0, so that if a 0 checksum is ever valid
+ // for a fully nulled out header, its state will be available anyway.
+ COMPILER_CHECK(ChunkAvailable == 0);
+
+ // Loads and unpacks the header, verifying the checksum in the process.
+ static INLINE
+ void loadHeader(const void *Ptr, UnpackedHeader *NewUnpackedHeader) {
+ PackedHeader NewPackedHeader =
+ atomic_load_relaxed(getConstAtomicHeader(Ptr));
+ *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
+ if (UNLIKELY(NewUnpackedHeader->Checksum !=
+ computeChecksum(Ptr, NewUnpackedHeader)))
+ dieWithMessage("corrupted chunk header at address %p\n", Ptr);
+ }
+
+ // Packs and stores the header, computing the checksum in the process.
+ static INLINE void storeHeader(void *Ptr, UnpackedHeader *NewUnpackedHeader) {
+ NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader);
+ PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
+ atomic_store_relaxed(getAtomicHeader(Ptr), NewPackedHeader);
+ }
+
+ // Packs and stores the header, computing the checksum in the process. We
+ // compare the current header with the expected provided one to ensure that
+ // we are not being raced by a corruption occurring in another thread.
+ static INLINE void compareExchangeHeader(void *Ptr,
+ UnpackedHeader *NewUnpackedHeader,
+ UnpackedHeader *OldUnpackedHeader) {
+ NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader);
+ PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
+ PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
+ if (UNLIKELY(!atomic_compare_exchange_strong(
+ getAtomicHeader(Ptr), &OldPackedHeader, NewPackedHeader,
+ memory_order_relaxed)))
+ dieWithMessage("race on chunk header at address %p\n", Ptr);
+ }
+} // namespace Chunk
+
+struct QuarantineCallback {
+ explicit QuarantineCallback(AllocatorCacheT *Cache)
+ : Cache_(Cache) {}
+
+ // Chunk recycling function, returns a quarantined chunk to the backend,
+ // first making sure it hasn't been tampered with.
+ void Recycle(void *Ptr) {
+ UnpackedHeader Header;
+ Chunk::loadHeader(Ptr, &Header);
+ if (UNLIKELY(Header.State != ChunkQuarantine))
+ dieWithMessage("invalid chunk state when recycling address %p\n", Ptr);
+ UnpackedHeader NewHeader = Header;
+ NewHeader.State = ChunkAvailable;
+ Chunk::compareExchangeHeader(Ptr, &NewHeader, &Header);
+ void *BackendPtr = Chunk::getBackendPtr(Ptr, &Header);
+ if (Header.ClassId)
+ getBackend().deallocatePrimary(Cache_, BackendPtr, Header.ClassId);
+ else
+ getBackend().deallocateSecondary(BackendPtr);
+ }
+
+ // Internal quarantine allocation and deallocation functions. We first check
+ // that the batches are indeed serviced by the Primary.
+ // TODO(kostyak): figure out the best way to protect the batches.
+ void *Allocate(uptr Size) {
+ const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
+ return getBackend().allocatePrimary(Cache_, BatchClassId);
+ }
+
+ void Deallocate(void *Ptr) {
+ const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
+ getBackend().deallocatePrimary(Cache_, Ptr, BatchClassId);
+ }
+
+ AllocatorCacheT *Cache_;
+ COMPILER_CHECK(sizeof(QuarantineBatch) < SizeClassMap::kMaxSize);
+};
+
+typedef Quarantine<QuarantineCallback, void> QuarantineT;
+typedef QuarantineT::Cache QuarantineCacheT;
+COMPILER_CHECK(sizeof(QuarantineCacheT) <=
+ sizeof(ScudoTSD::QuarantineCachePlaceHolder));
+
+QuarantineCacheT *getQuarantineCache(ScudoTSD *TSD) {
+ return reinterpret_cast<QuarantineCacheT *>(TSD->QuarantineCachePlaceHolder);
+}
+
+#ifdef GWP_ASAN_HOOKS
+static gwp_asan::GuardedPoolAllocator GuardedAlloc;
+#endif // GWP_ASAN_HOOKS
+
+struct Allocator {
+ static const uptr MaxAllowedMallocSize =
+ FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);
+
+ BackendT Backend;
+ QuarantineT Quarantine;
+
+ u32 QuarantineChunksUpToSize;
+
+ bool DeallocationTypeMismatch;
+ bool ZeroContents;
+ bool DeleteSizeMismatch;
+
+ bool CheckRssLimit;
+ uptr HardRssLimitMb;
+ uptr SoftRssLimitMb;
+ atomic_uint8_t RssLimitExceeded;
+ atomic_uint64_t RssLastCheckedAtNS;
+
+ explicit Allocator(LinkerInitialized)
+ : Quarantine(LINKER_INITIALIZED) {}
+
+ NOINLINE void performSanityChecks();
+
+ void init() {
+ SanitizerToolName = "Scudo";
+ PrimaryAllocatorName = "ScudoPrimary";
+ SecondaryAllocatorName = "ScudoSecondary";
+
+ initFlags();
+
+ performSanityChecks();
+
+    // Check if hardware CRC32 is supported in the binary and by the platform;
+    // if so, opt for the hardware version of the CRC32 checksum.
+ if (&computeHardwareCRC32 && hasHardwareCRC32())
+ atomic_store_relaxed(&HashAlgorithm, CRC32Hardware);
+
+ SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
+ Backend.init(common_flags()->allocator_release_to_os_interval_ms);
+ HardRssLimitMb = common_flags()->hard_rss_limit_mb;
+ SoftRssLimitMb = common_flags()->soft_rss_limit_mb;
+ Quarantine.Init(
+ static_cast<uptr>(getFlags()->QuarantineSizeKb) << 10,
+ static_cast<uptr>(getFlags()->ThreadLocalQuarantineSizeKb) << 10);
+ QuarantineChunksUpToSize = (Quarantine.GetCacheSize() == 0) ? 0 :
+ getFlags()->QuarantineChunksUpToSize;
+ DeallocationTypeMismatch = getFlags()->DeallocationTypeMismatch;
+ DeleteSizeMismatch = getFlags()->DeleteSizeMismatch;
+ ZeroContents = getFlags()->ZeroContents;
+
+ if (UNLIKELY(!GetRandom(reinterpret_cast<void *>(&Cookie), sizeof(Cookie),
+ /*blocking=*/false))) {
+ Cookie = static_cast<u32>((NanoTime() >> 12) ^
+ (reinterpret_cast<uptr>(this) >> 4));
+ }
+
+ CheckRssLimit = HardRssLimitMb || SoftRssLimitMb;
+ if (CheckRssLimit)
+ atomic_store_relaxed(&RssLastCheckedAtNS, MonotonicNanoTime());
+ }
+
+ // Helper function that checks for a valid Scudo chunk. nullptr isn't.
+ bool isValidPointer(const void *Ptr) {
+ initThreadMaybe();
+ if (UNLIKELY(!Ptr))
+ return false;
+ if (!Chunk::isAligned(Ptr))
+ return false;
+ return Chunk::isValid(Ptr);
+ }
+
+ NOINLINE bool isRssLimitExceeded();
+
+ // Allocates a chunk.
+ void *allocate(uptr Size, uptr Alignment, AllocType Type,
+ bool ForceZeroContents = false) {
+ initThreadMaybe();
+
+#ifdef GWP_ASAN_HOOKS
+ if (UNLIKELY(GuardedAlloc.shouldSample())) {
+ if (void *Ptr = GuardedAlloc.allocate(Size))
+ return Ptr;
+ }
+#endif // GWP_ASAN_HOOKS
+
+ if (UNLIKELY(Alignment > MaxAlignment)) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ reportAllocationAlignmentTooBig(Alignment, MaxAlignment);
+ }
+ if (UNLIKELY(Alignment < MinAlignment))
+ Alignment = MinAlignment;
+
+ const uptr NeededSize = RoundUpTo(Size ? Size : 1, MinAlignment) +
+ Chunk::getHeaderSize();
+ const uptr AlignedSize = (Alignment > MinAlignment) ?
+ NeededSize + (Alignment - Chunk::getHeaderSize()) : NeededSize;
+ if (UNLIKELY(Size >= MaxAllowedMallocSize) ||
+ UNLIKELY(AlignedSize >= MaxAllowedMallocSize)) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ reportAllocationSizeTooBig(Size, AlignedSize, MaxAllowedMallocSize);
+ }
+
+ if (CheckRssLimit && UNLIKELY(isRssLimitExceeded())) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ reportRssLimitExceeded();
+ }
+
+ // Primary and Secondary backed allocations have a different treatment. We
+ // deal with alignment requirements of Primary serviced allocations here,
+ // but the Secondary will take care of its own alignment needs.
+ void *BackendPtr;
+ uptr BackendSize;
+ u8 ClassId;
+ if (PrimaryT::CanAllocate(AlignedSize, MinAlignment)) {
+ BackendSize = AlignedSize;
+ ClassId = SizeClassMap::ClassID(BackendSize);
+ bool UnlockRequired;
+ ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
+ BackendPtr = Backend.allocatePrimary(&TSD->Cache, ClassId);
+ if (UnlockRequired)
+ TSD->unlock();
+ } else {
+ BackendSize = NeededSize;
+ ClassId = 0;
+ BackendPtr = Backend.allocateSecondary(BackendSize, Alignment);
+ }
+ if (UNLIKELY(!BackendPtr)) {
+ SetAllocatorOutOfMemory();
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ reportOutOfMemory(Size);
+ }
+
+ // If requested, we will zero out the entire contents of the returned chunk.
+ if ((ForceZeroContents || ZeroContents) && ClassId)
+ memset(BackendPtr, 0, PrimaryT::ClassIdToSize(ClassId));
+
+ UnpackedHeader Header = {};
+ uptr UserPtr = reinterpret_cast<uptr>(BackendPtr) + Chunk::getHeaderSize();
+ if (UNLIKELY(!IsAligned(UserPtr, Alignment))) {
+ // Since the Secondary takes care of alignment, a non-aligned pointer
+ // means it is from the Primary. It is also the only case where the offset
+ // field of the header would be non-zero.
+ DCHECK(ClassId);
+ const uptr AlignedUserPtr = RoundUpTo(UserPtr, Alignment);
+ Header.Offset = (AlignedUserPtr - UserPtr) >> MinAlignmentLog;
+ UserPtr = AlignedUserPtr;
+ }
+ DCHECK_LE(UserPtr + Size, reinterpret_cast<uptr>(BackendPtr) + BackendSize);
+ Header.State = ChunkAllocated;
+ Header.AllocType = Type;
+ if (ClassId) {
+ Header.ClassId = ClassId;
+ Header.SizeOrUnusedBytes = Size;
+ } else {
+ // The secondary fits the allocations to a page, so the amount of unused
+ // bytes is the difference between the end of the user allocation and the
+ // next page boundary.
+ const uptr PageSize = GetPageSizeCached();
+ const uptr TrailingBytes = (UserPtr + Size) & (PageSize - 1);
+ if (TrailingBytes)
+ Header.SizeOrUnusedBytes = PageSize - TrailingBytes;
+ }
+ void *Ptr = reinterpret_cast<void *>(UserPtr);
+ Chunk::storeHeader(Ptr, &Header);
+ if (SCUDO_CAN_USE_HOOKS && &__sanitizer_malloc_hook)
+ __sanitizer_malloc_hook(Ptr, Size);
+ return Ptr;
+ }
+
+ // Place a chunk in the quarantine or directly deallocate it in the event of
+ // a zero-sized quarantine, or if the size of the chunk is greater than the
+ // quarantine chunk size threshold.
+ void quarantineOrDeallocateChunk(void *Ptr, UnpackedHeader *Header,
+ uptr Size) {
+ const bool BypassQuarantine = !Size || (Size > QuarantineChunksUpToSize);
+ if (BypassQuarantine) {
+ UnpackedHeader NewHeader = *Header;
+ NewHeader.State = ChunkAvailable;
+ Chunk::compareExchangeHeader(Ptr, &NewHeader, Header);
+ void *BackendPtr = Chunk::getBackendPtr(Ptr, Header);
+ if (Header->ClassId) {
+ bool UnlockRequired;
+ ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
+ getBackend().deallocatePrimary(&TSD->Cache, BackendPtr,
+ Header->ClassId);
+ if (UnlockRequired)
+ TSD->unlock();
+ } else {
+ getBackend().deallocateSecondary(BackendPtr);
+ }
+ } else {
+      // If a small amount of memory was allocated with a larger alignment, we
+      // want to take that into account. Otherwise the Quarantine would be
+      // filled with tiny chunks, using up a lot of VA memory. This is an
+      // approximation of the usable size that allows us to avoid calling
+      // GetActuallyAllocatedSize.
+ const uptr EstimatedSize = Size + (Header->Offset << MinAlignmentLog);
+ UnpackedHeader NewHeader = *Header;
+ NewHeader.State = ChunkQuarantine;
+ Chunk::compareExchangeHeader(Ptr, &NewHeader, Header);
+ bool UnlockRequired;
+ ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
+ Quarantine.Put(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache),
+ Ptr, EstimatedSize);
+ if (UnlockRequired)
+ TSD->unlock();
+ }
+ }
+
+ // Deallocates a Chunk, which means either adding it to the quarantine or
+ // directly returning it to the backend if criteria are met.
+ void deallocate(void *Ptr, uptr DeleteSize, uptr DeleteAlignment,
+ AllocType Type) {
+    // For a deallocation, we only ensure minimal initialization, meaning
+    // thread local data will be left uninitialized for now (when using ELF
+    // TLS). The fallback cache will be used instead. This is a workaround for
+    // a situation where the only heap operation performed in a thread would
+    // be a free past the TLS destructors, which would leave initialized
+    // thread specific data never properly destroyed. Any other heap operation
+    // will do a full init.
+ initThreadMaybe(/*MinimalInit=*/true);
+ if (SCUDO_CAN_USE_HOOKS && &__sanitizer_free_hook)
+ __sanitizer_free_hook(Ptr);
+ if (UNLIKELY(!Ptr))
+ return;
+
+#ifdef GWP_ASAN_HOOKS
+ if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
+ GuardedAlloc.deallocate(Ptr);
+ return;
+ }
+#endif // GWP_ASAN_HOOKS
+
+ if (UNLIKELY(!Chunk::isAligned(Ptr)))
+ dieWithMessage("misaligned pointer when deallocating address %p\n", Ptr);
+ UnpackedHeader Header;
+ Chunk::loadHeader(Ptr, &Header);
+ if (UNLIKELY(Header.State != ChunkAllocated))
+ dieWithMessage("invalid chunk state when deallocating address %p\n", Ptr);
+ if (DeallocationTypeMismatch) {
+ // The deallocation type has to match the allocation one.
+ if (Header.AllocType != Type) {
+        // With the exception of memalign'd chunks, which can still be free'd.
+ if (Header.AllocType != FromMemalign || Type != FromMalloc)
+ dieWithMessage("allocation type mismatch when deallocating address "
+ "%p\n", Ptr);
+ }
+ }
+ const uptr Size = Chunk::getSize(Ptr, &Header);
+ if (DeleteSizeMismatch) {
+ if (DeleteSize && DeleteSize != Size)
+ dieWithMessage("invalid sized delete when deallocating address %p\n",
+ Ptr);
+ }
+ (void)DeleteAlignment; // TODO(kostyak): verify that the alignment matches.
+ quarantineOrDeallocateChunk(Ptr, &Header, Size);
+ }
+
+ // Reallocates a chunk. We can save on a new allocation if the new requested
+ // size still fits in the chunk.
+ void *reallocate(void *OldPtr, uptr NewSize) {
+ initThreadMaybe();
+
+#ifdef GWP_ASAN_HOOKS
+ if (UNLIKELY(GuardedAlloc.pointerIsMine(OldPtr))) {
+ size_t OldSize = GuardedAlloc.getSize(OldPtr);
+ void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
+ if (NewPtr)
+ memcpy(NewPtr, OldPtr, (NewSize < OldSize) ? NewSize : OldSize);
+ GuardedAlloc.deallocate(OldPtr);
+ return NewPtr;
+ }
+#endif // GWP_ASAN_HOOKS
+
+ if (UNLIKELY(!Chunk::isAligned(OldPtr)))
+ dieWithMessage("misaligned address when reallocating address %p\n",
+ OldPtr);
+ UnpackedHeader OldHeader;
+ Chunk::loadHeader(OldPtr, &OldHeader);
+ if (UNLIKELY(OldHeader.State != ChunkAllocated))
+ dieWithMessage("invalid chunk state when reallocating address %p\n",
+ OldPtr);
+ if (DeallocationTypeMismatch) {
+ if (UNLIKELY(OldHeader.AllocType != FromMalloc))
+ dieWithMessage("allocation type mismatch when reallocating address "
+ "%p\n", OldPtr);
+ }
+ const uptr UsableSize = Chunk::getUsableSize(OldPtr, &OldHeader);
+ // The new size still fits in the current chunk, and the size difference
+ // is reasonable.
+ if (NewSize <= UsableSize &&
+ (UsableSize - NewSize) < (SizeClassMap::kMaxSize / 2)) {
+ UnpackedHeader NewHeader = OldHeader;
+ NewHeader.SizeOrUnusedBytes =
+ OldHeader.ClassId ? NewSize : UsableSize - NewSize;
+ Chunk::compareExchangeHeader(OldPtr, &NewHeader, &OldHeader);
+ return OldPtr;
+ }
+ // Otherwise, we have to allocate a new chunk and copy the contents of the
+ // old one.
+ void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
+ if (NewPtr) {
+ const uptr OldSize = OldHeader.ClassId ? OldHeader.SizeOrUnusedBytes :
+ UsableSize - OldHeader.SizeOrUnusedBytes;
+ memcpy(NewPtr, OldPtr, Min(NewSize, UsableSize));
+ quarantineOrDeallocateChunk(OldPtr, &OldHeader, OldSize);
+ }
+ return NewPtr;
+ }
+
+ // Helper function that returns the actual usable size of a chunk.
+ uptr getUsableSize(const void *Ptr) {
+ initThreadMaybe();
+ if (UNLIKELY(!Ptr))
+ return 0;
+
+#ifdef GWP_ASAN_HOOKS
+ if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
+ return GuardedAlloc.getSize(Ptr);
+#endif // GWP_ASAN_HOOKS
+
+ UnpackedHeader Header;
+ Chunk::loadHeader(Ptr, &Header);
+ // Getting the usable size of a chunk only makes sense if it's allocated.
+ if (UNLIKELY(Header.State != ChunkAllocated))
+ dieWithMessage("invalid chunk state when sizing address %p\n", Ptr);
+ return Chunk::getUsableSize(Ptr, &Header);
+ }
+
+ void *calloc(uptr NMemB, uptr Size) {
+ initThreadMaybe();
+ if (UNLIKELY(CheckForCallocOverflow(NMemB, Size))) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ reportCallocOverflow(NMemB, Size);
+ }
+ return allocate(NMemB * Size, MinAlignment, FromMalloc, true);
+ }
+
+ void commitBack(ScudoTSD *TSD) {
+ Quarantine.Drain(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache));
+ Backend.destroyCache(&TSD->Cache);
+ }
+
+ uptr getStats(AllocatorStat StatType) {
+ initThreadMaybe();
+ uptr stats[AllocatorStatCount];
+ Backend.getStats(stats);
+ return stats[StatType];
+ }
+
+ bool canReturnNull() {
+ initThreadMaybe();
+ return AllocatorMayReturnNull();
+ }
+
+ void setRssLimit(uptr LimitMb, bool HardLimit) {
+ if (HardLimit)
+ HardRssLimitMb = LimitMb;
+ else
+ SoftRssLimitMb = LimitMb;
+ CheckRssLimit = HardRssLimitMb || SoftRssLimitMb;
+ }
+
+ void printStats() {
+ initThreadMaybe();
+ Backend.printStats();
+ }
+};
+
+NOINLINE void Allocator::performSanityChecks() {
+ // Verify that the header offset field can hold the maximum offset. In the
+ // case of the Secondary allocator, it takes care of alignment and the
+ // offset will always be 0. In the case of the Primary, the worst case
+ // scenario happens in the last size class, when the backend allocation
+ // would already be aligned on the requested alignment, which would happen
+ // to be the maximum alignment that would fit in that size class. As a
+ // result, the maximum offset will be at most the maximum alignment for the
+ // last size class minus the header size, in multiples of MinAlignment.
+ UnpackedHeader Header = {};
+ const uptr MaxPrimaryAlignment =
+ 1 << MostSignificantSetBitIndex(SizeClassMap::kMaxSize - MinAlignment);
+ const uptr MaxOffset =
+ (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
+ Header.Offset = MaxOffset;
+ if (Header.Offset != MaxOffset)
+ dieWithMessage("maximum possible offset doesn't fit in header\n");
+ // Verify that we can fit the maximum size or amount of unused bytes in the
+ // header. Given that the Secondary fits the allocation to a page, the worst
+ // case scenario happens in the Primary. It will depend on the second to
+ // last and last class sizes, as well as the dynamic base for the Primary.
+ // The following is an over-approximation that works for our needs.
+ const uptr MaxSizeOrUnusedBytes = SizeClassMap::kMaxSize - 1;
+ Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
+ if (Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes)
+ dieWithMessage("maximum possible unused bytes doesn't fit in header\n");
+
+ const uptr LargestClassId = SizeClassMap::kLargestClassID;
+ Header.ClassId = LargestClassId;
+ if (Header.ClassId != LargestClassId)
+ dieWithMessage("largest class ID doesn't fit in header\n");
+}
+
+// Opportunistic RSS limit check. This will update the RSS limit status, if
+// it can, every 250ms; otherwise it will just return the current one.
+NOINLINE bool Allocator::isRssLimitExceeded() {
+ u64 LastCheck = atomic_load_relaxed(&RssLastCheckedAtNS);
+ const u64 CurrentCheck = MonotonicNanoTime();
+ if (LIKELY(CurrentCheck < LastCheck + (250ULL * 1000000ULL)))
+ return atomic_load_relaxed(&RssLimitExceeded);
+ if (!atomic_compare_exchange_weak(&RssLastCheckedAtNS, &LastCheck,
+ CurrentCheck, memory_order_relaxed))
+ return atomic_load_relaxed(&RssLimitExceeded);
+ // TODO(kostyak): We currently use sanitizer_common's GetRSS which reads the
+ // RSS from /proc/self/statm by default. We might want to
+ // call getrusage directly, even if it's less accurate.
+ const uptr CurrentRssMb = GetRSS() >> 20;
+ if (HardRssLimitMb && UNLIKELY(HardRssLimitMb < CurrentRssMb))
+ dieWithMessage("hard RSS limit exhausted (%zdMb vs %zdMb)\n",
+ HardRssLimitMb, CurrentRssMb);
+ if (SoftRssLimitMb) {
+ if (atomic_load_relaxed(&RssLimitExceeded)) {
+ if (CurrentRssMb <= SoftRssLimitMb)
+ atomic_store_relaxed(&RssLimitExceeded, false);
+ } else {
+ if (CurrentRssMb > SoftRssLimitMb) {
+ atomic_store_relaxed(&RssLimitExceeded, true);
+ Printf("Scudo INFO: soft RSS limit exhausted (%zdMb vs %zdMb)\n",
+ SoftRssLimitMb, CurrentRssMb);
+ }
+ }
+ }
+ return atomic_load_relaxed(&RssLimitExceeded);
+}
+
+static Allocator Instance(LINKER_INITIALIZED);
+
+static BackendT &getBackend() {
+ return Instance.Backend;
+}
+
+void initScudo() {
+ Instance.init();
+#ifdef GWP_ASAN_HOOKS
+ gwp_asan::options::initOptions();
+ gwp_asan::options::Options &Opts = gwp_asan::options::getOptions();
+ Opts.Backtrace = gwp_asan::options::getBacktraceFunction();
+ Opts.PrintBacktrace = gwp_asan::options::getPrintBacktraceFunction();
+ GuardedAlloc.init(Opts);
+#endif // GWP_ASAN_HOOKS
+}
+
+void ScudoTSD::init() {
+ getBackend().initCache(&Cache);
+ memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder));
+}
+
+void ScudoTSD::commitBack() {
+ Instance.commitBack(this);
+}
+
+void *scudoAllocate(uptr Size, uptr Alignment, AllocType Type) {
+ if (Alignment && UNLIKELY(!IsPowerOfTwo(Alignment))) {
+ errno = EINVAL;
+ if (Instance.canReturnNull())
+ return nullptr;
+ reportAllocationAlignmentNotPowerOfTwo(Alignment);
+ }
+ return SetErrnoOnNull(Instance.allocate(Size, Alignment, Type));
+}
+
+void scudoDeallocate(void *Ptr, uptr Size, uptr Alignment, AllocType Type) {
+ Instance.deallocate(Ptr, Size, Alignment, Type);
+}
+
+void *scudoRealloc(void *Ptr, uptr Size) {
+ if (!Ptr)
+ return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, FromMalloc));
+ if (Size == 0) {
+ Instance.deallocate(Ptr, 0, 0, FromMalloc);
+ return nullptr;
+ }
+ return SetErrnoOnNull(Instance.reallocate(Ptr, Size));
+}
+
+void *scudoCalloc(uptr NMemB, uptr Size) {
+ return SetErrnoOnNull(Instance.calloc(NMemB, Size));
+}
+
+void *scudoValloc(uptr Size) {
+ return SetErrnoOnNull(
+ Instance.allocate(Size, GetPageSizeCached(), FromMemalign));
+}
+
+void *scudoPvalloc(uptr Size) {
+ const uptr PageSize = GetPageSizeCached();
+ if (UNLIKELY(CheckForPvallocOverflow(Size, PageSize))) {
+ errno = ENOMEM;
+ if (Instance.canReturnNull())
+ return nullptr;
+ reportPvallocOverflow(Size);
+ }
+ // pvalloc(0) should allocate one page.
+ Size = Size ? RoundUpTo(Size, PageSize) : PageSize;
+ return SetErrnoOnNull(Instance.allocate(Size, PageSize, FromMemalign));
+}
+
+int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
+ if (UNLIKELY(!CheckPosixMemalignAlignment(Alignment))) {
+ if (!Instance.canReturnNull())
+ reportInvalidPosixMemalignAlignment(Alignment);
+ return EINVAL;
+ }
+ void *Ptr = Instance.allocate(Size, Alignment, FromMemalign);
+ if (UNLIKELY(!Ptr))
+ return ENOMEM;
+ *MemPtr = Ptr;
+ return 0;
+}
+
+void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
+ if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(Alignment, Size))) {
+ errno = EINVAL;
+ if (Instance.canReturnNull())
+ return nullptr;
+ reportInvalidAlignedAllocAlignment(Size, Alignment);
+ }
+ return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMalloc));
+}
+
+uptr scudoMallocUsableSize(void *Ptr) {
+ return Instance.getUsableSize(Ptr);
+}
+
+} // namespace __scudo
+
+using namespace __scudo;
+
+// MallocExtension helper functions
+
+uptr __sanitizer_get_current_allocated_bytes() {
+ return Instance.getStats(AllocatorStatAllocated);
+}
+
+uptr __sanitizer_get_heap_size() {
+ return Instance.getStats(AllocatorStatMapped);
+}
+
+uptr __sanitizer_get_free_bytes() {
+ return 1;
+}
+
+uptr __sanitizer_get_unmapped_bytes() {
+ return 1;
+}
+
+uptr __sanitizer_get_estimated_allocated_size(uptr Size) {
+ return Size;
+}
+
+int __sanitizer_get_ownership(const void *Ptr) {
+ return Instance.isValidPointer(Ptr);
+}
+
+uptr __sanitizer_get_allocated_size(const void *Ptr) {
+ return Instance.getUsableSize(Ptr);
+}
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
+ void *Ptr, uptr Size) {
+ (void)Ptr;
+ (void)Size;
+}
+
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *Ptr) {
+ (void)Ptr;
+}
+#endif
+
+// Interface functions
+
+void __scudo_set_rss_limit(uptr LimitMb, s32 HardLimit) {
+ if (!SCUDO_CAN_USE_PUBLIC_INTERFACE)
+ return;
+ Instance.setRssLimit(LimitMb, !!HardLimit);
+}
+
+void __scudo_print_stats() {
+ Instance.printStats();
+}
diff --git a/compiler-rt/lib/scudo/scudo_allocator.h b/compiler-rt/lib/scudo/scudo_allocator.h
new file mode 100644
index 000000000000..0efa5c520296
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_allocator.h
@@ -0,0 +1,125 @@
+//===-- scudo_allocator.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Header for scudo_allocator.cpp.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_ALLOCATOR_H_
+#define SCUDO_ALLOCATOR_H_
+
+#include "scudo_platform.h"
+
+namespace __scudo {
+
+enum AllocType : u8 {
+ FromMalloc = 0, // Memory block came from malloc, realloc, calloc, etc.
+ FromNew = 1, // Memory block came from operator new.
+ FromNewArray = 2, // Memory block came from operator new [].
+ FromMemalign = 3, // Memory block came from memalign, posix_memalign, etc.
+};
+
+enum ChunkState : u8 {
+ ChunkAvailable = 0,
+ ChunkAllocated = 1,
+ ChunkQuarantine = 2
+};
+
+// Our header requires 64 bits of storage. Having the offset saves us from
+// using functions such as GetBlockBegin, which is fairly costly. Our first
+// implementation used the MetaData as well, which offers the advantage of
+// being stored away from the chunk itself, but accessing it was costly as
+// well. The header will be atomically loaded and stored.
+typedef u64 PackedHeader;
+struct UnpackedHeader {
+ u64 Checksum : 16;
+ u64 ClassId : 8;
+ u64 SizeOrUnusedBytes : 20; // Size for Primary backed allocations, amount of
+ // unused bytes in the chunk for Secondary ones.
+ u64 State : 2; // available, allocated, or quarantined
+ u64 AllocType : 2; // malloc, new, new[], or memalign
+ u64 Offset : 16; // Offset from the beginning of the backend
+ // allocation to the beginning of the chunk
+ // itself, in multiples of MinAlignment. See
+ // comment about its maximum value and in init().
+};
+
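+// Note that the bit fields above add up to exactly 64 bits
+// (16 + 8 + 20 + 2 + 2 + 16), so an UnpackedHeader packs into a single u64;
+// the COMPILER_CHECK below enforces this.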
+typedef atomic_uint64_t AtomicPackedHeader;
+COMPILER_CHECK(sizeof(UnpackedHeader) == sizeof(PackedHeader));
+
+// Minimum alignment of 8 bytes for 32-bit, 16 for 64-bit
+const uptr MinAlignmentLog = FIRST_32_SECOND_64(3, 4);
+const uptr MaxAlignmentLog = 24; // 16 MB
+const uptr MinAlignment = 1 << MinAlignmentLog;
+const uptr MaxAlignment = 1 << MaxAlignmentLog;
+
+// constexpr version of __sanitizer::RoundUp without the extraneous CHECK.
+// This way we can use it in constexpr variables and functions declarations.
+constexpr uptr RoundUpTo(uptr Size, uptr Boundary) {
+ return (Size + Boundary - 1) & ~(Boundary - 1);
+}
+
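+// The chunk header size is sizeof(PackedHeader) rounded up to MinAlignment,
+// that is, 8 bytes on 32-bit platforms and 16 bytes on 64-bit ones.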
+namespace Chunk {
+ constexpr uptr getHeaderSize() {
+ return RoundUpTo(sizeof(PackedHeader), MinAlignment);
+ }
+}
+
+#if SANITIZER_CAN_USE_ALLOCATOR64
+const uptr AllocatorSpace = ~0ULL;
+struct AP64 {
+ static const uptr kSpaceBeg = AllocatorSpace;
+ static const uptr kSpaceSize = AllocatorSize;
+ static const uptr kMetadataSize = 0;
+ typedef __scudo::SizeClassMap SizeClassMap;
+ typedef NoOpMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags =
+ SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
+ using AddressSpaceView = LocalAddressSpaceView;
+};
+typedef SizeClassAllocator64<AP64> PrimaryT;
+#else
+struct AP32 {
+ static const uptr kSpaceBeg = 0;
+ static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
+ static const uptr kMetadataSize = 0;
+ typedef __scudo::SizeClassMap SizeClassMap;
+ static const uptr kRegionSizeLog = RegionSizeLog;
+ using AddressSpaceView = LocalAddressSpaceView;
+ typedef NoOpMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags =
+ SizeClassAllocator32FlagMasks::kRandomShuffleChunks |
+ SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;
+};
+typedef SizeClassAllocator32<AP32> PrimaryT;
+#endif // SANITIZER_CAN_USE_ALLOCATOR64
+
+#include "scudo_allocator_secondary.h"
+
+typedef LargeMmapAllocator SecondaryT;
+
+#include "scudo_allocator_combined.h"
+
+typedef CombinedAllocator BackendT;
+typedef CombinedAllocator::AllocatorCache AllocatorCacheT;
+
+void initScudo();
+
+void *scudoAllocate(uptr Size, uptr Alignment, AllocType Type);
+void scudoDeallocate(void *Ptr, uptr Size, uptr Alignment, AllocType Type);
+void *scudoRealloc(void *Ptr, uptr Size);
+void *scudoCalloc(uptr NMemB, uptr Size);
+void *scudoValloc(uptr Size);
+void *scudoPvalloc(uptr Size);
+int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size);
+void *scudoAlignedAlloc(uptr Alignment, uptr Size);
+uptr scudoMallocUsableSize(void *Ptr);
+
+} // namespace __scudo
+
+#endif // SCUDO_ALLOCATOR_H_
diff --git a/compiler-rt/lib/scudo/scudo_allocator_combined.h b/compiler-rt/lib/scudo/scudo_allocator_combined.h
new file mode 100644
index 000000000000..d61cc9ec1a52
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_allocator_combined.h
@@ -0,0 +1,75 @@
+//===-- scudo_allocator_combined.h ------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo Combined Allocator, dispatches allocation & deallocation requests to
+/// the Primary or the Secondary backend allocators.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_ALLOCATOR_COMBINED_H_
+#define SCUDO_ALLOCATOR_COMBINED_H_
+
+#ifndef SCUDO_ALLOCATOR_H_
+# error "This file must be included inside scudo_allocator.h."
+#endif
+
+class CombinedAllocator {
+ public:
+ using PrimaryAllocator = PrimaryT;
+ using SecondaryAllocator = SecondaryT;
+ using AllocatorCache = typename PrimaryAllocator::AllocatorCache;
+ void init(s32 ReleaseToOSIntervalMs) {
+ Primary.Init(ReleaseToOSIntervalMs);
+ Secondary.Init();
+ Stats.Init();
+ }
+
+ // Primary allocations are always MinAlignment aligned, and as such do not
+ // require an Alignment parameter.
+ void *allocatePrimary(AllocatorCache *Cache, uptr ClassId) {
+ return Cache->Allocate(&Primary, ClassId);
+ }
+
+ // Secondary allocations do not require a Cache, but do require an Alignment
+ // parameter.
+ void *allocateSecondary(uptr Size, uptr Alignment) {
+ return Secondary.Allocate(&Stats, Size, Alignment);
+ }
+
+ void deallocatePrimary(AllocatorCache *Cache, void *Ptr, uptr ClassId) {
+ Cache->Deallocate(&Primary, ClassId, Ptr);
+ }
+
+ void deallocateSecondary(void *Ptr) {
+ Secondary.Deallocate(&Stats, Ptr);
+ }
+
+ void initCache(AllocatorCache *Cache) {
+ Cache->Init(&Stats);
+ }
+
+ void destroyCache(AllocatorCache *Cache) {
+ Cache->Destroy(&Primary, &Stats);
+ }
+
+ void getStats(AllocatorStatCounters StatType) const {
+ Stats.Get(StatType);
+ }
+
+ void printStats() {
+ Primary.PrintStats();
+ Secondary.PrintStats();
+ }
+
+ private:
+ PrimaryAllocator Primary;
+ SecondaryAllocator Secondary;
+ AllocatorGlobalStats Stats;
+};
+
+#endif // SCUDO_ALLOCATOR_COMBINED_H_
diff --git a/compiler-rt/lib/scudo/scudo_allocator_secondary.h b/compiler-rt/lib/scudo/scudo_allocator_secondary.h
new file mode 100644
index 000000000000..80198c4aebf5
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_allocator_secondary.h
@@ -0,0 +1,192 @@
+//===-- scudo_allocator_secondary.h -----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo Secondary Allocator.
+/// This services allocations that are too large to be serviced by the Primary
+/// Allocator. It is directly backed by the memory mapping functions of the
+/// operating system.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_ALLOCATOR_SECONDARY_H_
+#define SCUDO_ALLOCATOR_SECONDARY_H_
+
+#ifndef SCUDO_ALLOCATOR_H_
+# error "This file must be included inside scudo_allocator.h."
+#endif
+
+// Secondary backed allocations are standalone chunks that contain extra
+// information stored in a LargeChunk::Header prior to the frontend's header.
+//
+// The secondary takes care of alignment requirements (so that it can release
+// unnecessary pages in the rare event of larger alignments), and as such must
+// know about the frontend's header size.
+//
+// Since Windows doesn't support partial releasing of a reserved memory region,
+// we have to keep track of both the reserved and the committed memory.
+//
+// The resulting chunk resembles the following:
+//
+// +--------------------+
+// | Guard page(s) |
+// +--------------------+
+// | Unused space* |
+// +--------------------+
+// | LargeChunk::Header |
+// +--------------------+
+// | {Unp,P}ackedHeader |
+// +--------------------+
+// | Data (aligned) |
+// +--------------------+
+// | Unused space** |
+// +--------------------+
+// | Guard page(s) |
+// +--------------------+
+
+namespace LargeChunk {
+struct Header {
+ ReservedAddressRange StoredRange;
+ uptr CommittedSize;
+ uptr Size;
+};
+constexpr uptr getHeaderSize() {
+ return RoundUpTo(sizeof(Header), MinAlignment);
+}
+static Header *getHeader(uptr Ptr) {
+ return reinterpret_cast<Header *>(Ptr - getHeaderSize());
+}
+static Header *getHeader(const void *Ptr) {
+ return getHeader(reinterpret_cast<uptr>(Ptr));
+}
+} // namespace LargeChunk
+
+class LargeMmapAllocator {
+ public:
+ void Init() {
+ internal_memset(this, 0, sizeof(*this));
+ }
+
+ void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
+ const uptr UserSize = Size - Chunk::getHeaderSize();
+ // The Scudo frontend prevents us from allocating more than
+ // MaxAllowedMallocSize, so integer overflow checks would be superfluous.
+ uptr ReservedSize = Size + LargeChunk::getHeaderSize();
+ if (UNLIKELY(Alignment > MinAlignment))
+ ReservedSize += Alignment;
+ const uptr PageSize = GetPageSizeCached();
+ ReservedSize = RoundUpTo(ReservedSize, PageSize);
+ // Account for 2 guard pages, one before and one after the chunk.
+ ReservedSize += 2 * PageSize;
+
+ ReservedAddressRange AddressRange;
+ uptr ReservedBeg = AddressRange.Init(ReservedSize, SecondaryAllocatorName);
+ if (UNLIKELY(ReservedBeg == ~static_cast<uptr>(0)))
+ return nullptr;
+ // A page-aligned pointer is assumed after that, so check it now.
+ DCHECK(IsAligned(ReservedBeg, PageSize));
+ uptr ReservedEnd = ReservedBeg + ReservedSize;
+ // The beginning of the user area for that allocation comes after the
+ // initial guard page, and both headers. This is the pointer that has to
+ // abide by alignment requirements.
+ uptr CommittedBeg = ReservedBeg + PageSize;
+ uptr UserBeg = CommittedBeg + HeadersSize;
+ uptr UserEnd = UserBeg + UserSize;
+ uptr CommittedEnd = RoundUpTo(UserEnd, PageSize);
+
+ // In the rare event of larger alignments, we will attempt to fit the mmap
+ // area better and unmap extraneous memory. This will also ensure that the
+ // offset and unused bytes field of the header stay small.
+ if (UNLIKELY(Alignment > MinAlignment)) {
+ if (!IsAligned(UserBeg, Alignment)) {
+ UserBeg = RoundUpTo(UserBeg, Alignment);
+ CommittedBeg = RoundDownTo(UserBeg - HeadersSize, PageSize);
+ const uptr NewReservedBeg = CommittedBeg - PageSize;
+ DCHECK_GE(NewReservedBeg, ReservedBeg);
+ if (!SANITIZER_WINDOWS && NewReservedBeg != ReservedBeg) {
+ AddressRange.Unmap(ReservedBeg, NewReservedBeg - ReservedBeg);
+ ReservedBeg = NewReservedBeg;
+ }
+ UserEnd = UserBeg + UserSize;
+ CommittedEnd = RoundUpTo(UserEnd, PageSize);
+ }
+ const uptr NewReservedEnd = CommittedEnd + PageSize;
+ DCHECK_LE(NewReservedEnd, ReservedEnd);
+ if (!SANITIZER_WINDOWS && NewReservedEnd != ReservedEnd) {
+ AddressRange.Unmap(NewReservedEnd, ReservedEnd - NewReservedEnd);
+ ReservedEnd = NewReservedEnd;
+ }
+ }
+
+ DCHECK_LE(UserEnd, CommittedEnd);
+ const uptr CommittedSize = CommittedEnd - CommittedBeg;
+    // Actually mmap the memory, preserving the guard pages on either side.
+ CHECK_EQ(CommittedBeg, AddressRange.Map(CommittedBeg, CommittedSize));
+ const uptr Ptr = UserBeg - Chunk::getHeaderSize();
+ LargeChunk::Header *H = LargeChunk::getHeader(Ptr);
+ H->StoredRange = AddressRange;
+ H->Size = CommittedEnd - Ptr;
+ H->CommittedSize = CommittedSize;
+
+ // The primary adds the whole class size to the stats when allocating a
+ // chunk, so we will do something similar here. But we will not account for
+ // the guard pages.
+ {
+ SpinMutexLock l(&StatsMutex);
+ Stats->Add(AllocatorStatAllocated, CommittedSize);
+ Stats->Add(AllocatorStatMapped, CommittedSize);
+ AllocatedBytes += CommittedSize;
+ if (LargestSize < CommittedSize)
+ LargestSize = CommittedSize;
+ NumberOfAllocs++;
+ }
+
+ return reinterpret_cast<void *>(Ptr);
+ }
+
+ void Deallocate(AllocatorStats *Stats, void *Ptr) {
+ LargeChunk::Header *H = LargeChunk::getHeader(Ptr);
+ // Since we're unmapping the entirety of where the ReservedAddressRange
+ // actually is, copy onto the stack.
+ ReservedAddressRange AddressRange = H->StoredRange;
+ const uptr Size = H->CommittedSize;
+ {
+ SpinMutexLock l(&StatsMutex);
+ Stats->Sub(AllocatorStatAllocated, Size);
+ Stats->Sub(AllocatorStatMapped, Size);
+ FreedBytes += Size;
+ NumberOfFrees++;
+ }
+ AddressRange.Unmap(reinterpret_cast<uptr>(AddressRange.base()),
+ AddressRange.size());
+ }
+
+ static uptr GetActuallyAllocatedSize(void *Ptr) {
+ return LargeChunk::getHeader(Ptr)->Size;
+ }
+
+ void PrintStats() {
+ Printf("Stats: LargeMmapAllocator: allocated %zd times (%zd K), "
+ "freed %zd times (%zd K), remains %zd (%zd K) max %zd M\n",
+ NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
+ FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
+ (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20);
+ }
+
+ private:
+ static constexpr uptr HeadersSize =
+ LargeChunk::getHeaderSize() + Chunk::getHeaderSize();
+
+ StaticSpinMutex StatsMutex;
+ u32 NumberOfAllocs;
+ u32 NumberOfFrees;
+ uptr AllocatedBytes;
+ uptr FreedBytes;
+ uptr LargestSize;
+};
+
+#endif // SCUDO_ALLOCATOR_SECONDARY_H_
diff --git a/compiler-rt/lib/scudo/scudo_crc32.cpp b/compiler-rt/lib/scudo/scudo_crc32.cpp
new file mode 100644
index 000000000000..87473505fe79
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_crc32.cpp
@@ -0,0 +1,24 @@
+//===-- scudo_crc32.cpp -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// CRC32 function leveraging hardware-specific instructions. This has to be
+/// kept separate to restrict the use of compiler-specific flags to this file.
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_crc32.h"
+
+namespace __scudo {
+
+#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+u32 computeHardwareCRC32(u32 Crc, uptr Data) {
+ return CRC32_INTRINSIC(Crc, Data);
+}
+#endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+
+} // namespace __scudo
diff --git a/compiler-rt/lib/scudo/scudo_crc32.h b/compiler-rt/lib/scudo/scudo_crc32.h
new file mode 100644
index 000000000000..bad15a929a3e
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_crc32.h
@@ -0,0 +1,100 @@
+//===-- scudo_crc32.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo chunk header checksum related definitions.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_CRC32_H_
+#define SCUDO_CRC32_H_
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+// Hardware CRC32 is supported at compilation via the following:
+// - for i386 & x86_64: -msse4.2
+// - for ARM & AArch64: -march=armv8-a+crc or -mcrc
+// An additional check must be performed at runtime as well to make sure the
+// emitted instructions are valid on the target host.
+
+#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+# ifdef __SSE4_2__
+# include <smmintrin.h>
+# define CRC32_INTRINSIC FIRST_32_SECOND_64(_mm_crc32_u32, _mm_crc32_u64)
+# endif
+# ifdef __ARM_FEATURE_CRC32
+# include <arm_acle.h>
+# define CRC32_INTRINSIC FIRST_32_SECOND_64(__crc32cw, __crc32cd)
+# endif
+#endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+
+namespace __scudo {
+
+enum : u8 {
+ CRC32Software = 0,
+ CRC32Hardware = 1,
+};
+
+static const u32 CRC32Table[] = {
+ 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f,
+ 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
+ 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2,
+ 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
+ 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
+ 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
+ 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c,
+ 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
+ 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423,
+ 0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
+ 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106,
+ 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
+ 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d,
+ 0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
+ 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
+ 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
+ 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7,
+ 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
+ 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa,
+ 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
+ 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81,
+ 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
+ 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84,
+ 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
+ 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
+ 0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
+ 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e,
+ 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
+ 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55,
+ 0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
+ 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28,
+ 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
+ 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f,
+ 0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
+ 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
+ 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
+ 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69,
+ 0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
+ 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc,
+ 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
+ 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693,
+ 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
+ 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
+};
+
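+// Software fallback: a byte-at-a-time, table-driven CRC32 that folds each
+// byte of Data (least significant byte first) into the running checksum.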
+INLINE u32 computeSoftwareCRC32(u32 Crc, uptr Data) {
+ for (uptr i = 0; i < sizeof(Data); i++) {
+ Crc = CRC32Table[(Crc ^ Data) & 0xff] ^ (Crc >> 8);
+ Data >>= 8;
+ }
+ return Crc;
+}
+
+SANITIZER_WEAK_ATTRIBUTE u32 computeHardwareCRC32(u32 Crc, uptr Data);
+
+} // namespace __scudo
+
+#endif // SCUDO_CRC32_H_
diff --git a/compiler-rt/lib/scudo/scudo_errors.cpp b/compiler-rt/lib/scudo/scudo_errors.cpp
new file mode 100644
index 000000000000..4bea9ebc6ab0
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_errors.cpp
@@ -0,0 +1,77 @@
+//===-- scudo_errors.cpp ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Verbose termination functions.
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_utils.h"
+
+#include "sanitizer_common/sanitizer_flags.h"
+
+namespace __scudo {
+
+void NORETURN reportCallocOverflow(uptr Count, uptr Size) {
+ dieWithMessage("calloc parameters overflow: count * size (%zd * %zd) cannot "
+ "be represented with type size_t\n", Count, Size);
+}
+
+void NORETURN reportPvallocOverflow(uptr Size) {
+ dieWithMessage("pvalloc parameters overflow: size 0x%zx rounded up to system "
+ "page size 0x%zx cannot be represented in type size_t\n", Size,
+ GetPageSizeCached());
+}
+
+void NORETURN reportAllocationAlignmentTooBig(uptr Alignment,
+ uptr MaxAlignment) {
+ dieWithMessage("invalid allocation alignment: %zd exceeds maximum supported "
+ "allocation of %zd\n", Alignment, MaxAlignment);
+}
+
+void NORETURN reportAllocationAlignmentNotPowerOfTwo(uptr Alignment) {
+ dieWithMessage("invalid allocation alignment: %zd, alignment must be a power "
+ "of two\n", Alignment);
+}
+
+void NORETURN reportInvalidPosixMemalignAlignment(uptr Alignment) {
+ dieWithMessage(
+ "invalid alignment requested in posix_memalign: %zd, alignment"
+ " must be a power of two and a multiple of sizeof(void *) == %zd\n",
+ Alignment, sizeof(void *));
+}
+
+void NORETURN reportInvalidAlignedAllocAlignment(uptr Size, uptr Alignment) {
+#if SANITIZER_POSIX
+ dieWithMessage("invalid alignment requested in aligned_alloc: %zd, alignment "
+ "must be a power of two and the requested size 0x%zx must be a multiple "
+ "of alignment\n", Alignment, Size);
+#else
+ dieWithMessage("invalid alignment requested in aligned_alloc: %zd, the "
+ "requested size 0x%zx must be a multiple of alignment\n", Alignment,
+ Size);
+#endif
+}
+
+void NORETURN reportAllocationSizeTooBig(uptr UserSize, uptr TotalSize,
+ uptr MaxSize) {
+ dieWithMessage("requested allocation size 0x%zx (0x%zx after adjustments) "
+ "exceeds maximum supported size of 0x%zx\n", UserSize, TotalSize,
+ MaxSize);
+}
+
+void NORETURN reportRssLimitExceeded() {
+ dieWithMessage("specified RSS limit exceeded, currently set to "
+ "soft_rss_limit_mb=%zd\n", common_flags()->soft_rss_limit_mb);
+}
+
+void NORETURN reportOutOfMemory(uptr RequestedSize) {
+ dieWithMessage("allocator is out of memory trying to allocate 0x%zx bytes\n",
+ RequestedSize);
+}
+
+} // namespace __scudo
diff --git a/compiler-rt/lib/scudo/scudo_errors.h b/compiler-rt/lib/scudo/scudo_errors.h
new file mode 100644
index 000000000000..258695c2c02c
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_errors.h
@@ -0,0 +1,34 @@
+//===-- scudo_errors.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Header for scudo_errors.cpp.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_ERRORS_H_
+#define SCUDO_ERRORS_H_
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+namespace __scudo {
+
+void NORETURN reportCallocOverflow(uptr Count, uptr Size);
+void NORETURN reportPvallocOverflow(uptr Size);
+void NORETURN reportAllocationAlignmentTooBig(uptr Alignment,
+ uptr MaxAlignment);
+void NORETURN reportAllocationAlignmentNotPowerOfTwo(uptr Alignment);
+void NORETURN reportInvalidPosixMemalignAlignment(uptr Alignment);
+void NORETURN reportInvalidAlignedAllocAlignment(uptr Size, uptr Alignment);
+void NORETURN reportAllocationSizeTooBig(uptr UserSize, uptr TotalSize,
+ uptr MaxSize);
+void NORETURN reportRssLimitExceeded();
+void NORETURN reportOutOfMemory(uptr RequestedSize);
+
+} // namespace __scudo
+
+#endif // SCUDO_ERRORS_H_
diff --git a/compiler-rt/lib/scudo/scudo_flags.cpp b/compiler-rt/lib/scudo/scudo_flags.cpp
new file mode 100644
index 000000000000..c564e217b35b
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_flags.cpp
@@ -0,0 +1,136 @@
+//===-- scudo_flags.cpp -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Hardened Allocator flag parsing logic.
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_flags.h"
+#include "scudo_interface_internal.h"
+#include "scudo_utils.h"
+
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+
+namespace __scudo {
+
+static Flags ScudoFlags; // Use via getFlags().
+
+void Flags::setDefaults() {
+#define SCUDO_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "scudo_flags.inc"
+#undef SCUDO_FLAG
+}
+
+static void RegisterScudoFlags(FlagParser *parser, Flags *f) {
+#define SCUDO_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(parser, #Name, Description, &f->Name);
+#include "scudo_flags.inc"
+#undef SCUDO_FLAG
+}
+
+static const char *getCompileDefinitionScudoDefaultOptions() {
+#ifdef SCUDO_DEFAULT_OPTIONS
+ return SANITIZER_STRINGIFY(SCUDO_DEFAULT_OPTIONS);
+#else
+ return "";
+#endif
+}
+
+static const char *getScudoDefaultOptions() {
+ return (&__scudo_default_options) ? __scudo_default_options() : "";
+}
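+
+// The weak __scudo_default_options interface queried above can be defined by
+// the target program to provide its own defaults, for instance:
+//   extern "C" const char *__scudo_default_options() {
+//     return "QuarantineSizeKb=256:ZeroContents=1";
+//   }
+// (the option string is only an example; any flag from scudo_flags.inc works).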
+
+void initFlags() {
+ SetCommonFlagsDefaults();
+ {
+ CommonFlags cf;
+ cf.CopyFrom(*common_flags());
+ cf.exitcode = 1;
+ OverrideCommonFlags(cf);
+ }
+ Flags *f = getFlags();
+ f->setDefaults();
+
+ FlagParser ScudoParser;
+ RegisterScudoFlags(&ScudoParser, f);
+ RegisterCommonFlags(&ScudoParser);
+
+ // Override from compile definition.
+ ScudoParser.ParseString(getCompileDefinitionScudoDefaultOptions());
+
+ // Override from user-specified string.
+ ScudoParser.ParseString(getScudoDefaultOptions());
+
+ // Override from environment.
+ ScudoParser.ParseStringFromEnv("SCUDO_OPTIONS");
+
+ InitializeCommonFlags();
+
+ // Sanity checks and default settings for the Quarantine parameters.
+
+ if (f->QuarantineSizeMb >= 0) {
+ // Backward compatible logic if QuarantineSizeMb is set.
+ if (f->QuarantineSizeKb >= 0) {
+ dieWithMessage("ERROR: please use either QuarantineSizeMb (deprecated) "
+ "or QuarantineSizeKb, but not both\n");
+ }
+ if (f->QuarantineChunksUpToSize >= 0) {
+ dieWithMessage("ERROR: QuarantineChunksUpToSize cannot be used in "
+ " conjunction with the deprecated QuarantineSizeMb option\n");
+ }
+ // If everything is in order, update QuarantineSizeKb accordingly.
+ f->QuarantineSizeKb = f->QuarantineSizeMb * 1024;
+ } else {
+ // Otherwise proceed with the new options.
+ if (f->QuarantineSizeKb < 0) {
+ const int DefaultQuarantineSizeKb = FIRST_32_SECOND_64(64, 256);
+ f->QuarantineSizeKb = DefaultQuarantineSizeKb;
+ }
+ if (f->QuarantineChunksUpToSize < 0) {
+ const int DefaultQuarantineChunksUpToSize = FIRST_32_SECOND_64(512, 2048);
+ f->QuarantineChunksUpToSize = DefaultQuarantineChunksUpToSize;
+ }
+ }
+
+ // We enforce an upper limit for the chunk quarantine threshold of 4Mb.
+ if (f->QuarantineChunksUpToSize > (4 * 1024 * 1024)) {
+ dieWithMessage("ERROR: the chunk quarantine threshold is too large\n");
+ }
+
+ // We enforce an upper limit for the quarantine size of 32Mb.
+ if (f->QuarantineSizeKb > (32 * 1024)) {
+ dieWithMessage("ERROR: the quarantine size is too large\n");
+ }
+
+ if (f->ThreadLocalQuarantineSizeKb < 0) {
+ const int DefaultThreadLocalQuarantineSizeKb = FIRST_32_SECOND_64(16, 64);
+ f->ThreadLocalQuarantineSizeKb = DefaultThreadLocalQuarantineSizeKb;
+ }
+ // And an upper limit of 8Mb for the thread quarantine cache.
+ if (f->ThreadLocalQuarantineSizeKb > (8 * 1024)) {
+ dieWithMessage("ERROR: the per thread quarantine cache size is too "
+ "large\n");
+ }
+ if (f->ThreadLocalQuarantineSizeKb == 0 && f->QuarantineSizeKb > 0) {
+ dieWithMessage("ERROR: ThreadLocalQuarantineSizeKb can be set to 0 only "
+ "when QuarantineSizeKb is set to 0\n");
+ }
+}
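+
+// The same flags can also be set at run time through the environment variable
+// parsed above, for example:
+//   SCUDO_OPTIONS="QuarantineSizeKb=128:ThreadLocalQuarantineSizeKb=32" ./app
+// (example values; they must satisfy the sanity checks performed above).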
+
+Flags *getFlags() {
+ return &ScudoFlags;
+}
+
+} // namespace __scudo
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+SANITIZER_INTERFACE_WEAK_DEF(const char*, __scudo_default_options, void) {
+ return "";
+}
+#endif
diff --git a/compiler-rt/lib/scudo/scudo_flags.h b/compiler-rt/lib/scudo/scudo_flags.h
new file mode 100644
index 000000000000..483c79621cbf
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_flags.h
@@ -0,0 +1,32 @@
+//===-- scudo_flags.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Header for scudo_flags.cpp.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_FLAGS_H_
+#define SCUDO_FLAGS_H_
+
+namespace __scudo {
+
+struct Flags {
+#define SCUDO_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "scudo_flags.inc"
+#undef SCUDO_FLAG
+
+ void setDefaults();
+};
+
+Flags *getFlags();
+
+void initFlags();
+
+} // namespace __scudo
+
+#endif // SCUDO_FLAGS_H_
diff --git a/compiler-rt/lib/scudo/scudo_flags.inc b/compiler-rt/lib/scudo/scudo_flags.inc
new file mode 100644
index 000000000000..c124738c1f3a
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_flags.inc
@@ -0,0 +1,48 @@
+//===-- scudo_flags.inc -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Hardened Allocator runtime flags.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_FLAG
+# error "Define SCUDO_FLAG prior to including this file!"
+#endif
+
+SCUDO_FLAG(int, QuarantineSizeMb, -1,
+ "Deprecated. Please use QuarantineSizeKb.")
+
+// Default value is set in scudo_flags.cpp based on architecture.
+SCUDO_FLAG(int, QuarantineSizeKb, -1,
+ "Size in KB of quarantine used to delay the actual deallocation of "
+ "chunks. Lower value may reduce memory usage but decrease the "
+ "effectiveness of the mitigation. Defaults to 64KB (32-bit) or "
+ "256KB (64-bit)")
+
+// Default value is set in scudo_flags.cpp based on architecture.
+SCUDO_FLAG(int, ThreadLocalQuarantineSizeKb, -1,
+ "Size in KB of per-thread cache used to offload the global "
+ "quarantine. Lower value may reduce memory usage but might increase "
+ "the contention on the global quarantine. Defaults to 16KB (32-bit) "
+ "or 64KB (64-bit)")
+
+// Default value is set in scudo_flags.cpp based on architecture.
+SCUDO_FLAG(int, QuarantineChunksUpToSize, -1,
+ "Size in bytes up to which chunks will be quarantined (if lower than"
+ "or equal to). Defaults to 256 (32-bit) or 2048 (64-bit)")
+
+// Disable the deallocation type check by default on Android, it causes too many
+// issues with third party libraries.
+SCUDO_FLAG(bool, DeallocationTypeMismatch, !SANITIZER_ANDROID,
+ "Report errors on malloc/delete, new/free, new/delete[], etc.")
+
+SCUDO_FLAG(bool, DeleteSizeMismatch, true,
+ "Report errors on mismatch between size of new and delete.")
+
+SCUDO_FLAG(bool, ZeroContents, false,
+ "Zero chunk contents on allocation and deallocation.")
diff --git a/compiler-rt/lib/scudo/scudo_interface_internal.h b/compiler-rt/lib/scudo/scudo_interface_internal.h
new file mode 100644
index 000000000000..75c63aa6d489
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_interface_internal.h
@@ -0,0 +1,32 @@
+//===-- scudo_interface_internal.h ------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Private Scudo interface header.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_INTERFACE_INTERNAL_H_
+#define SCUDO_INTERFACE_INTERNAL_H_
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+using __sanitizer::uptr;
+using __sanitizer::s32;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char* __scudo_default_options();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __scudo_set_rss_limit(uptr LimitMb, s32 HardLimit);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __scudo_print_stats();
+} // extern "C"
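+
+// These functions can be called directly from instrumented programs, e.g.:
+//   __scudo_set_rss_limit(2048, 0);  // example values, in Mb.
+//   __scudo_print_stats();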
+
+#endif // SCUDO_INTERFACE_INTERNAL_H_
diff --git a/compiler-rt/lib/scudo/scudo_malloc.cpp b/compiler-rt/lib/scudo/scudo_malloc.cpp
new file mode 100644
index 000000000000..a72b861e28ee
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_malloc.cpp
@@ -0,0 +1,84 @@
+//===-- scudo_malloc.cpp ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Interceptors for malloc related functions.
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_allocator.h"
+
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_platform_interceptors.h"
+
+#include <stddef.h>
+
+using namespace __scudo;
+
+extern "C" {
+INTERCEPTOR_ATTRIBUTE void free(void *ptr) {
+ scudoDeallocate(ptr, 0, 0, FromMalloc);
+}
+
+INTERCEPTOR_ATTRIBUTE void *malloc(size_t size) {
+ return scudoAllocate(size, 0, FromMalloc);
+}
+
+INTERCEPTOR_ATTRIBUTE void *realloc(void *ptr, size_t size) {
+ return scudoRealloc(ptr, size);
+}
+
+INTERCEPTOR_ATTRIBUTE void *calloc(size_t nmemb, size_t size) {
+ return scudoCalloc(nmemb, size);
+}
+
+INTERCEPTOR_ATTRIBUTE void *valloc(size_t size) {
+ return scudoValloc(size);
+}
+
+INTERCEPTOR_ATTRIBUTE
+int posix_memalign(void **memptr, size_t alignment, size_t size) {
+ return scudoPosixMemalign(memptr, alignment, size);
+}
+
+#if SANITIZER_INTERCEPT_CFREE
+INTERCEPTOR_ATTRIBUTE void cfree(void *ptr) ALIAS("free");
+#endif
+
+#if SANITIZER_INTERCEPT_MEMALIGN
+INTERCEPTOR_ATTRIBUTE void *memalign(size_t alignment, size_t size) {
+ return scudoAllocate(size, alignment, FromMemalign);
+}
+
+INTERCEPTOR_ATTRIBUTE
+void *__libc_memalign(size_t alignment, size_t size) ALIAS("memalign");
+#endif
+
+#if SANITIZER_INTERCEPT_PVALLOC
+INTERCEPTOR_ATTRIBUTE void *pvalloc(size_t size) {
+ return scudoPvalloc(size);
+}
+#endif
+
+#if SANITIZER_INTERCEPT_ALIGNED_ALLOC
+INTERCEPTOR_ATTRIBUTE void *aligned_alloc(size_t alignment, size_t size) {
+ return scudoAlignedAlloc(alignment, size);
+}
+#endif
+
+#if SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE
+INTERCEPTOR_ATTRIBUTE size_t malloc_usable_size(void *ptr) {
+ return scudoMallocUsableSize(ptr);
+}
+#endif
+
+#if SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO
+INTERCEPTOR_ATTRIBUTE int mallopt(int cmd, int value) {
+ return 0;
+}
+#endif
+} // extern "C"
diff --git a/compiler-rt/lib/scudo/scudo_new_delete.cpp b/compiler-rt/lib/scudo/scudo_new_delete.cpp
new file mode 100644
index 000000000000..03eef7f28bb9
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_new_delete.cpp
@@ -0,0 +1,107 @@
+//===-- scudo_new_delete.cpp ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Interceptors for operators new and delete.
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_allocator.h"
+#include "scudo_errors.h"
+
+#include "interception/interception.h"
+
+#include <stddef.h>
+
+using namespace __scudo;
+
+#define CXX_OPERATOR_ATTRIBUTE INTERCEPTOR_ATTRIBUTE
+
+// Fake std::nothrow_t to avoid including <new>.
+namespace std {
+struct nothrow_t {};
+enum class align_val_t: size_t {};
+} // namespace std
+
+// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
+#define OPERATOR_NEW_BODY_ALIGN(Type, Align, NoThrow) \
+ void *Ptr = scudoAllocate(size, static_cast<uptr>(Align), Type); \
+ if (!NoThrow && UNLIKELY(!Ptr)) reportOutOfMemory(size); \
+ return Ptr;
+#define OPERATOR_NEW_BODY(Type, NoThrow) \
+ OPERATOR_NEW_BODY_ALIGN(Type, 0, NoThrow)
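+
+// As an example, the plain `operator new(size_t)` below expands to roughly:
+//   void *Ptr = scudoAllocate(size, static_cast<uptr>(0), FromNew);
+//   if (UNLIKELY(!Ptr)) reportOutOfMemory(size);
+//   return Ptr;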
+
+CXX_OPERATOR_ATTRIBUTE
+void *operator new(size_t size)
+{ OPERATOR_NEW_BODY(FromNew, /*NoThrow=*/false); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new[](size_t size)
+{ OPERATOR_NEW_BODY(FromNewArray, /*NoThrow=*/false); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new(size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(FromNew, /*NoThrow=*/true); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new[](size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(FromNewArray, /*NoThrow=*/true); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new(size_t size, std::align_val_t align)
+{ OPERATOR_NEW_BODY_ALIGN(FromNew, align, /*NoThrow=*/false); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new[](size_t size, std::align_val_t align)
+{ OPERATOR_NEW_BODY_ALIGN(FromNewArray, align, /*NoThrow=*/false); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new(size_t size, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY_ALIGN(FromNew, align, /*NoThrow=*/true); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new[](size_t size, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY_ALIGN(FromNewArray, align, /*NoThrow=*/true); }
+
+#define OPERATOR_DELETE_BODY(Type) \
+ scudoDeallocate(ptr, 0, 0, Type);
+#define OPERATOR_DELETE_BODY_SIZE(Type) \
+ scudoDeallocate(ptr, size, 0, Type);
+#define OPERATOR_DELETE_BODY_ALIGN(Type) \
+ scudoDeallocate(ptr, 0, static_cast<uptr>(align), Type);
+#define OPERATOR_DELETE_BODY_SIZE_ALIGN(Type) \
+ scudoDeallocate(ptr, size, static_cast<uptr>(align), Type);
+
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr) NOEXCEPT
+{ OPERATOR_DELETE_BODY(FromNew); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr) NOEXCEPT
+{ OPERATOR_DELETE_BODY(FromNewArray); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY(FromNew); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY(FromNewArray); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, size_t size) NOEXCEPT
+{ OPERATOR_DELETE_BODY_SIZE(FromNew); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, size_t size) NOEXCEPT
+{ OPERATOR_DELETE_BODY_SIZE(FromNewArray); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t align) NOEXCEPT
+{ OPERATOR_DELETE_BODY_ALIGN(FromNew); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t align) NOEXCEPT
+{ OPERATOR_DELETE_BODY_ALIGN(FromNewArray); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY_ALIGN(FromNew); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY_ALIGN(FromNewArray); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, size_t size, std::align_val_t align) NOEXCEPT
+{ OPERATOR_DELETE_BODY_SIZE_ALIGN(FromNew); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, size_t size, std::align_val_t align) NOEXCEPT
+{ OPERATOR_DELETE_BODY_SIZE_ALIGN(FromNewArray); }
diff --git a/compiler-rt/lib/scudo/scudo_platform.h b/compiler-rt/lib/scudo/scudo_platform.h
new file mode 100644
index 000000000000..07d4b70fc8e9
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_platform.h
@@ -0,0 +1,93 @@
+//===-- scudo_platform.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo platform specific definitions.
+/// TODO(kostyak): add tests for the compile time defines.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_PLATFORM_H_
+#define SCUDO_PLATFORM_H_
+
+#include "sanitizer_common/sanitizer_allocator.h"
+
+#if !SANITIZER_LINUX && !SANITIZER_FUCHSIA
+# error "The Scudo hardened allocator is not supported on this platform."
+#endif
+
+#define SCUDO_TSD_EXCLUSIVE_SUPPORTED (!SANITIZER_ANDROID && !SANITIZER_FUCHSIA)
+
+#ifndef SCUDO_TSD_EXCLUSIVE
+// SCUDO_TSD_EXCLUSIVE wasn't defined, use a default TSD model for the platform.
+# if SANITIZER_ANDROID || SANITIZER_FUCHSIA
+// Android and Fuchsia use a pool of TSDs shared between threads.
+# define SCUDO_TSD_EXCLUSIVE 0
+# elif SANITIZER_LINUX && !SANITIZER_ANDROID
+// Non-Android Linux uses an exclusive TSD per thread.
+# define SCUDO_TSD_EXCLUSIVE 1
+# else
+# error "No default TSD model defined for this platform."
+# endif // SANITIZER_ANDROID || SANITIZER_FUCHSIA
+#endif // SCUDO_TSD_EXCLUSIVE
+
+// If the exclusive TSD model is chosen, make sure the platform supports it.
+#if SCUDO_TSD_EXCLUSIVE && !SCUDO_TSD_EXCLUSIVE_SUPPORTED
+# error "The exclusive TSD model is not supported on this platform."
+#endif
+
+// Maximum number of TSDs that can be created for the Shared model.
+#ifndef SCUDO_SHARED_TSD_POOL_SIZE
+# if SANITIZER_ANDROID
+# define SCUDO_SHARED_TSD_POOL_SIZE 2U
+# else
+# define SCUDO_SHARED_TSD_POOL_SIZE 32U
+# endif // SANITIZER_ANDROID
+#endif // SCUDO_SHARED_TSD_POOL_SIZE
+
+// The following allows the public interface functions to be disabled.
+#ifndef SCUDO_CAN_USE_PUBLIC_INTERFACE
+# define SCUDO_CAN_USE_PUBLIC_INTERFACE 1
+#endif
+
+// Hooks in the allocation & deallocation paths can become a security concern if
+// implemented improperly, or if overwritten by an attacker. Use with caution.
+#ifndef SCUDO_CAN_USE_HOOKS
+# if SANITIZER_FUCHSIA
+# define SCUDO_CAN_USE_HOOKS 1
+# else
+# define SCUDO_CAN_USE_HOOKS 0
+# endif // SANITIZER_FUCHSIA
+#endif // SCUDO_CAN_USE_HOOKS
+
+namespace __scudo {
+
+#if SANITIZER_CAN_USE_ALLOCATOR64
+# if defined(__aarch64__) && SANITIZER_ANDROID
+const uptr AllocatorSize = 0x4000000000ULL; // 256G.
+# elif defined(__aarch64__)
+const uptr AllocatorSize = 0x10000000000ULL; // 1T.
+# else
+const uptr AllocatorSize = 0x40000000000ULL; // 4T.
+# endif
+#else
+const uptr RegionSizeLog = SANITIZER_ANDROID ? 19 : 20;
+#endif // SANITIZER_CAN_USE_ALLOCATOR64
+
+#if !defined(SCUDO_SIZE_CLASS_MAP)
+# define SCUDO_SIZE_CLASS_MAP Dense
+#endif
+
+#define SIZE_CLASS_MAP_TYPE SIZE_CLASS_MAP_TYPE_(SCUDO_SIZE_CLASS_MAP)
+#define SIZE_CLASS_MAP_TYPE_(T) SIZE_CLASS_MAP_TYPE__(T)
+#define SIZE_CLASS_MAP_TYPE__(T) T##SizeClassMap
+
+typedef SIZE_CLASS_MAP_TYPE SizeClassMap;
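+// With SCUDO_SIZE_CLASS_MAP left at its default of Dense, the macro expansion
+// above resolves to `typedef DenseSizeClassMap SizeClassMap;`.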
+
+} // namespace __scudo
+
+#endif // SCUDO_PLATFORM_H_
diff --git a/compiler-rt/lib/scudo/scudo_termination.cpp b/compiler-rt/lib/scudo/scudo_termination.cpp
new file mode 100644
index 000000000000..6c7c0abc6d36
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_termination.cpp
@@ -0,0 +1,41 @@
+//===-- scudo_termination.cpp -----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// This file contains bare-bones termination functions to replace the
+/// __sanitizer ones, in order to avoid any potential abuse of the callback
+/// functionality.
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_utils.h"
+
+#include "sanitizer_common/sanitizer_common.h"
+
+namespace __sanitizer {
+
+bool AddDieCallback(DieCallbackType Callback) { return true; }
+
+bool RemoveDieCallback(DieCallbackType Callback) { return true; }
+
+void SetUserDieCallback(DieCallbackType Callback) {}
+
+void NORETURN Die() {
+ if (common_flags()->abort_on_error)
+ Abort();
+ internal__exit(common_flags()->exitcode);
+}
+
+void SetCheckFailedCallback(CheckFailedCallbackType callback) {}
+
+void NORETURN CheckFailed(const char *File, int Line, const char *Condition,
+ u64 Value1, u64 Value2) {
+ __scudo::dieWithMessage("CHECK failed at %s:%d %s (%lld, %lld)\n",
+ File, Line, Condition, Value1, Value2);
+}
+
+} // namespace __sanitizer
diff --git a/compiler-rt/lib/scudo/scudo_tsd.h b/compiler-rt/lib/scudo/scudo_tsd.h
new file mode 100644
index 000000000000..1d4e4e6f126e
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_tsd.h
@@ -0,0 +1,65 @@
+//===-- scudo_tsd.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo thread specific data definition.
+/// Implementation will differ based on the thread local storage primitives
+/// offered by the underlying platform.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_TSD_H_
+#define SCUDO_TSD_H_
+
+#include "scudo_allocator.h"
+#include "scudo_utils.h"
+
+#include <pthread.h>
+
+namespace __scudo {
+
+struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) ScudoTSD {
+ AllocatorCacheT Cache;
+ uptr QuarantineCachePlaceHolder[4];
+
+ void init();
+ void commitBack();
+
+ INLINE bool tryLock() {
+ if (Mutex.TryLock()) {
+ atomic_store_relaxed(&Precedence, 0);
+ return true;
+ }
+ if (atomic_load_relaxed(&Precedence) == 0)
+ atomic_store_relaxed(&Precedence, static_cast<uptr>(
+ MonotonicNanoTime() >> FIRST_32_SECOND_64(16, 0)));
+ return false;
+ }
+
+ INLINE void lock() {
+ atomic_store_relaxed(&Precedence, 0);
+ Mutex.Lock();
+ }
+
+ INLINE void unlock() { Mutex.Unlock(); }
+
+ INLINE uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
+
+ private:
+ StaticSpinMutex Mutex;
+ atomic_uintptr_t Precedence;
+};
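+
+// Precedence is 0 while a TSD is unlocked or freshly acquired, and otherwise
+// holds a coarse timestamp taken on the first failed tryLock(). For example,
+// in the shared TSD model, getTSDAndLockSlow() prefers the candidate with the
+// lowest non-zero Precedence, i.e. the context that has been contended for
+// the longest.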
+
+void initThread(bool MinimalInit);
+
+// TSD model specific fastpath functions definitions.
+#include "scudo_tsd_exclusive.inc"
+#include "scudo_tsd_shared.inc"
+
+} // namespace __scudo
+
+#endif // SCUDO_TSD_H_
diff --git a/compiler-rt/lib/scudo/scudo_tsd_exclusive.cpp b/compiler-rt/lib/scudo/scudo_tsd_exclusive.cpp
new file mode 100644
index 000000000000..a203a74bbcf8
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_tsd_exclusive.cpp
@@ -0,0 +1,67 @@
+//===-- scudo_tsd_exclusive.cpp ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo exclusive TSD implementation.
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_tsd.h"
+
+#if SCUDO_TSD_EXCLUSIVE
+
+namespace __scudo {
+
+static pthread_once_t GlobalInitialized = PTHREAD_ONCE_INIT;
+static pthread_key_t PThreadKey;
+
+__attribute__((tls_model("initial-exec")))
+THREADLOCAL ThreadState ScudoThreadState = ThreadNotInitialized;
+__attribute__((tls_model("initial-exec")))
+THREADLOCAL ScudoTSD TSD;
+
+// Fallback TSD for when the thread isn't initialized yet or is torn down. It
+// can be shared between multiple threads and as such must be locked.
+ScudoTSD FallbackTSD;
+
+static void teardownThread(void *Ptr) {
+ uptr I = reinterpret_cast<uptr>(Ptr);
+ // The glibc POSIX thread-local-storage deallocation routine calls
+ // user-provided destructors in a loop of up to PTHREAD_DESTRUCTOR_ITERATIONS
+ // passes. We want to be called last since other destructors might call free
+ // and the like, so we wait until the very last iteration before draining the
+ // quarantine and releasing the cache.
+ if (I > 1) {
+ // If pthread_setspecific fails, we will go ahead with the teardown.
+ if (LIKELY(pthread_setspecific(PThreadKey,
+ reinterpret_cast<void *>(I - 1)) == 0))
+ return;
+ }
+ TSD.commitBack();
+ ScudoThreadState = ThreadTornDown;
+}
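+
+// For example, with GetPthreadDestructorIterations() == 4, teardownThread is
+// first invoked with 4, re-registers itself with 3, then 2, and only commits
+// the TSD back when it is finally called with 1.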
+
+static void initOnce() {
+ CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread), 0);
+ initScudo();
+ FallbackTSD.init();
+}
+
+void initThread(bool MinimalInit) {
+ CHECK_EQ(pthread_once(&GlobalInitialized, initOnce), 0);
+ if (UNLIKELY(MinimalInit))
+ return;
+ CHECK_EQ(pthread_setspecific(PThreadKey, reinterpret_cast<void *>(
+ GetPthreadDestructorIterations())), 0);
+ TSD.init();
+ ScudoThreadState = ThreadInitialized;
+}
+
+} // namespace __scudo
+
+#endif // SCUDO_TSD_EXCLUSIVE
diff --git a/compiler-rt/lib/scudo/scudo_tsd_exclusive.inc b/compiler-rt/lib/scudo/scudo_tsd_exclusive.inc
new file mode 100644
index 000000000000..08e4d3af7316
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_tsd_exclusive.inc
@@ -0,0 +1,47 @@
+//===-- scudo_tsd_exclusive.inc ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo exclusive TSD fastpath functions implementation.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_TSD_H_
+# error "This file must be included inside scudo_tsd.h."
+#endif // SCUDO_TSD_H_
+
+#if SCUDO_TSD_EXCLUSIVE
+
+enum ThreadState : u8 {
+ ThreadNotInitialized = 0,
+ ThreadInitialized,
+ ThreadTornDown,
+};
+__attribute__((tls_model("initial-exec")))
+extern THREADLOCAL ThreadState ScudoThreadState;
+__attribute__((tls_model("initial-exec")))
+extern THREADLOCAL ScudoTSD TSD;
+
+extern ScudoTSD FallbackTSD;
+
+ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
+ if (LIKELY(ScudoThreadState != ThreadNotInitialized))
+ return;
+ initThread(MinimalInit);
+}
+
+ALWAYS_INLINE ScudoTSD *getTSDAndLock(bool *UnlockRequired) {
+ if (UNLIKELY(ScudoThreadState != ThreadInitialized)) {
+ FallbackTSD.lock();
+ *UnlockRequired = true;
+ return &FallbackTSD;
+ }
+ *UnlockRequired = false;
+ return &TSD;
+}
+
+#endif // SCUDO_TSD_EXCLUSIVE
diff --git a/compiler-rt/lib/scudo/scudo_tsd_shared.cpp b/compiler-rt/lib/scudo/scudo_tsd_shared.cpp
new file mode 100644
index 000000000000..59ad2549998c
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_tsd_shared.cpp
@@ -0,0 +1,107 @@
+//===-- scudo_tsd_shared.cpp ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo shared TSD implementation.
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_tsd.h"
+
+#if !SCUDO_TSD_EXCLUSIVE
+
+namespace __scudo {
+
+static pthread_once_t GlobalInitialized = PTHREAD_ONCE_INIT;
+pthread_key_t PThreadKey;
+
+static atomic_uint32_t CurrentIndex;
+static ScudoTSD *TSDs;
+static u32 NumberOfTSDs;
+static u32 CoPrimes[SCUDO_SHARED_TSD_POOL_SIZE];
+static u32 NumberOfCoPrimes = 0;
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+__attribute__((tls_model("initial-exec")))
+THREADLOCAL ScudoTSD *CurrentTSD;
+#endif
+
+static void initOnce() {
+ CHECK_EQ(pthread_key_create(&PThreadKey, NULL), 0);
+ initScudo();
+ NumberOfTSDs = Min(Max(1U, GetNumberOfCPUsCached()),
+ static_cast<u32>(SCUDO_SHARED_TSD_POOL_SIZE));
+ TSDs = reinterpret_cast<ScudoTSD *>(
+ MmapOrDie(sizeof(ScudoTSD) * NumberOfTSDs, "ScudoTSDs"));
+ for (u32 I = 0; I < NumberOfTSDs; I++) {
+ TSDs[I].init();
+ u32 A = I + 1;
+ u32 B = NumberOfTSDs;
+ while (B != 0) { const u32 T = A; A = B; B = T % B; }
+ if (A == 1)
+ CoPrimes[NumberOfCoPrimes++] = I + 1;
+ }
+}
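+
+// The inner while loop above is Euclid's algorithm: increments I + 1 that are
+// coprime with NumberOfTSDs are kept, so that stepping by such an increment
+// modulo NumberOfTSDs in getTSDAndLockSlow() never revisits a context. For
+// example, NumberOfTSDs == 8 yields CoPrimes == {1, 3, 5, 7}, while a prime
+// pool size of 7 yields {1, 2, 3, 4, 5, 6}.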
+
+ALWAYS_INLINE void setCurrentTSD(ScudoTSD *TSD) {
+#if SANITIZER_ANDROID
+ *get_android_tls_ptr() = reinterpret_cast<uptr>(TSD);
+#elif SANITIZER_LINUX
+ CurrentTSD = TSD;
+#else
+ CHECK_EQ(pthread_setspecific(PThreadKey, reinterpret_cast<void *>(TSD)), 0);
+#endif // SANITIZER_ANDROID
+}
+
+void initThread(bool MinimalInit) {
+ pthread_once(&GlobalInitialized, initOnce);
+ // Initial context assignment is done in a plain round-robin fashion.
+ u32 Index = atomic_fetch_add(&CurrentIndex, 1, memory_order_relaxed);
+ setCurrentTSD(&TSDs[Index % NumberOfTSDs]);
+}
+
+ScudoTSD *getTSDAndLockSlow(ScudoTSD *TSD) {
+ if (NumberOfTSDs > 1) {
+ // Use the Precedence of the current TSD as our random seed. Since we are in
+ // the slow path, it means that tryLock failed, and as a result it's very
+ // likely that said Precedence is non-zero.
+ u32 RandState = static_cast<u32>(TSD->getPrecedence());
+ const u32 R = Rand(&RandState);
+ const u32 Inc = CoPrimes[R % NumberOfCoPrimes];
+ u32 Index = R % NumberOfTSDs;
+ uptr LowestPrecedence = UINTPTR_MAX;
+ ScudoTSD *CandidateTSD = nullptr;
+ // Go randomly through at most 4 contexts and find a candidate.
+ for (u32 I = 0; I < Min(4U, NumberOfTSDs); I++) {
+ if (TSDs[Index].tryLock()) {
+ setCurrentTSD(&TSDs[Index]);
+ return &TSDs[Index];
+ }
+ const uptr Precedence = TSDs[Index].getPrecedence();
+ // A 0 precedence here means another thread just locked this TSD.
+ if (Precedence && Precedence < LowestPrecedence) {
+ CandidateTSD = &TSDs[Index];
+ LowestPrecedence = Precedence;
+ }
+ Index += Inc;
+ if (Index >= NumberOfTSDs)
+ Index -= NumberOfTSDs;
+ }
+ if (CandidateTSD) {
+ CandidateTSD->lock();
+ setCurrentTSD(CandidateTSD);
+ return CandidateTSD;
+ }
+ }
+ // Last resort, stick with the current one.
+ TSD->lock();
+ return TSD;
+}
+
+} // namespace __scudo
+
+#endif // !SCUDO_TSD_EXCLUSIVE
diff --git a/compiler-rt/lib/scudo/scudo_tsd_shared.inc b/compiler-rt/lib/scudo/scudo_tsd_shared.inc
new file mode 100644
index 000000000000..8f3362dd3d71
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_tsd_shared.inc
@@ -0,0 +1,55 @@
+//===-- scudo_tsd_shared.inc ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo shared TSD fastpath functions implementation.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_TSD_H_
+# error "This file must be included inside scudo_tsd.h."
+#endif // SCUDO_TSD_H_
+
+#if !SCUDO_TSD_EXCLUSIVE
+
+extern pthread_key_t PThreadKey;
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+__attribute__((tls_model("initial-exec")))
+extern THREADLOCAL ScudoTSD *CurrentTSD;
+#endif
+
+ALWAYS_INLINE ScudoTSD* getCurrentTSD() {
+#if SANITIZER_ANDROID
+ return reinterpret_cast<ScudoTSD *>(*get_android_tls_ptr());
+#elif SANITIZER_LINUX
+ return CurrentTSD;
+#else
+ return reinterpret_cast<ScudoTSD *>(pthread_getspecific(PThreadKey));
+#endif // SANITIZER_ANDROID
+}
+
+ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
+ if (LIKELY(getCurrentTSD()))
+ return;
+ initThread(MinimalInit);
+}
+
+ScudoTSD *getTSDAndLockSlow(ScudoTSD *TSD);
+
+ALWAYS_INLINE ScudoTSD *getTSDAndLock(bool *UnlockRequired) {
+ ScudoTSD *TSD = getCurrentTSD();
+ DCHECK(TSD && "No TSD associated with the current thread!");
+ *UnlockRequired = true;
+ // Try to lock the currently associated context.
+ if (TSD->tryLock())
+ return TSD;
+ // If it failed, go the slow path.
+ return getTSDAndLockSlow(TSD);
+}
+
+#endif // !SCUDO_TSD_EXCLUSIVE
diff --git a/compiler-rt/lib/scudo/scudo_utils.cpp b/compiler-rt/lib/scudo/scudo_utils.cpp
new file mode 100644
index 000000000000..5e76a4a30f10
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_utils.cpp
@@ -0,0 +1,134 @@
+//===-- scudo_utils.cpp -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Platform specific utility functions.
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_utils.h"
+
+#if defined(__x86_64__) || defined(__i386__)
+# include <cpuid.h>
+#elif defined(__arm__) || defined(__aarch64__)
+# include "sanitizer_common/sanitizer_getauxval.h"
+# if SANITIZER_FUCHSIA
+# include <zircon/syscalls.h>
+# include <zircon/features.h>
+# elif SANITIZER_POSIX
+# include "sanitizer_common/sanitizer_posix.h"
+# include <fcntl.h>
+# endif
+#endif
+
+#include <stdarg.h>
+
+// TODO(kostyak): remove __sanitizer *Printf uses in favor of our own, less
+// complicated string formatting code. The following is a temporary workaround
+// to be able to use __sanitizer::VSNPrintf.
+namespace __sanitizer {
+
+extern int VSNPrintf(char *buff, int buff_length, const char *format,
+ va_list args);
+
+} // namespace __sanitizer
+
+namespace __scudo {
+
+FORMAT(1, 2) void NORETURN dieWithMessage(const char *Format, ...) {
+ static const char ScudoError[] = "Scudo ERROR: ";
+ static constexpr uptr PrefixSize = sizeof(ScudoError) - 1;
+ // Our messages are tiny; 256 characters is more than enough.
+ char Message[256];
+ va_list Args;
+ va_start(Args, Format);
+ internal_memcpy(Message, ScudoError, PrefixSize);
+ VSNPrintf(Message + PrefixSize, sizeof(Message) - PrefixSize, Format, Args);
+ va_end(Args);
+ LogMessageOnPrintf(Message);
+ if (common_flags()->abort_on_error)
+ SetAbortMessage(Message);
+ RawWrite(Message);
+ Die();
+}
+
+#if defined(__x86_64__) || defined(__i386__)
+// i386 and x86_64 specific code to detect CRC32 hardware support via CPUID.
+// CRC32 requires the SSE 4.2 instruction set.
+# ifndef bit_SSE4_2
+# define bit_SSE4_2 bit_SSE42 // clang and gcc have different defines.
+# endif
+bool hasHardwareCRC32() {
+ u32 Eax, Ebx, Ecx, Edx;
+ __get_cpuid(0, &Eax, &Ebx, &Ecx, &Edx);
+ const bool IsIntel = (Ebx == signature_INTEL_ebx) &&
+ (Edx == signature_INTEL_edx) &&
+ (Ecx == signature_INTEL_ecx);
+ const bool IsAMD = (Ebx == signature_AMD_ebx) &&
+ (Edx == signature_AMD_edx) &&
+ (Ecx == signature_AMD_ecx);
+ if (!IsIntel && !IsAMD)
+ return false;
+ __get_cpuid(1, &Eax, &Ebx, &Ecx, &Edx);
+ return !!(Ecx & bit_SSE4_2);
+}
+#elif defined(__arm__) || defined(__aarch64__)
+// For ARM and AArch64, hardware CRC32 support is indicated in the AT_HWCAP
+// auxiliary vector.
+# ifndef AT_HWCAP
+# define AT_HWCAP 16
+# endif
+# ifndef HWCAP_CRC32
+# define HWCAP_CRC32 (1 << 7) // HWCAP_CRC32 is missing on older platforms.
+# endif
+# if SANITIZER_POSIX
+bool hasHardwareCRC32ARMPosix() {
+ uptr F = internal_open("/proc/self/auxv", O_RDONLY);
+ if (internal_iserror(F))
+ return false;
+ struct { uptr Tag; uptr Value; } Entry = { 0, 0 };
+ for (;;) {
+ uptr N = internal_read(F, &Entry, sizeof(Entry));
+ if (internal_iserror(N) || N != sizeof(Entry) ||
+ (Entry.Tag == 0 && Entry.Value == 0) || Entry.Tag == AT_HWCAP)
+ break;
+ }
+ internal_close(F);
+ return (Entry.Tag == AT_HWCAP && (Entry.Value & HWCAP_CRC32) != 0);
+}
+# else
+bool hasHardwareCRC32ARMPosix() { return false; }
+# endif // SANITIZER_POSIX
+
+// Bionic doesn't initialize its globals early enough. This causes issues when
+// trying to access them from a preinit_array (b/25751302) or from another
+// constructor called before the libc one (b/68046352). __progname is
+// initialized after the other globals, so we can check its value to know if
+// calling getauxval is safe.
+extern "C" SANITIZER_WEAK_ATTRIBUTE char *__progname;
+INLINE bool areBionicGlobalsInitialized() {
+ return !SANITIZER_ANDROID || (&__progname && __progname);
+}
+
+bool hasHardwareCRC32() {
+#if SANITIZER_FUCHSIA
+ u32 HWCap;
+ zx_status_t Status = zx_system_get_features(ZX_FEATURE_KIND_CPU, &HWCap);
+ if (Status != ZX_OK || (HWCap & ZX_ARM64_FEATURE_ISA_CRC32) == 0)
+ return false;
+ return true;
+#else
+ if (&getauxval && areBionicGlobalsInitialized())
+ return !!(getauxval(AT_HWCAP) & HWCAP_CRC32);
+ return hasHardwareCRC32ARMPosix();
+#endif // SANITIZER_FUCHSIA
+}
+#else
+bool hasHardwareCRC32() { return false; }
+#endif // defined(__x86_64__) || defined(__i386__)
+
+} // namespace __scudo
diff --git a/compiler-rt/lib/scudo/scudo_utils.h b/compiler-rt/lib/scudo/scudo_utils.h
new file mode 100644
index 000000000000..a8dfbdeb3b70
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_utils.h
@@ -0,0 +1,36 @@
+//===-- scudo_utils.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Header for scudo_utils.cpp.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_UTILS_H_
+#define SCUDO_UTILS_H_
+
+#include "sanitizer_common/sanitizer_common.h"
+
+#include <string.h>
+
+namespace __scudo {
+
+template <class Dest, class Source>
+INLINE Dest bit_cast(const Source& source) {
+ static_assert(sizeof(Dest) == sizeof(Source), "Sizes are not equal!");
+ Dest dest;
+ memcpy(&dest, &source, sizeof(dest));
+ return dest;
+}
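+
+// For instance, Scudo uses this to reinterpret an unpacked chunk header as its
+// packed integer representation (both types being of equal size) without
+// violating strict aliasing rules.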
+
+void NORETURN dieWithMessage(const char *Format, ...);
+
+bool hasHardwareCRC32();
+
+} // namespace __scudo
+
+#endif // SCUDO_UTILS_H_
diff --git a/compiler-rt/lib/scudo/standalone/allocator_config.h b/compiler-rt/lib/scudo/standalone/allocator_config.h
new file mode 100644
index 000000000000..62c6f2875106
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/allocator_config.h
@@ -0,0 +1,80 @@
+//===-- allocator_config.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_ALLOCATOR_CONFIG_H_
+#define SCUDO_ALLOCATOR_CONFIG_H_
+
+#include "combined.h"
+#include "common.h"
+#include "flags.h"
+#include "primary32.h"
+#include "primary64.h"
+#include "size_class_map.h"
+#include "tsd_exclusive.h"
+#include "tsd_shared.h"
+
+namespace scudo {
+
+// Default configurations for various platforms.
+
+struct DefaultConfig {
+ using SizeClassMap = DefaultSizeClassMap;
+#if SCUDO_CAN_USE_PRIMARY64
+ // 1GB Regions
+ typedef SizeClassAllocator64<SizeClassMap, 30U> Primary;
+#else
+ // 512KB regions
+ typedef SizeClassAllocator32<SizeClassMap, 19U> Primary;
+#endif
+ template <class A> using TSDRegistryT = TSDRegistryExT<A>; // Exclusive
+};
+
+struct AndroidConfig {
+ using SizeClassMap = AndroidSizeClassMap;
+#if SCUDO_CAN_USE_PRIMARY64
+ // 1GB regions
+ typedef SizeClassAllocator64<SizeClassMap, 30U> Primary;
+#else
+ // 512KB regions
+ typedef SizeClassAllocator32<SizeClassMap, 19U> Primary;
+#endif
+ template <class A>
+ using TSDRegistryT = TSDRegistrySharedT<A, 2U>; // Shared, max 2 TSDs.
+};
+
+struct AndroidSvelteConfig {
+ using SizeClassMap = SvelteSizeClassMap;
+#if SCUDO_CAN_USE_PRIMARY64
+ // 512MB regions
+ typedef SizeClassAllocator64<SizeClassMap, 29U> Primary;
+#else
+ // 64KB regions
+ typedef SizeClassAllocator32<SizeClassMap, 16U> Primary;
+#endif
+ template <class A>
+ using TSDRegistryT = TSDRegistrySharedT<A, 1U>; // Shared, only 1 TSD.
+};
+
+struct FuchsiaConfig {
+ // 1GB Regions
+ typedef SizeClassAllocator64<DefaultSizeClassMap, 30U> Primary;
+ template <class A>
+ using TSDRegistryT = TSDRegistrySharedT<A, 8U>; // Shared, max 8 TSDs.
+};
+
+#if SCUDO_ANDROID
+typedef AndroidConfig Config;
+#elif SCUDO_FUCHSIA
+typedef FuchsiaConfig Config;
+#else
+typedef DefaultConfig Config;
+#endif
+
+} // namespace scudo
+
+#endif // SCUDO_ALLOCATOR_CONFIG_H_
diff --git a/compiler-rt/lib/scudo/standalone/atomic_helpers.h b/compiler-rt/lib/scudo/standalone/atomic_helpers.h
new file mode 100644
index 000000000000..47037d764e25
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/atomic_helpers.h
@@ -0,0 +1,139 @@
+//===-- atomic_helpers.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_ATOMIC_H_
+#define SCUDO_ATOMIC_H_
+
+#include "internal_defs.h"
+
+namespace scudo {
+
+enum memory_order {
+ memory_order_relaxed = 0,
+ memory_order_consume = 1,
+ memory_order_acquire = 2,
+ memory_order_release = 3,
+ memory_order_acq_rel = 4,
+ memory_order_seq_cst = 5
+};
+COMPILER_CHECK(memory_order_relaxed == __ATOMIC_RELAXED);
+COMPILER_CHECK(memory_order_consume == __ATOMIC_CONSUME);
+COMPILER_CHECK(memory_order_acquire == __ATOMIC_ACQUIRE);
+COMPILER_CHECK(memory_order_release == __ATOMIC_RELEASE);
+COMPILER_CHECK(memory_order_acq_rel == __ATOMIC_ACQ_REL);
+COMPILER_CHECK(memory_order_seq_cst == __ATOMIC_SEQ_CST);
+
+struct atomic_u8 {
+ typedef u8 Type;
+ volatile Type ValDoNotUse;
+};
+
+struct atomic_u16 {
+ typedef u16 Type;
+ volatile Type ValDoNotUse;
+};
+
+struct atomic_s32 {
+ typedef s32 Type;
+ volatile Type ValDoNotUse;
+};
+
+struct atomic_u32 {
+ typedef u32 Type;
+ volatile Type ValDoNotUse;
+};
+
+struct atomic_u64 {
+ typedef u64 Type;
+ // On 32-bit platforms u64 is not necessarily aligned on 8 bytes.
+ ALIGNED(8) volatile Type ValDoNotUse;
+};
+
+struct atomic_uptr {
+ typedef uptr Type;
+ volatile Type ValDoNotUse;
+};
+
+template <typename T>
+INLINE typename T::Type atomic_load(const volatile T *A, memory_order MO) {
+ DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+ typename T::Type V;
+ __atomic_load(&A->ValDoNotUse, &V, MO);
+ return V;
+}
+
+template <typename T>
+INLINE void atomic_store(volatile T *A, typename T::Type V, memory_order MO) {
+ DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+ __atomic_store(&A->ValDoNotUse, &V, MO);
+}
+
+INLINE void atomic_thread_fence(memory_order) { __sync_synchronize(); }
+
+template <typename T>
+INLINE typename T::Type atomic_fetch_add(volatile T *A, typename T::Type V,
+ memory_order MO) {
+ DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+ return __atomic_fetch_add(&A->ValDoNotUse, V, MO);
+}
+
+template <typename T>
+INLINE typename T::Type atomic_fetch_sub(volatile T *A, typename T::Type V,
+ memory_order MO) {
+ DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+ return __atomic_fetch_sub(&A->ValDoNotUse, V, MO);
+}
+
+template <typename T>
+INLINE typename T::Type atomic_exchange(volatile T *A, typename T::Type V,
+ memory_order MO) {
+ DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+ typename T::Type R;
+ __atomic_exchange(&A->ValDoNotUse, &V, &R, MO);
+ return R;
+}
+
+template <typename T>
+INLINE bool atomic_compare_exchange_strong(volatile T *A, typename T::Type *Cmp,
+ typename T::Type Xchg,
+ memory_order MO) {
+ return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, false, MO,
+ __ATOMIC_RELAXED);
+}
+
+template <typename T>
+INLINE bool atomic_compare_exchange_weak(volatile T *A, typename T::Type *Cmp,
+ typename T::Type Xchg,
+ memory_order MO) {
+ return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, true, MO,
+ __ATOMIC_RELAXED);
+}
+
+// Clutter-reducing helpers.
+
+template <typename T>
+INLINE typename T::Type atomic_load_relaxed(const volatile T *A) {
+ return atomic_load(A, memory_order_relaxed);
+}
+
+template <typename T>
+INLINE void atomic_store_relaxed(volatile T *A, typename T::Type V) {
+ atomic_store(A, V, memory_order_relaxed);
+}
+
+template <typename T>
+INLINE typename T::Type atomic_compare_exchange(volatile T *A,
+ typename T::Type Cmp,
+ typename T::Type Xchg) {
+ atomic_compare_exchange_strong(A, &Cmp, Xchg, memory_order_acquire);
+ return Cmp;
+}
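+
+// atomic_compare_exchange() returns the value that was actually observed; the
+// caller can detect success by comparing it with the expected value, e.g.:
+//   if (atomic_compare_exchange(&A, Expected, Desired) == Expected) {
+//     // The exchange took place.
+//   }
+// where `A`, `Expected` and `Desired` are placeholders.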
+
+} // namespace scudo
+
+#endif // SCUDO_ATOMIC_H_
diff --git a/compiler-rt/lib/scudo/standalone/bytemap.h b/compiler-rt/lib/scudo/standalone/bytemap.h
new file mode 100644
index 000000000000..caeeb2fac879
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/bytemap.h
@@ -0,0 +1,111 @@
+//===-- bytemap.h -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_BYTEMAP_H_
+#define SCUDO_BYTEMAP_H_
+
+#include "atomic_helpers.h"
+#include "common.h"
+#include "mutex.h"
+
+namespace scudo {
+
+template <uptr Size> class FlatByteMap {
+public:
+ void initLinkerInitialized() {
+ Map = reinterpret_cast<u8 *>(map(nullptr, Size, "scudo:bytemap"));
+ }
+ void init() { initLinkerInitialized(); }
+
+ void unmapTestOnly() { unmap(reinterpret_cast<void *>(Map), Size); }
+
+ void set(uptr Index, u8 Value) {
+ DCHECK_LT(Index, Size);
+ DCHECK_EQ(0U, Map[Index]);
+ Map[Index] = Value;
+ }
+ u8 operator[](uptr Index) {
+ DCHECK_LT(Index, Size);
+ return Map[Index];
+ }
+
+private:
+ u8 *Map;
+};
+
+template <uptr Level1Size, uptr Level2Size> class TwoLevelByteMap {
+public:
+ void initLinkerInitialized() {
+ Level1Map = reinterpret_cast<atomic_uptr *>(
+ map(nullptr, sizeof(atomic_uptr) * Level1Size, "scudo:bytemap"));
+ }
+ void init() {
+ Mutex.init();
+ initLinkerInitialized();
+ }
+
+ void reset() {
+ for (uptr I = 0; I < Level1Size; I++) {
+ u8 *P = get(I);
+ if (!P)
+ continue;
+ unmap(P, Level2Size);
+ }
+ memset(Level1Map, 0, sizeof(atomic_uptr) * Level1Size);
+ }
+
+ void unmapTestOnly() {
+ reset();
+ unmap(reinterpret_cast<void *>(Level1Map),
+ sizeof(atomic_uptr) * Level1Size);
+ }
+
+ uptr size() const { return Level1Size * Level2Size; }
+
+ void set(uptr Index, u8 Value) {
+ DCHECK_LT(Index, Level1Size * Level2Size);
+ u8 *Level2Map = getOrCreate(Index / Level2Size);
+ DCHECK_EQ(0U, Level2Map[Index % Level2Size]);
+ Level2Map[Index % Level2Size] = Value;
+ }
+
+ u8 operator[](uptr Index) const {
+ DCHECK_LT(Index, Level1Size * Level2Size);
+ u8 *Level2Map = get(Index / Level2Size);
+ if (!Level2Map)
+ return 0;
+ return Level2Map[Index % Level2Size];
+ }
+
+private:
+ u8 *get(uptr Index) const {
+ DCHECK_LT(Index, Level1Size);
+ return reinterpret_cast<u8 *>(
+ atomic_load(&Level1Map[Index], memory_order_acquire));
+ }
+
+ u8 *getOrCreate(uptr Index) {
+ u8 *Res = get(Index);
+ if (!Res) {
+ ScopedLock L(Mutex);
+ if (!(Res = get(Index))) {
+ Res = reinterpret_cast<u8 *>(map(nullptr, Level2Size, "scudo:bytemap"));
+ atomic_store(&Level1Map[Index], reinterpret_cast<uptr>(Res),
+ memory_order_release);
+ }
+ }
+ return Res;
+ }
+
+ atomic_uptr *Level1Map;
+ HybridMutex Mutex;
+};
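+
+// An index decomposes as Index = Level1Slot * Level2Size + Level2Offset. For
+// example, with Level2Size == 4096, set(10000, V) lazily maps level-2 array #2
+// and stores V at offset 1808 within it.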
+
+} // namespace scudo
+
+#endif // SCUDO_BYTEMAP_H_
diff --git a/compiler-rt/lib/scudo/standalone/checksum.cpp b/compiler-rt/lib/scudo/standalone/checksum.cpp
new file mode 100644
index 000000000000..f713f5a81609
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/checksum.cpp
@@ -0,0 +1,70 @@
+//===-- checksum.cpp --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "checksum.h"
+#include "atomic_helpers.h"
+
+#if defined(__x86_64__) || defined(__i386__)
+#include <cpuid.h>
+#elif defined(__arm__) || defined(__aarch64__)
+#if SCUDO_FUCHSIA
+#include <zircon/features.h>
+#include <zircon/syscalls.h>
+#else
+#include <sys/auxv.h>
+#endif
+#endif
+
+namespace scudo {
+
+Checksum HashAlgorithm = {Checksum::BSD};
+
+#if defined(__x86_64__) || defined(__i386__)
+// i386 and x86_64 specific code to detect CRC32 hardware support via CPUID.
+// CRC32 requires the SSE 4.2 instruction set.
+#ifndef bit_SSE4_2
+#define bit_SSE4_2 bit_SSE42 // clang and gcc have different defines.
+#endif
+
+bool hasHardwareCRC32() {
+ u32 Eax, Ebx = 0, Ecx = 0, Edx = 0;
+ __get_cpuid(0, &Eax, &Ebx, &Ecx, &Edx);
+ const bool IsIntel = (Ebx == signature_INTEL_ebx) &&
+ (Edx == signature_INTEL_edx) &&
+ (Ecx == signature_INTEL_ecx);
+ const bool IsAMD = (Ebx == signature_AMD_ebx) && (Edx == signature_AMD_edx) &&
+ (Ecx == signature_AMD_ecx);
+ if (!IsIntel && !IsAMD)
+ return false;
+ __get_cpuid(1, &Eax, &Ebx, &Ecx, &Edx);
+ return !!(Ecx & bit_SSE4_2);
+}
+
+#elif defined(__arm__) || defined(__aarch64__)
+#ifndef AT_HWCAP
+#define AT_HWCAP 16
+#endif
+#ifndef HWCAP_CRC32
+#define HWCAP_CRC32 (1U << 7) // HWCAP_CRC32 is missing on older platforms.
+#endif
+
+bool hasHardwareCRC32() {
+#if SCUDO_FUCHSIA
+ u32 HWCap;
+ const zx_status_t Status =
+ zx_system_get_features(ZX_FEATURE_KIND_CPU, &HWCap);
+ if (Status != ZX_OK)
+ return false;
+ return !!(HWCap & ZX_ARM64_FEATURE_ISA_CRC32);
+#else
+ return !!(getauxval(AT_HWCAP) & HWCAP_CRC32);
+#endif // SCUDO_FUCHSIA
+}
+#endif // defined(__x86_64__) || defined(__i386__)
+
+} // namespace scudo
diff --git a/compiler-rt/lib/scudo/standalone/checksum.h b/compiler-rt/lib/scudo/standalone/checksum.h
new file mode 100644
index 000000000000..092342fd6efb
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/checksum.h
@@ -0,0 +1,54 @@
+//===-- checksum.h ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_CHECKSUM_H_
+#define SCUDO_CHECKSUM_H_
+
+#include "internal_defs.h"
+
+// Hardware CRC32 is supported at compilation via the following:
+// - for i386 & x86_64: -msse4.2
+// - for ARM & AArch64: -march=armv8-a+crc or -mcrc
+// An additional check must be performed at runtime as well to make sure the
+// emitted instructions are valid on the target host.
+
+#ifdef __SSE4_2__
+#include <smmintrin.h>
+#define CRC32_INTRINSIC FIRST_32_SECOND_64(_mm_crc32_u32, _mm_crc32_u64)
+#endif
+#ifdef __ARM_FEATURE_CRC32
+#include <arm_acle.h>
+#define CRC32_INTRINSIC FIRST_32_SECOND_64(__crc32cw, __crc32cd)
+#endif
+
+namespace scudo {
+
+enum class Checksum : u8 {
+ BSD = 0,
+ HardwareCRC32 = 1,
+};
+
+// The BSD checksum, unlike a software CRC32, doesn't use any array lookup. We
+// save significantly on memory accesses, as well as the 1KB CRC32 table, on
+// platforms that do not support hardware CRC32. The checksum itself is 16-bit,
+// which is at odds with CRC32, but enough for our needs.
+INLINE u16 computeBSDChecksum(u16 Sum, uptr Data) {
+ for (u8 I = 0; I < sizeof(Data); I++) {
+ Sum = static_cast<u16>((Sum >> 1) | ((Sum & 1) << 15));
+ Sum = static_cast<u16>(Sum + (Data & 0xff));
+ Data >>= 8;
+ }
+ return Sum;
+}
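+
+// Each iteration rotates the 16-bit sum right by one bit, then adds the next
+// byte. For example, starting from Sum == 0x0001 with a zero byte, the
+// rotation alone yields 0x8000 (the low bit wraps around to the top).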
+
+bool hasHardwareCRC32();
+WEAK u32 computeHardwareCRC32(u32 Crc, uptr Data);
+
+} // namespace scudo
+
+#endif // SCUDO_CHECKSUM_H_
diff --git a/compiler-rt/lib/scudo/standalone/chunk.h b/compiler-rt/lib/scudo/standalone/chunk.h
new file mode 100644
index 000000000000..9ae75823ba77
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/chunk.h
@@ -0,0 +1,156 @@
+//===-- chunk.h -------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_CHUNK_H_
+#define SCUDO_CHUNK_H_
+
+#include "platform.h"
+
+#include "atomic_helpers.h"
+#include "checksum.h"
+#include "common.h"
+#include "report.h"
+
+namespace scudo {
+
+extern Checksum HashAlgorithm;
+
+INLINE u16 computeChecksum(u32 Seed, uptr Value, uptr *Array, uptr ArraySize) {
+ // If the hardware CRC32 feature is defined here, it was enabled everywhere,
+ // as opposed to only for crc32_hw.cpp. This means that other
+ // hardware-specific instructions were likely emitted at other places, and as
+ // a result there is no reason not to use it here.
+#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+ u32 Crc = static_cast<u32>(CRC32_INTRINSIC(Seed, Value));
+ for (uptr I = 0; I < ArraySize; I++)
+ Crc = static_cast<u32>(CRC32_INTRINSIC(Crc, Array[I]));
+ return static_cast<u16>(Crc ^ (Crc >> 16));
+#else
+ if (HashAlgorithm == Checksum::HardwareCRC32) {
+ u32 Crc = computeHardwareCRC32(Seed, Value);
+ for (uptr I = 0; I < ArraySize; I++)
+ Crc = computeHardwareCRC32(Crc, Array[I]);
+ return static_cast<u16>(Crc ^ (Crc >> 16));
+ } else {
+ u16 Checksum = computeBSDChecksum(static_cast<u16>(Seed), Value);
+ for (uptr I = 0; I < ArraySize; I++)
+ Checksum = computeBSDChecksum(Checksum, Array[I]);
+ return Checksum;
+ }
+#endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+}
+
+namespace Chunk {
+
+// Note that in an ideal world, `State` and `Origin` would be `enum class`, and
+// the associated `UnpackedHeader` fields would be of their respective enum
+// class type, but https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61414 prevents
+// this, as GCC errors out, complaining that the number of bits is not enough.
+enum Origin : u8 {
+ Malloc = 0,
+ New = 1,
+ NewArray = 2,
+ Memalign = 3,
+};
+
+enum State : u8 { Available = 0, Allocated = 1, Quarantined = 2 };
+
+typedef u64 PackedHeader;
+// Update the 'Mask' constants to reflect changes in this structure.
+struct UnpackedHeader {
+ uptr ClassId : 8;
+ u8 State : 2;
+ u8 Origin : 2;
+ uptr SizeOrUnusedBytes : 20;
+ uptr Offset : 16;
+ uptr Checksum : 16;
+};
+typedef atomic_u64 AtomicPackedHeader;
+COMPILER_CHECK(sizeof(UnpackedHeader) == sizeof(PackedHeader));
+
+// Those constants are required to silence some -Werror=conversion errors when
+// assigning values to the related bitfield variables.
+constexpr uptr ClassIdMask = (1UL << 8) - 1;
+constexpr u8 StateMask = (1U << 2) - 1;
+constexpr u8 OriginMask = (1U << 2) - 1;
+constexpr uptr SizeOrUnusedBytesMask = (1UL << 20) - 1;
+constexpr uptr OffsetMask = (1UL << 16) - 1;
+constexpr uptr ChecksumMask = (1UL << 16) - 1;
+
+constexpr uptr getHeaderSize() {
+ return roundUpTo(sizeof(PackedHeader), 1U << SCUDO_MIN_ALIGNMENT_LOG);
+}
+
+INLINE AtomicPackedHeader *getAtomicHeader(void *Ptr) {
+ return reinterpret_cast<AtomicPackedHeader *>(reinterpret_cast<uptr>(Ptr) -
+ getHeaderSize());
+}
+
+INLINE
+const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) {
+ return reinterpret_cast<const AtomicPackedHeader *>(
+ reinterpret_cast<uptr>(Ptr) - getHeaderSize());
+}
+
+// We do not need a cryptographically strong hash for the checksum, but a
+// CRC-type function that can alert us in the event a header is invalid or
+// corrupted; ideally slightly better than a simple XOR of all the fields.
+static INLINE u16 computeHeaderChecksum(u32 Cookie, const void *Ptr,
+ UnpackedHeader *Header) {
+ UnpackedHeader ZeroChecksumHeader = *Header;
+ ZeroChecksumHeader.Checksum = 0;
+ uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
+ memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder));
+ return computeChecksum(Cookie, reinterpret_cast<uptr>(Ptr), HeaderHolder,
+ ARRAY_SIZE(HeaderHolder));
+}
+
+INLINE void storeHeader(u32 Cookie, void *Ptr,
+ UnpackedHeader *NewUnpackedHeader) {
+ NewUnpackedHeader->Checksum =
+ computeHeaderChecksum(Cookie, Ptr, NewUnpackedHeader);
+ PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
+ atomic_store_relaxed(getAtomicHeader(Ptr), NewPackedHeader);
+}
+
+INLINE
+void loadHeader(u32 Cookie, const void *Ptr,
+ UnpackedHeader *NewUnpackedHeader) {
+ PackedHeader NewPackedHeader = atomic_load_relaxed(getConstAtomicHeader(Ptr));
+ *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
+ if (UNLIKELY(NewUnpackedHeader->Checksum !=
+ computeHeaderChecksum(Cookie, Ptr, NewUnpackedHeader)))
+ reportHeaderCorruption(const_cast<void *>(Ptr));
+}
+
+INLINE void compareExchangeHeader(u32 Cookie, void *Ptr,
+ UnpackedHeader *NewUnpackedHeader,
+ UnpackedHeader *OldUnpackedHeader) {
+ NewUnpackedHeader->Checksum =
+ computeHeaderChecksum(Cookie, Ptr, NewUnpackedHeader);
+ PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
+ PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
+ if (UNLIKELY(!atomic_compare_exchange_strong(
+ getAtomicHeader(Ptr), &OldPackedHeader, NewPackedHeader,
+ memory_order_relaxed)))
+ reportHeaderRace(Ptr);
+}
+
+INLINE
+bool isValid(u32 Cookie, const void *Ptr, UnpackedHeader *NewUnpackedHeader) {
+ PackedHeader NewPackedHeader = atomic_load_relaxed(getConstAtomicHeader(Ptr));
+ *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
+ return NewUnpackedHeader->Checksum ==
+ computeHeaderChecksum(Cookie, Ptr, NewUnpackedHeader);
+}
+
+} // namespace Chunk
+
+} // namespace scudo
+
+#endif // SCUDO_CHUNK_H_
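As an aside, the store/load/verify cycle implemented by storeHeader() and loadHeader() boils down to: zero the Checksum field, checksum the remaining bits salted with the cookie and the pointer, store the result in the field, and recompute it on load. A minimal standalone sketch of that idea, using a simple XOR fold in place of the CRC32/BSD checksums (names and fold function are made up, for illustration only):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    struct Header {
      uint64_t ClassId : 8;
      uint64_t State : 2;
      uint64_t Origin : 2;
      uint64_t SizeOrUnusedBytes : 20;
      uint64_t Offset : 16;
      uint64_t Checksum : 16;
    };
    static_assert(sizeof(Header) == sizeof(uint64_t), "");

    // Stand-in for computeChecksum(): fold the packed header and the salt down
    // to 16 bits. Much weaker than CRC32, but enough to show the mechanism.
    static uint16_t fold(uint32_t Cookie, uint64_t PtrBits, Header H) {
      H.Checksum = 0;
      uint64_t Packed;
      memcpy(&Packed, &H, sizeof(Packed));
      uint64_t X = Packed ^ PtrBits ^ Cookie;
      X ^= X >> 32;
      X ^= X >> 16;
      return static_cast<uint16_t>(X);
    }

    int main() {
      const uint32_t Cookie = 0xabad1dea;
      const uint64_t Ptr = 0x1000;
      Header H = {};
      H.ClassId = 3;
      H.State = 1; // Allocated
      H.SizeOrUnusedBytes = 128;
      H.Checksum = fold(Cookie, Ptr, H);          // what storeHeader() does
      assert(fold(Cookie, Ptr, H) == H.Checksum); // what loadHeader() verifies
      return 0;
    }
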
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
new file mode 100644
index 000000000000..60be1dd20d39
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -0,0 +1,596 @@
+//===-- combined.h ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_COMBINED_H_
+#define SCUDO_COMBINED_H_
+
+#include "chunk.h"
+#include "common.h"
+#include "flags.h"
+#include "flags_parser.h"
+#include "interface.h"
+#include "local_cache.h"
+#include "quarantine.h"
+#include "report.h"
+#include "secondary.h"
+#include "tsd.h"
+
+namespace scudo {
+
+template <class Params> class Allocator {
+public:
+ using PrimaryT = typename Params::Primary;
+ using CacheT = typename PrimaryT::CacheT;
+ typedef Allocator<Params> ThisT;
+ typedef typename Params::template TSDRegistryT<ThisT> TSDRegistryT;
+
+ struct QuarantineCallback {
+ explicit QuarantineCallback(ThisT &Instance, CacheT &LocalCache)
+ : Allocator(Instance), Cache(LocalCache) {}
+
+ // Chunk recycling function, returns a quarantined chunk to the backend,
+ // first making sure it hasn't been tampered with.
+ void recycle(void *Ptr) {
+ Chunk::UnpackedHeader Header;
+ Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
+ if (UNLIKELY(Header.State != Chunk::State::Quarantined))
+ reportInvalidChunkState(AllocatorAction::Recycling, Ptr);
+
+ Chunk::UnpackedHeader NewHeader = Header;
+ NewHeader.State = Chunk::State::Available;
+ Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
+
+ void *BlockBegin = Allocator::getBlockBegin(Ptr, &NewHeader);
+ const uptr ClassId = NewHeader.ClassId;
+ if (LIKELY(ClassId))
+ Cache.deallocate(ClassId, BlockBegin);
+ else
+ Allocator.Secondary.deallocate(BlockBegin);
+ }
+
+ // We take a shortcut when allocating a quarantine batch by working with the
+ // appropriate class ID instead of using Size. The compiler should optimize
+ // the class ID computation and work with the associated cache directly.
+ void *allocate(UNUSED uptr Size) {
+ const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
+ sizeof(QuarantineBatch) + Chunk::getHeaderSize());
+ void *Ptr = Cache.allocate(QuarantineClassId);
+ // Quarantine batch allocation failure is fatal.
+ if (UNLIKELY(!Ptr))
+ reportOutOfMemory(SizeClassMap::getSizeByClassId(QuarantineClassId));
+
+ Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) +
+ Chunk::getHeaderSize());
+ Chunk::UnpackedHeader Header = {};
+ Header.ClassId = QuarantineClassId & Chunk::ClassIdMask;
+ Header.SizeOrUnusedBytes = sizeof(QuarantineBatch);
+ Header.State = Chunk::State::Allocated;
+ Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
+
+ return Ptr;
+ }
+
+ void deallocate(void *Ptr) {
+ const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
+ sizeof(QuarantineBatch) + Chunk::getHeaderSize());
+ Chunk::UnpackedHeader Header;
+ Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
+
+ if (UNLIKELY(Header.State != Chunk::State::Allocated))
+ reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
+ DCHECK_EQ(Header.ClassId, QuarantineClassId);
+ DCHECK_EQ(Header.Offset, 0);
+ DCHECK_EQ(Header.SizeOrUnusedBytes, sizeof(QuarantineBatch));
+
+ Chunk::UnpackedHeader NewHeader = Header;
+ NewHeader.State = Chunk::State::Available;
+ Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
+ Cache.deallocate(QuarantineClassId,
+ reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
+ Chunk::getHeaderSize()));
+ }
+
+ private:
+ ThisT &Allocator;
+ CacheT &Cache;
+ };
+
+ typedef GlobalQuarantine<QuarantineCallback, void> QuarantineT;
+ typedef typename QuarantineT::CacheT QuarantineCacheT;
+
+ void initLinkerInitialized() {
+ performSanityChecks();
+
+    // Check if hardware CRC32 is supported in the binary and by the platform;
+    // if so, opt for the hardware CRC32 version of the checksum.
+ if (&computeHardwareCRC32 && hasHardwareCRC32())
+ HashAlgorithm = Checksum::HardwareCRC32;
+
+ if (UNLIKELY(!getRandom(&Cookie, sizeof(Cookie))))
+ Cookie = static_cast<u32>(getMonotonicTime() ^
+ (reinterpret_cast<uptr>(this) >> 4));
+
+ initFlags();
+ reportUnrecognizedFlags();
+
+ // Store some flags locally.
+ Options.MayReturnNull = getFlags()->may_return_null;
+ Options.ZeroContents = getFlags()->zero_contents;
+ Options.DeallocTypeMismatch = getFlags()->dealloc_type_mismatch;
+ Options.DeleteSizeMismatch = getFlags()->delete_size_mismatch;
+ Options.QuarantineMaxChunkSize =
+ static_cast<u32>(getFlags()->quarantine_max_chunk_size);
+
+ Stats.initLinkerInitialized();
+ Primary.initLinkerInitialized(getFlags()->release_to_os_interval_ms);
+ Secondary.initLinkerInitialized(&Stats);
+
+ Quarantine.init(
+ static_cast<uptr>(getFlags()->quarantine_size_kb << 10),
+ static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10));
+ }
+
+ void reset() { memset(this, 0, sizeof(*this)); }
+
+ void unmapTestOnly() {
+ TSDRegistry.unmapTestOnly();
+ Primary.unmapTestOnly();
+ }
+
+ TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
+
+ void initCache(CacheT *Cache) { Cache->init(&Stats, &Primary); }
+
+ // Release the resources used by a TSD, which involves:
+ // - draining the local quarantine cache to the global quarantine;
+ // - releasing the cached pointers back to the Primary;
+ // - unlinking the local stats from the global ones (destroying the cache does
+ // the last two items).
+ void commitBack(TSD<ThisT> *TSD) {
+ Quarantine.drain(&TSD->QuarantineCache,
+ QuarantineCallback(*this, TSD->Cache));
+ TSD->Cache.destroy(&Stats);
+ }
+
+ NOINLINE void *allocate(uptr Size, Chunk::Origin Origin,
+ uptr Alignment = MinAlignment,
+ bool ZeroContents = false) {
+ initThreadMaybe();
+
+ if (UNLIKELY(Alignment > MaxAlignment)) {
+ if (Options.MayReturnNull)
+ return nullptr;
+ reportAlignmentTooBig(Alignment, MaxAlignment);
+ }
+ if (Alignment < MinAlignment)
+ Alignment = MinAlignment;
+
+ // If the requested size happens to be 0 (more common than you might think),
+ // allocate MinAlignment bytes on top of the header. Then add the extra
+ // bytes required to fulfill the alignment requirements: we allocate enough
+ // to be sure that there will be an address in the block that will satisfy
+ // the alignment.
+ const uptr NeededSize =
+ roundUpTo(Size, MinAlignment) +
+ ((Alignment > MinAlignment) ? Alignment : Chunk::getHeaderSize());
+
+ // Takes care of extravagantly large sizes as well as integer overflows.
+ if (UNLIKELY(Size >= MaxAllowedMallocSize ||
+ NeededSize >= MaxAllowedMallocSize)) {
+ if (Options.MayReturnNull)
+ return nullptr;
+ reportAllocationSizeTooBig(Size, NeededSize, MaxAllowedMallocSize);
+ }
+
+ void *Block;
+ uptr ClassId;
+ uptr BlockEnd;
+ if (LIKELY(PrimaryT::canAllocate(NeededSize))) {
+ ClassId = SizeClassMap::getClassIdBySize(NeededSize);
+ DCHECK_NE(ClassId, 0U);
+ bool UnlockRequired;
+ auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
+ Block = TSD->Cache.allocate(ClassId);
+ if (UnlockRequired)
+ TSD->unlock();
+ } else {
+ ClassId = 0;
+ Block = Secondary.allocate(NeededSize, Alignment, &BlockEnd);
+ }
+
+ if (UNLIKELY(!Block)) {
+ if (Options.MayReturnNull)
+ return nullptr;
+ reportOutOfMemory(NeededSize);
+ }
+
+ // We only need to zero the contents for Primary backed allocations. This
+ // condition is not necessarily unlikely, but since memset is costly, we
+ // might as well mark it as such.
+ if (UNLIKELY((ZeroContents || Options.ZeroContents) && ClassId))
+ memset(Block, 0, PrimaryT::getSizeByClassId(ClassId));
+
+ Chunk::UnpackedHeader Header = {};
+ uptr UserPtr = reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize();
+ if (UNLIKELY(!isAligned(UserPtr, Alignment))) {
+ const uptr AlignedUserPtr = roundUpTo(UserPtr, Alignment);
+ const uptr Offset = AlignedUserPtr - UserPtr;
+ DCHECK_GT(Offset, 2 * sizeof(u32));
+ // The BlockMarker has no security purpose, but is specifically meant for
+ // the chunk iteration function that can be used in debugging situations.
+ // It is the only situation where we have to locate the start of a chunk
+ // based on its block address.
+ reinterpret_cast<u32 *>(Block)[0] = BlockMarker;
+ reinterpret_cast<u32 *>(Block)[1] = static_cast<u32>(Offset);
+ UserPtr = AlignedUserPtr;
+ Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
+ }
+ Header.ClassId = ClassId & Chunk::ClassIdMask;
+ Header.State = Chunk::State::Allocated;
+ Header.Origin = Origin & Chunk::OriginMask;
+ Header.SizeOrUnusedBytes = (ClassId ? Size : BlockEnd - (UserPtr + Size)) &
+ Chunk::SizeOrUnusedBytesMask;
+ void *Ptr = reinterpret_cast<void *>(UserPtr);
+ Chunk::storeHeader(Cookie, Ptr, &Header);
+
+ if (&__scudo_allocate_hook)
+ __scudo_allocate_hook(Ptr, Size);
+
+ return Ptr;
+ }
+
+ NOINLINE void deallocate(void *Ptr, Chunk::Origin Origin, uptr DeleteSize = 0,
+ UNUSED uptr Alignment = MinAlignment) {
+ // For a deallocation, we only ensure minimal initialization, meaning thread
+ // local data will be left uninitialized for now (when using ELF TLS). The
+ // fallback cache will be used instead. This is a workaround for a situation
+ // where the only heap operation performed in a thread would be a free past
+ // the TLS destructors, ending up in initialized thread specific data never
+ // being destroyed properly. Any other heap operation will do a full init.
+ initThreadMaybe(/*MinimalInit=*/true);
+
+ if (&__scudo_deallocate_hook)
+ __scudo_deallocate_hook(Ptr);
+
+ if (UNLIKELY(!Ptr))
+ return;
+ if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment)))
+ reportMisalignedPointer(AllocatorAction::Deallocating, Ptr);
+
+ Chunk::UnpackedHeader Header;
+ Chunk::loadHeader(Cookie, Ptr, &Header);
+
+ if (UNLIKELY(Header.State != Chunk::State::Allocated))
+ reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
+ if (Options.DeallocTypeMismatch) {
+ if (Header.Origin != Origin) {
+        // With the exception of memalign'd chunks, which can still be free'd.
+ if (UNLIKELY(Header.Origin != Chunk::Origin::Memalign ||
+ Origin != Chunk::Origin::Malloc))
+ reportDeallocTypeMismatch(AllocatorAction::Deallocating, Ptr,
+ Header.Origin, Origin);
+ }
+ }
+
+ const uptr Size = getSize(Ptr, &Header);
+ if (DeleteSize && Options.DeleteSizeMismatch) {
+ if (UNLIKELY(DeleteSize != Size))
+ reportDeleteSizeMismatch(Ptr, DeleteSize, Size);
+ }
+
+ quarantineOrDeallocateChunk(Ptr, &Header, Size);
+ }
+
+ void *reallocate(void *OldPtr, uptr NewSize, uptr Alignment = MinAlignment) {
+ initThreadMaybe();
+
+ // The following cases are handled by the C wrappers.
+ DCHECK_NE(OldPtr, nullptr);
+ DCHECK_NE(NewSize, 0);
+
+ if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(OldPtr), MinAlignment)))
+ reportMisalignedPointer(AllocatorAction::Reallocating, OldPtr);
+
+ Chunk::UnpackedHeader OldHeader;
+ Chunk::loadHeader(Cookie, OldPtr, &OldHeader);
+
+ if (UNLIKELY(OldHeader.State != Chunk::State::Allocated))
+ reportInvalidChunkState(AllocatorAction::Reallocating, OldPtr);
+
+ // Pointer has to be allocated with a malloc-type function. Some
+ // applications think that it is OK to realloc a memalign'ed pointer, which
+ // will trigger this check. It really isn't.
+ if (Options.DeallocTypeMismatch) {
+ if (UNLIKELY(OldHeader.Origin != Chunk::Origin::Malloc))
+ reportDeallocTypeMismatch(AllocatorAction::Reallocating, OldPtr,
+ OldHeader.Origin, Chunk::Origin::Malloc);
+ }
+
+ void *BlockBegin = getBlockBegin(OldPtr, &OldHeader);
+ uptr BlockEnd;
+ uptr OldSize;
+ const uptr ClassId = OldHeader.ClassId;
+ if (LIKELY(ClassId)) {
+ BlockEnd = reinterpret_cast<uptr>(BlockBegin) +
+ SizeClassMap::getSizeByClassId(ClassId);
+ OldSize = OldHeader.SizeOrUnusedBytes;
+ } else {
+ BlockEnd = SecondaryT::getBlockEnd(BlockBegin);
+ OldSize = BlockEnd -
+ (reinterpret_cast<uptr>(OldPtr) + OldHeader.SizeOrUnusedBytes);
+ }
+ // If the new chunk still fits in the previously allocated block (with a
+ // reasonable delta), we just keep the old block, and update the chunk
+ // header to reflect the size change.
+ if (reinterpret_cast<uptr>(OldPtr) + NewSize <= BlockEnd) {
+ const uptr Delta =
+ OldSize < NewSize ? NewSize - OldSize : OldSize - NewSize;
+ if (Delta <= SizeClassMap::MaxSize / 2) {
+ Chunk::UnpackedHeader NewHeader = OldHeader;
+ NewHeader.SizeOrUnusedBytes =
+ (ClassId ? NewSize
+ : BlockEnd - (reinterpret_cast<uptr>(OldPtr) + NewSize)) &
+ Chunk::SizeOrUnusedBytesMask;
+ Chunk::compareExchangeHeader(Cookie, OldPtr, &NewHeader, &OldHeader);
+ return OldPtr;
+ }
+ }
+
+ // Otherwise we allocate a new one, and deallocate the old one. Some
+ // allocators will allocate an even larger chunk (by a fixed factor) to
+ // allow for potential further in-place realloc. The gains of such a trick
+ // are currently unclear.
+ void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
+ if (NewPtr) {
+ const uptr OldSize = getSize(OldPtr, &OldHeader);
+ memcpy(NewPtr, OldPtr, Min(NewSize, OldSize));
+ quarantineOrDeallocateChunk(OldPtr, &OldHeader, OldSize);
+ }
+ return NewPtr;
+ }
+
+ // TODO(kostyak): while this locks the Primary & Secondary, it still allows
+ // pointers to be fetched from the TSD. We ultimately want to
+ // lock the registry as well. For now, it's good enough.
+ void disable() {
+ initThreadMaybe();
+ Primary.disable();
+ Secondary.disable();
+ }
+
+ void enable() {
+ initThreadMaybe();
+ Secondary.enable();
+ Primary.enable();
+ }
+
+  // The function returns the number of bytes required to store the statistics,
+  // which might be larger than the number of bytes provided. Note that the
+  // statistics are not necessarily constant between calls to this function, so
+  // neither is the required length. This can be called with a null buffer or a
+  // zero size for buffer sizing purposes.
+ uptr getStats(char *Buffer, uptr Size) {
+ ScopedString Str(1024);
+ disable();
+ const uptr Length = getStats(&Str) + 1;
+ enable();
+ if (Length < Size)
+ Size = Length;
+ if (Buffer && Size) {
+ memcpy(Buffer, Str.data(), Size);
+ Buffer[Size - 1] = '\0';
+ }
+ return Length;
+ }
+
+ void printStats() {
+ ScopedString Str(1024);
+ disable();
+ getStats(&Str);
+ enable();
+ Str.output();
+ }
+
+ void releaseToOS() { Primary.releaseToOS(); }
+
+ // Iterate over all chunks and call a callback for all busy chunks located
+ // within the provided memory range. Said callback must not use this allocator
+ // or a deadlock can ensue. This fits Android's malloc_iterate() needs.
+ void iterateOverChunks(uptr Base, uptr Size, iterate_callback Callback,
+ void *Arg) {
+ initThreadMaybe();
+ const uptr From = Base;
+ const uptr To = Base + Size;
+ auto Lambda = [this, From, To, Callback, Arg](uptr Block) {
+ if (Block < From || Block >= To)
+ return;
+ uptr ChunkSize;
+ const uptr ChunkBase = getChunkFromBlock(Block, &ChunkSize);
+ if (ChunkBase != InvalidChunk)
+ Callback(ChunkBase, ChunkSize, Arg);
+ };
+ Primary.iterateOverBlocks(Lambda);
+ Secondary.iterateOverBlocks(Lambda);
+ }
+
+ bool canReturnNull() {
+ initThreadMaybe();
+ return Options.MayReturnNull;
+ }
+
+ // TODO(kostyak): implement this as a "backend" to mallopt.
+ bool setOption(UNUSED uptr Option, UNUSED uptr Value) { return false; }
+
+ // Return the usable size for a given chunk. Technically we lie, as we just
+ // report the actual size of a chunk. This is done to counteract code actively
+ // writing past the end of a chunk (like sqlite3) when the usable size allows
+ // for it, which then forces realloc to copy the usable size of a chunk as
+ // opposed to its actual size.
+ uptr getUsableSize(const void *Ptr) {
+ initThreadMaybe();
+ if (UNLIKELY(!Ptr))
+ return 0;
+ Chunk::UnpackedHeader Header;
+ Chunk::loadHeader(Cookie, Ptr, &Header);
+ // Getting the usable size of a chunk only makes sense if it's allocated.
+ if (UNLIKELY(Header.State != Chunk::State::Allocated))
+ reportInvalidChunkState(AllocatorAction::Sizing, const_cast<void *>(Ptr));
+ return getSize(Ptr, &Header);
+ }
+
+ void getStats(StatCounters S) {
+ initThreadMaybe();
+ Stats.get(S);
+ }
+
+private:
+ typedef MapAllocator SecondaryT;
+ typedef typename PrimaryT::SizeClassMap SizeClassMap;
+
+ static const uptr MinAlignmentLog = SCUDO_MIN_ALIGNMENT_LOG;
+ static const uptr MaxAlignmentLog = 24U; // 16 MB seems reasonable.
+ static const uptr MinAlignment = 1UL << MinAlignmentLog;
+ static const uptr MaxAlignment = 1UL << MaxAlignmentLog;
+ static const uptr MaxAllowedMallocSize =
+ FIRST_32_SECOND_64(1UL << 31, 1ULL << 40);
+
+ // Constants used by the chunk iteration mechanism.
+ static const u32 BlockMarker = 0x44554353U;
+ static const uptr InvalidChunk = ~static_cast<uptr>(0);
+
+ GlobalStats Stats;
+ TSDRegistryT TSDRegistry;
+ PrimaryT Primary;
+ SecondaryT Secondary;
+ QuarantineT Quarantine;
+
+ u32 Cookie;
+
+ struct {
+ u8 MayReturnNull : 1; // may_return_null
+ u8 ZeroContents : 1; // zero_contents
+ u8 DeallocTypeMismatch : 1; // dealloc_type_mismatch
+ u8 DeleteSizeMismatch : 1; // delete_size_mismatch
+ u32 QuarantineMaxChunkSize; // quarantine_max_chunk_size
+ } Options;
+
+ // The following might get optimized out by the compiler.
+ NOINLINE void performSanityChecks() {
+ // Verify that the header offset field can hold the maximum offset. In the
+ // case of the Secondary allocator, it takes care of alignment and the
+ // offset will always be small. In the case of the Primary, the worst case
+ // scenario happens in the last size class, when the backend allocation
+ // would already be aligned on the requested alignment, which would happen
+ // to be the maximum alignment that would fit in that size class. As a
+ // result, the maximum offset will be at most the maximum alignment for the
+ // last size class minus the header size, in multiples of MinAlignment.
+ Chunk::UnpackedHeader Header = {};
+ const uptr MaxPrimaryAlignment = 1UL << getMostSignificantSetBitIndex(
+ SizeClassMap::MaxSize - MinAlignment);
+ const uptr MaxOffset =
+ (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
+ Header.Offset = MaxOffset & Chunk::OffsetMask;
+ if (UNLIKELY(Header.Offset != MaxOffset))
+ reportSanityCheckError("offset");
+
+ // Verify that we can fit the maximum size or amount of unused bytes in the
+ // header. Given that the Secondary fits the allocation to a page, the worst
+ // case scenario happens in the Primary. It will depend on the second to
+ // last and last class sizes, as well as the dynamic base for the Primary.
+ // The following is an over-approximation that works for our needs.
+ const uptr MaxSizeOrUnusedBytes = SizeClassMap::MaxSize - 1;
+ Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
+ if (UNLIKELY(Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes))
+ reportSanityCheckError("size (or unused bytes)");
+
+ const uptr LargestClassId = SizeClassMap::LargestClassId;
+ Header.ClassId = LargestClassId;
+ if (UNLIKELY(Header.ClassId != LargestClassId))
+ reportSanityCheckError("class ID");
+ }
+
+ static INLINE void *getBlockBegin(const void *Ptr,
+ Chunk::UnpackedHeader *Header) {
+ return reinterpret_cast<void *>(
+ reinterpret_cast<uptr>(Ptr) - Chunk::getHeaderSize() -
+ (static_cast<uptr>(Header->Offset) << MinAlignmentLog));
+ }
+
+ // Return the size of a chunk as requested during its allocation.
+ INLINE uptr getSize(const void *Ptr, Chunk::UnpackedHeader *Header) {
+ const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
+ if (LIKELY(Header->ClassId))
+ return SizeOrUnusedBytes;
+ return SecondaryT::getBlockEnd(getBlockBegin(Ptr, Header)) -
+ reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
+ }
+
+ ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
+ TSDRegistry.initThreadMaybe(this, MinimalInit);
+ }
+
+ void quarantineOrDeallocateChunk(void *Ptr, Chunk::UnpackedHeader *Header,
+ uptr Size) {
+ Chunk::UnpackedHeader NewHeader = *Header;
+    // If the quarantine is disabled, or the actual size of the chunk is 0 or
+    // larger than the maximum allowed, return it directly to the backend.
+ const bool BypassQuarantine = !Quarantine.getCacheSize() || !Size ||
+ (Size > Options.QuarantineMaxChunkSize);
+ if (BypassQuarantine) {
+ NewHeader.State = Chunk::State::Available;
+ Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
+ void *BlockBegin = getBlockBegin(Ptr, &NewHeader);
+ const uptr ClassId = NewHeader.ClassId;
+ if (LIKELY(ClassId)) {
+ bool UnlockRequired;
+ auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
+ TSD->Cache.deallocate(ClassId, BlockBegin);
+ if (UnlockRequired)
+ TSD->unlock();
+ } else {
+ Secondary.deallocate(BlockBegin);
+ }
+ } else {
+ NewHeader.State = Chunk::State::Quarantined;
+ Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
+ bool UnlockRequired;
+ auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
+ Quarantine.put(&TSD->QuarantineCache,
+ QuarantineCallback(*this, TSD->Cache), Ptr, Size);
+ if (UnlockRequired)
+ TSD->unlock();
+ }
+ }
+
+ // This only cares about valid busy chunks. This might change in the future.
+ uptr getChunkFromBlock(uptr Block, uptr *Size) {
+ u32 Offset = 0;
+ if (reinterpret_cast<u32 *>(Block)[0] == BlockMarker)
+ Offset = reinterpret_cast<u32 *>(Block)[1];
+ const uptr P = Block + Offset + Chunk::getHeaderSize();
+ const void *Ptr = reinterpret_cast<const void *>(P);
+ Chunk::UnpackedHeader Header;
+ if (!Chunk::isValid(Cookie, Ptr, &Header) ||
+ Header.State != Chunk::State::Allocated)
+ return InvalidChunk;
+ if (Size)
+ *Size = getSize(Ptr, &Header);
+ return P;
+ }
+
+ uptr getStats(ScopedString *Str) {
+ Primary.getStats(Str);
+ Secondary.getStats(Str);
+ Quarantine.getStats(Str);
+ return Str->length();
+ }
+};
+
+} // namespace scudo
+
+#endif // SCUDO_COMBINED_H_
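The getStats(char *, uptr) contract above lends itself to the usual two-pass pattern: a first call with a null buffer to learn the required length, then a second call with an adequately sized buffer. A hedged sketch of a caller (the Allocator instantiation is assumed to exist elsewhere):

    #include <cstdio>
    #include <vector>

    template <class AllocatorT> void dumpStats(AllocatorT &Instance) {
      // First pass: null buffer, zero size; only the required length comes back.
      const auto Length = Instance.getStats(nullptr, 0);
      std::vector<char> Buffer(Length);
      // Second pass: the buffer gets filled and NUL-terminated. The statistics
      // may have changed in between, so the output is best-effort.
      Instance.getStats(Buffer.data(), Buffer.size());
      printf("%s", Buffer.data());
    }
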
diff --git a/compiler-rt/lib/scudo/standalone/common.cpp b/compiler-rt/lib/scudo/standalone/common.cpp
new file mode 100644
index 000000000000..d93bfc59b3ca
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/common.cpp
@@ -0,0 +1,32 @@
+//===-- common.cpp ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "common.h"
+#include "atomic_helpers.h"
+
+namespace scudo {
+
+uptr PageSizeCached;
+uptr getPageSize();
+
+uptr getPageSizeSlow() {
+ PageSizeCached = getPageSize();
+ CHECK_NE(PageSizeCached, 0);
+ return PageSizeCached;
+}
+
+// Fatal internal map() or unmap() error (potentially OOM related).
+void NORETURN dieOnMapUnmapError(bool OutOfMemory) {
+ outputRaw("Scudo ERROR: internal map or unmap failure");
+ if (OutOfMemory)
+ outputRaw(" (OOM)");
+ outputRaw("\n");
+ die();
+}
+
+} // namespace scudo
diff --git a/compiler-rt/lib/scudo/standalone/common.h b/compiler-rt/lib/scudo/standalone/common.h
new file mode 100644
index 000000000000..c015d1ca5669
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/common.h
@@ -0,0 +1,176 @@
+//===-- common.h ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_COMMON_H_
+#define SCUDO_COMMON_H_
+
+#include "internal_defs.h"
+
+#include "fuchsia.h"
+#include "linux.h"
+
+#include <stddef.h>
+#include <string.h>
+
+namespace scudo {
+
+template <class Dest, class Source> INLINE Dest bit_cast(const Source &S) {
+ COMPILER_CHECK(sizeof(Dest) == sizeof(Source));
+ Dest D;
+ memcpy(&D, &S, sizeof(D));
+ return D;
+}
+
+INLINE constexpr uptr roundUpTo(uptr X, uptr Boundary) {
+ return (X + Boundary - 1) & ~(Boundary - 1);
+}
+
+INLINE constexpr uptr roundDownTo(uptr X, uptr Boundary) {
+ return X & ~(Boundary - 1);
+}
+
+INLINE constexpr bool isAligned(uptr X, uptr Alignment) {
+ return (X & (Alignment - 1)) == 0;
+}
+
+template <class T> constexpr T Min(T A, T B) { return A < B ? A : B; }
+
+template <class T> constexpr T Max(T A, T B) { return A > B ? A : B; }
+
+template <class T> void Swap(T &A, T &B) {
+ T Tmp = A;
+ A = B;
+ B = Tmp;
+}
+
+INLINE bool isPowerOfTwo(uptr X) { return (X & (X - 1)) == 0; }
+
+INLINE uptr getMostSignificantSetBitIndex(uptr X) {
+ DCHECK_NE(X, 0U);
+ return SCUDO_WORDSIZE - 1U - static_cast<uptr>(__builtin_clzl(X));
+}
+
+INLINE uptr roundUpToPowerOfTwo(uptr Size) {
+ DCHECK(Size);
+ if (isPowerOfTwo(Size))
+ return Size;
+ const uptr Up = getMostSignificantSetBitIndex(Size);
+ DCHECK_LT(Size, (1UL << (Up + 1)));
+ DCHECK_GT(Size, (1UL << Up));
+ return 1UL << (Up + 1);
+}
+
+INLINE uptr getLeastSignificantSetBitIndex(uptr X) {
+ DCHECK_NE(X, 0U);
+ return static_cast<uptr>(__builtin_ctzl(X));
+}
+
+INLINE uptr getLog2(uptr X) {
+ DCHECK(isPowerOfTwo(X));
+ return getLeastSignificantSetBitIndex(X);
+}
+
+INLINE u32 getRandomU32(u32 *State) {
+ // ANSI C linear congruential PRNG (16-bit output).
+ // return (*State = *State * 1103515245 + 12345) >> 16;
+ // XorShift (32-bit output).
+ *State ^= *State << 13;
+ *State ^= *State >> 17;
+ *State ^= *State << 5;
+ return *State;
+}
+
+INLINE u32 getRandomModN(u32 *State, u32 N) {
+ return getRandomU32(State) % N; // [0, N)
+}
+
+template <typename T> INLINE void shuffle(T *A, u32 N, u32 *RandState) {
+ if (N <= 1)
+ return;
+ u32 State = *RandState;
+ for (u32 I = N - 1; I > 0; I--)
+ Swap(A[I], A[getRandomModN(&State, I + 1)]);
+ *RandState = State;
+}
+
+// Hardware specific inlinable functions.
+
+INLINE void yieldProcessor(u8 Count) {
+#if defined(__i386__) || defined(__x86_64__)
+ __asm__ __volatile__("" ::: "memory");
+ for (u8 I = 0; I < Count; I++)
+ __asm__ __volatile__("pause");
+#elif defined(__aarch64__) || defined(__arm__)
+ __asm__ __volatile__("" ::: "memory");
+ for (u8 I = 0; I < Count; I++)
+ __asm__ __volatile__("yield");
+#endif
+ __asm__ __volatile__("" ::: "memory");
+}
+
+// Platform specific functions.
+
+extern uptr PageSizeCached;
+uptr getPageSizeSlow();
+INLINE uptr getPageSizeCached() {
+ // Bionic uses a hardcoded value.
+ if (SCUDO_ANDROID)
+ return 4096U;
+ if (LIKELY(PageSizeCached))
+ return PageSizeCached;
+ return getPageSizeSlow();
+}
+
+u32 getNumberOfCPUs();
+
+const char *getEnv(const char *Name);
+
+u64 getMonotonicTime();
+
+// Our randomness gathering function is limited to 256 bytes to ensure we get
+// as many bytes as requested, and avoid interruptions (on Linux).
+constexpr uptr MaxRandomLength = 256U;
+bool getRandom(void *Buffer, uptr Length, bool Blocking = false);
+
+// Platform memory mapping functions.
+
+#define MAP_ALLOWNOMEM (1U << 0)
+#define MAP_NOACCESS (1U << 1)
+#define MAP_RESIZABLE (1U << 2)
+
+// Our platform memory mapping use is restricted to 3 scenarios:
+// - reserve memory at a random address (MAP_NOACCESS);
+// - commit memory in a previously reserved space;
+// - commit memory at a random address.
+// As such, only a subset of parameter combinations is valid, which is checked
+// by the function implementation. The Data parameter allows passing opaque
+// platform-specific data to the function.
+// Returns nullptr on error or dies if MAP_ALLOWNOMEM is not specified.
+void *map(void *Addr, uptr Size, const char *Name, uptr Flags = 0,
+ MapPlatformData *Data = nullptr);
+
+// Indicates that we are getting rid of the whole mapping, which might have
+// further consequences on Data, depending on the platform.
+#define UNMAP_ALL (1U << 0)
+
+void unmap(void *Addr, uptr Size, uptr Flags = 0,
+ MapPlatformData *Data = nullptr);
+
+void releasePagesToOS(uptr BaseAddress, uptr Offset, uptr Size,
+ MapPlatformData *Data = nullptr);
+
+// Internal map & unmap fatal error. This must not call map().
+void NORETURN dieOnMapUnmapError(bool OutOfMemory = false);
+
+// Logging related functions.
+
+void setAbortMessage(const char *Message);
+
+} // namespace scudo
+
+#endif // SCUDO_COMMON_H_
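A sketch of the reserve-then-commit pattern these primitives are designed around (the first two scenarios from the comment above), assuming a Linux-like platform where MapPlatformData can be left null; the function name is made up for illustration:

    #include "common.h"

    namespace scudo {

    void *reserveAndCommitOnePage() {
      const uptr PageSize = getPageSizeCached();
      const uptr ReservedSize = 16 * PageSize;
      // Reserve an inaccessible range; allow graceful failure.
      void *Base = map(nullptr, ReservedSize, "scudo:example",
                       MAP_NOACCESS | MAP_ALLOWNOMEM);
      if (UNLIKELY(!Base))
        return nullptr;
      // Commit a readable/writable page at the start of the reservation.
      void *Committed = map(Base, PageSize, "scudo:example", MAP_ALLOWNOMEM);
      if (UNLIKELY(!Committed)) {
        unmap(Base, ReservedSize);
        return nullptr;
      }
      return Committed;
    }

    } // namespace scudo
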
diff --git a/compiler-rt/lib/scudo/standalone/crc32_hw.cpp b/compiler-rt/lib/scudo/standalone/crc32_hw.cpp
new file mode 100644
index 000000000000..62841ba51019
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/crc32_hw.cpp
@@ -0,0 +1,19 @@
+//===-- crc32_hw.cpp --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "checksum.h"
+
+namespace scudo {
+
+#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+u32 computeHardwareCRC32(u32 Crc, uptr Data) {
+ return static_cast<u32>(CRC32_INTRINSIC(Crc, Data));
+}
+#endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+
+} // namespace scudo
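This translation unit is the one expected to be built with the relevant hardware CRC32 flags when the rest of the code is not; since computeHardwareCRC32 is declared WEAK in checksum.h, its presence can be tested at runtime. A condensed sketch of that selection (the same check appears in combined.h):

    #include "checksum.h"

    namespace scudo {

    static Checksum pickChecksum() {
      // &computeHardwareCRC32 is null if crc32_hw.cpp wasn't built in (weak
      // symbol); hasHardwareCRC32() then probes the CPU at runtime.
      if (&computeHardwareCRC32 && hasHardwareCRC32())
        return Checksum::HardwareCRC32;
      return Checksum::BSD;
    }

    } // namespace scudo
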
diff --git a/compiler-rt/lib/scudo/standalone/flags.cpp b/compiler-rt/lib/scudo/standalone/flags.cpp
new file mode 100644
index 000000000000..1e970ae49505
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/flags.cpp
@@ -0,0 +1,57 @@
+//===-- flags.cpp -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "flags.h"
+#include "common.h"
+#include "flags_parser.h"
+#include "interface.h"
+
+namespace scudo {
+
+Flags *getFlags() {
+ static Flags F;
+ return &F;
+}
+
+void Flags::setDefaults() {
+#define SCUDO_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "flags.inc"
+#undef SCUDO_FLAG
+}
+
+void registerFlags(FlagParser *Parser, Flags *F) {
+#define SCUDO_FLAG(Type, Name, DefaultValue, Description) \
+ Parser->registerFlag(#Name, Description, FlagType::FT_##Type, \
+ reinterpret_cast<void *>(&F->Name));
+#include "flags.inc"
+#undef SCUDO_FLAG
+}
+
+static const char *getCompileDefinitionScudoDefaultOptions() {
+#ifdef SCUDO_DEFAULT_OPTIONS
+ return STRINGIFY(SCUDO_DEFAULT_OPTIONS);
+#else
+ return "";
+#endif
+}
+
+static const char *getScudoDefaultOptions() {
+ return (&__scudo_default_options) ? __scudo_default_options() : "";
+}
+
+void initFlags() {
+ Flags *F = getFlags();
+ F->setDefaults();
+ FlagParser Parser;
+ registerFlags(&Parser, F);
+ Parser.parseString(getCompileDefinitionScudoDefaultOptions());
+ Parser.parseString(getScudoDefaultOptions());
+ Parser.parseString(getEnv("SCUDO_OPTIONS"));
+}
+
+} // namespace scudo
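Note that initFlags() parses SCUDO_OPTIONS last, so it overrides both the compile-time SCUDO_DEFAULT_OPTIONS definition and __scudo_default_options(). The parser accepts name=value pairs separated by spaces, commas or colons; a hypothetical way of setting it from a test driver, before the allocator initializes:

    #include <cstdlib>

    static void setScudoOptionsForTest() {
      setenv("SCUDO_OPTIONS",
             "quarantine_size_kb=256:thread_local_quarantine_size_kb=64:"
             "quarantine_max_chunk_size=2048,delete_size_mismatch=true",
             /*overwrite=*/1);
    }
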
diff --git a/compiler-rt/lib/scudo/standalone/flags.h b/compiler-rt/lib/scudo/standalone/flags.h
new file mode 100644
index 000000000000..edd39a1b8ba9
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/flags.h
@@ -0,0 +1,30 @@
+//===-- flags.h -------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_FLAGS_H_
+#define SCUDO_FLAGS_H_
+
+#include "internal_defs.h"
+
+namespace scudo {
+
+struct Flags {
+#define SCUDO_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "flags.inc"
+#undef SCUDO_FLAG
+ void setDefaults();
+};
+
+Flags *getFlags();
+void initFlags();
+class FlagParser;
+void registerFlags(FlagParser *Parser, Flags *F);
+
+} // namespace scudo
+
+#endif // SCUDO_FLAGS_H_
diff --git a/compiler-rt/lib/scudo/standalone/flags.inc b/compiler-rt/lib/scudo/standalone/flags.inc
new file mode 100644
index 000000000000..25b86e14fa94
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/flags.inc
@@ -0,0 +1,50 @@
+//===-- flags.inc -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_FLAG
+#error "Define SCUDO_FLAG prior to including this file!"
+#endif
+
+SCUDO_FLAG(int, quarantine_size_kb, 0,
+ "Size (in kilobytes) of quarantine used to delay the actual "
+ "deallocation of chunks. Lower value may reduce memory usage but "
+ "decrease the effectiveness of the mitigation.")
+
+SCUDO_FLAG(int, thread_local_quarantine_size_kb, 0,
+ "Size (in kilobytes) of per-thread cache used to offload the global "
+ "quarantine. Lower value may reduce memory usage but might increase "
+ "the contention on the global quarantine.")
+
+SCUDO_FLAG(int, quarantine_max_chunk_size, 0,
+ "Size (in bytes) up to which chunks will be quarantined (if lower "
+ "than or equal to).")
+
+SCUDO_FLAG(bool, dealloc_type_mismatch, false,
+ "Terminate on a type mismatch in allocation-deallocation functions, "
+ "eg: malloc/delete, new/free, new/delete[], etc.")
+
+SCUDO_FLAG(bool, delete_size_mismatch, true,
+ "Terminate on a size mismatch between a sized-delete and the actual "
+ "size of a chunk (as provided to new/new[]).")
+
+SCUDO_FLAG(bool, zero_contents, false, "Zero chunk contents on allocation.")
+
+SCUDO_FLAG(int, rss_limit_mb, -1,
+ "Enforce an upper limit (in megabytes) to the process RSS. The "
+ "allocator will terminate or return NULL when allocations are "
+ "attempted past that limit (depending on may_return_null). Negative "
+ "values disable the feature.")
+
+SCUDO_FLAG(bool, may_return_null, true,
+ "Indicate whether the allocator should terminate instead of "
+ "returning NULL in otherwise non-fatal error scenarios, eg: OOM, "
+ "invalid allocation alignments, etc.")
+
+SCUDO_FLAG(int, release_to_os_interval_ms, 5000,
+ "Interval (in milliseconds) at which to attempt release of unused "
+ "memory to the OS. Negative values disable the feature.")
diff --git a/compiler-rt/lib/scudo/standalone/flags_parser.cpp b/compiler-rt/lib/scudo/standalone/flags_parser.cpp
new file mode 100644
index 000000000000..070c08b01938
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/flags_parser.cpp
@@ -0,0 +1,164 @@
+//===-- flags_parser.cpp ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "flags_parser.h"
+#include "common.h"
+#include "report.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+namespace scudo {
+
+class UnknownFlagsRegistry {
+ static const u32 MaxUnknownFlags = 16;
+ const char *UnknownFlagsNames[MaxUnknownFlags];
+ u32 NumberOfUnknownFlags;
+
+public:
+ void add(const char *Name) {
+ CHECK_LT(NumberOfUnknownFlags, MaxUnknownFlags);
+ UnknownFlagsNames[NumberOfUnknownFlags++] = Name;
+ }
+
+ void report() {
+ if (!NumberOfUnknownFlags)
+ return;
+ Printf("Scudo WARNING: found %d unrecognized flag(s):\n",
+ NumberOfUnknownFlags);
+ for (u32 I = 0; I < NumberOfUnknownFlags; ++I)
+ Printf(" %s\n", UnknownFlagsNames[I]);
+ NumberOfUnknownFlags = 0;
+ }
+};
+static UnknownFlagsRegistry UnknownFlags;
+
+void reportUnrecognizedFlags() { UnknownFlags.report(); }
+
+void FlagParser::printFlagDescriptions() {
+ Printf("Available flags for Scudo:\n");
+ for (u32 I = 0; I < NumberOfFlags; ++I)
+ Printf("\t%s\n\t\t- %s\n", Flags[I].Name, Flags[I].Desc);
+}
+
+static bool isSeparator(char C) {
+ return C == ' ' || C == ',' || C == ':' || C == '\n' || C == '\t' ||
+ C == '\r';
+}
+
+static bool isSeparatorOrNull(char C) { return !C || isSeparator(C); }
+
+void FlagParser::skipWhitespace() {
+ while (isSeparator(Buffer[Pos]))
+ ++Pos;
+}
+
+void FlagParser::parseFlag() {
+ const uptr NameStart = Pos;
+ while (Buffer[Pos] != '=' && !isSeparatorOrNull(Buffer[Pos]))
+ ++Pos;
+ if (Buffer[Pos] != '=')
+ reportError("expected '='");
+ const char *Name = Buffer + NameStart;
+ const uptr ValueStart = ++Pos;
+ const char *Value;
+ if (Buffer[Pos] == '\'' || Buffer[Pos] == '"') {
+ const char Quote = Buffer[Pos++];
+ while (Buffer[Pos] != 0 && Buffer[Pos] != Quote)
+ ++Pos;
+ if (Buffer[Pos] == 0)
+ reportError("unterminated string");
+ Value = Buffer + ValueStart + 1;
+ ++Pos; // consume the closing quote
+ } else {
+ while (!isSeparatorOrNull(Buffer[Pos]))
+ ++Pos;
+ Value = Buffer + ValueStart;
+ }
+ if (!runHandler(Name, Value))
+ reportError("flag parsing failed.");
+}
+
+void FlagParser::parseFlags() {
+ while (true) {
+ skipWhitespace();
+ if (Buffer[Pos] == 0)
+ break;
+ parseFlag();
+ }
+}
+
+void FlagParser::parseString(const char *S) {
+ if (!S)
+ return;
+ // Backup current parser state to allow nested parseString() calls.
+ const char *OldBuffer = Buffer;
+ const uptr OldPos = Pos;
+ Buffer = S;
+ Pos = 0;
+
+ parseFlags();
+
+ Buffer = OldBuffer;
+ Pos = OldPos;
+}
+
+INLINE bool parseBool(const char *Value, bool *b) {
+ if (strncmp(Value, "0", 1) == 0 || strncmp(Value, "no", 2) == 0 ||
+ strncmp(Value, "false", 5) == 0) {
+ *b = false;
+ return true;
+ }
+ if (strncmp(Value, "1", 1) == 0 || strncmp(Value, "yes", 3) == 0 ||
+ strncmp(Value, "true", 4) == 0) {
+ *b = true;
+ return true;
+ }
+ return false;
+}
+
+bool FlagParser::runHandler(const char *Name, const char *Value) {
+ for (u32 I = 0; I < NumberOfFlags; ++I) {
+ const uptr Len = strlen(Flags[I].Name);
+ if (strncmp(Name, Flags[I].Name, Len) != 0 || Name[Len] != '=')
+ continue;
+ bool Ok = false;
+ switch (Flags[I].Type) {
+ case FlagType::FT_bool:
+ Ok = parseBool(Value, reinterpret_cast<bool *>(Flags[I].Var));
+ if (!Ok)
+ reportInvalidFlag("bool", Value);
+ break;
+ case FlagType::FT_int:
+ char *ValueEnd;
+ *reinterpret_cast<int *>(Flags[I].Var) =
+ static_cast<int>(strtol(Value, &ValueEnd, 10));
+ Ok =
+ *ValueEnd == '"' || *ValueEnd == '\'' || isSeparatorOrNull(*ValueEnd);
+ if (!Ok)
+ reportInvalidFlag("int", Value);
+ break;
+ }
+ return Ok;
+ }
+  // Unrecognized flag. This is not a fatal error; we may print a warning later.
+ UnknownFlags.add(Name);
+ return true;
+}
+
+void FlagParser::registerFlag(const char *Name, const char *Desc, FlagType Type,
+ void *Var) {
+ CHECK_LT(NumberOfFlags, MaxFlags);
+ Flags[NumberOfFlags].Name = Name;
+ Flags[NumberOfFlags].Desc = Desc;
+ Flags[NumberOfFlags].Type = Type;
+ Flags[NumberOfFlags].Var = Var;
+ ++NumberOfFlags;
+}
+
+} // namespace scudo
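For illustration, the expected registerFlag()/parseString() call sequence in isolation (the flag names below are made up):

    #include "flags_parser.h"

    static void parseExample() {
      bool Verbose = false;
      int CacheSizeKb = 0;
      scudo::FlagParser Parser;
      Parser.registerFlag("verbose", "Toggle verbosity.",
                          scudo::FlagType::FT_bool, &Verbose);
      Parser.registerFlag("cache_size_kb", "Cache size in KB.",
                          scudo::FlagType::FT_int, &CacheSizeKb);
      // Separators can be spaces, commas, colons, tabs or newlines.
      Parser.parseString("verbose=true:cache_size_kb=512");
      // Verbose is now true and CacheSizeKb is 512.
    }
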
diff --git a/compiler-rt/lib/scudo/standalone/flags_parser.h b/compiler-rt/lib/scudo/standalone/flags_parser.h
new file mode 100644
index 000000000000..857b50e880ec
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/flags_parser.h
@@ -0,0 +1,55 @@
+//===-- flags_parser.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_FLAGS_PARSER_H_
+#define SCUDO_FLAGS_PARSER_H_
+
+#include "report.h"
+#include "string_utils.h"
+
+#include <stddef.h>
+
+namespace scudo {
+
+enum class FlagType : u8 {
+ FT_bool,
+ FT_int,
+};
+
+class FlagParser {
+public:
+ void registerFlag(const char *Name, const char *Desc, FlagType Type,
+ void *Var);
+ void parseString(const char *S);
+ void printFlagDescriptions();
+
+private:
+ static const u32 MaxFlags = 12;
+ struct Flag {
+ const char *Name;
+ const char *Desc;
+ FlagType Type;
+ void *Var;
+ } Flags[MaxFlags];
+
+ u32 NumberOfFlags = 0;
+ const char *Buffer = nullptr;
+ uptr Pos = 0;
+
+ void reportFatalError(const char *Error);
+ void skipWhitespace();
+ void parseFlags();
+ void parseFlag();
+ bool runHandler(const char *Name, const char *Value);
+};
+
+void reportUnrecognizedFlags();
+
+} // namespace scudo
+
+#endif // SCUDO_FLAGS_PARSER_H_
diff --git a/compiler-rt/lib/scudo/standalone/fuchsia.cpp b/compiler-rt/lib/scudo/standalone/fuchsia.cpp
new file mode 100644
index 000000000000..0a9483ae1dd0
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/fuchsia.cpp
@@ -0,0 +1,189 @@
+//===-- fuchsia.cpp ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+#if SCUDO_FUCHSIA
+
+#include "common.h"
+#include "mutex.h"
+#include "string_utils.h"
+
+#include <lib/sync/mutex.h> // for sync_mutex_t
+#include <limits.h> // for PAGE_SIZE
+#include <stdlib.h> // for getenv()
+#include <zircon/compiler.h>
+#include <zircon/sanitizer.h>
+#include <zircon/syscalls.h>
+
+namespace scudo {
+
+uptr getPageSize() { return PAGE_SIZE; }
+
+void NORETURN die() { __builtin_trap(); }
+
+// The MapPlatformData structure passed to map() is zero-initialized; make sure
+// this is consistent with ZX_HANDLE_INVALID being 0.
+COMPILER_CHECK(ZX_HANDLE_INVALID == 0);
+
+static void *allocateVmar(uptr Size, MapPlatformData *Data, bool AllowNoMem) {
+ // Only scenario so far.
+ DCHECK(Data);
+ DCHECK_EQ(Data->Vmar, ZX_HANDLE_INVALID);
+
+ const zx_status_t Status = _zx_vmar_allocate(
+ _zx_vmar_root_self(),
+ ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC, 0,
+ Size, &Data->Vmar, &Data->VmarBase);
+ if (UNLIKELY(Status != ZX_OK)) {
+ if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
+ dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
+ return nullptr;
+ }
+ return reinterpret_cast<void *>(Data->VmarBase);
+}
+
+void *map(void *Addr, uptr Size, const char *Name, uptr Flags,
+ MapPlatformData *Data) {
+ DCHECK_EQ(Size % PAGE_SIZE, 0);
+ const bool AllowNoMem = !!(Flags & MAP_ALLOWNOMEM);
+
+ // For MAP_NOACCESS, just allocate a Vmar and return.
+ if (Flags & MAP_NOACCESS)
+ return allocateVmar(Size, Data, AllowNoMem);
+
+ const zx_handle_t Vmar = Data ? Data->Vmar : _zx_vmar_root_self();
+ CHECK_NE(Vmar, ZX_HANDLE_INVALID);
+
+ zx_status_t Status;
+ zx_handle_t Vmo;
+ uint64_t VmoSize = 0;
+ if (Data && Data->Vmo != ZX_HANDLE_INVALID) {
+ // If a Vmo was specified, it's a resize operation.
+ CHECK(Addr);
+ DCHECK(Flags & MAP_RESIZABLE);
+ Vmo = Data->Vmo;
+ VmoSize = Data->VmoSize;
+ Status = _zx_vmo_set_size(Vmo, VmoSize + Size);
+ if (Status != ZX_OK) {
+ if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
+ dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
+ return nullptr;
+ }
+ } else {
+ // Otherwise, create a Vmo and set its name.
+ Status = _zx_vmo_create(Size, ZX_VMO_RESIZABLE, &Vmo);
+ if (UNLIKELY(Status != ZX_OK)) {
+ if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
+ dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
+ return nullptr;
+ }
+ _zx_object_set_property(Vmo, ZX_PROP_NAME, Name, strlen(Name));
+ }
+
+ uintptr_t P;
+ zx_vm_option_t MapFlags =
+ ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_ALLOW_FAULTS;
+ const uint64_t Offset =
+ Addr ? reinterpret_cast<uintptr_t>(Addr) - Data->VmarBase : 0;
+ if (Offset)
+ MapFlags |= ZX_VM_SPECIFIC;
+ Status = _zx_vmar_map(Vmar, MapFlags, Offset, Vmo, VmoSize, Size, &P);
+  // Only track the Vmo if we intend to resize it; close it otherwise.
+ if (Flags & MAP_RESIZABLE) {
+ DCHECK(Data);
+ DCHECK_EQ(Data->Vmo, ZX_HANDLE_INVALID);
+ Data->Vmo = Vmo;
+ } else {
+ CHECK_EQ(_zx_handle_close(Vmo), ZX_OK);
+ }
+ if (UNLIKELY(Status != ZX_OK)) {
+ if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
+ dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
+ return nullptr;
+ }
+ if (Data)
+ Data->VmoSize += Size;
+
+ return reinterpret_cast<void *>(P);
+}
+
+void unmap(void *Addr, uptr Size, uptr Flags, MapPlatformData *Data) {
+ if (Flags & UNMAP_ALL) {
+ DCHECK_NE(Data, nullptr);
+ const zx_handle_t Vmar = Data->Vmar;
+ DCHECK_NE(Vmar, _zx_vmar_root_self());
+ // Destroying the vmar effectively unmaps the whole mapping.
+ CHECK_EQ(_zx_vmar_destroy(Vmar), ZX_OK);
+ CHECK_EQ(_zx_handle_close(Vmar), ZX_OK);
+ } else {
+ const zx_handle_t Vmar = Data ? Data->Vmar : _zx_vmar_root_self();
+ const zx_status_t Status =
+ _zx_vmar_unmap(Vmar, reinterpret_cast<uintptr_t>(Addr), Size);
+ if (UNLIKELY(Status != ZX_OK))
+ dieOnMapUnmapError();
+ }
+ if (Data) {
+ if (Data->Vmo != ZX_HANDLE_INVALID)
+ CHECK_EQ(_zx_handle_close(Data->Vmo), ZX_OK);
+ memset(Data, 0, sizeof(*Data));
+ }
+}
+
+void releasePagesToOS(UNUSED uptr BaseAddress, uptr Offset, uptr Size,
+ MapPlatformData *Data) {
+ DCHECK(Data);
+ DCHECK_NE(Data->Vmar, ZX_HANDLE_INVALID);
+ DCHECK_NE(Data->Vmo, ZX_HANDLE_INVALID);
+ const zx_status_t Status =
+ _zx_vmo_op_range(Data->Vmo, ZX_VMO_OP_DECOMMIT, Offset, Size, NULL, 0);
+ CHECK_EQ(Status, ZX_OK);
+}
+
+const char *getEnv(const char *Name) { return getenv(Name); }
+
+// Note: we need to flag these methods with __TA_NO_THREAD_SAFETY_ANALYSIS
+// because the Fuchsia implementation of sync_mutex_t has clang thread safety
+// annotations. Were we to apply proper capability annotations to the top level
+// HybridMutex class itself, they would not be needed. As it stands, the
+// thread analysis thinks that we are locking the mutex and accidentally leaving
+// it locked on the way out.
+bool HybridMutex::tryLock() __TA_NO_THREAD_SAFETY_ANALYSIS {
+ // Size and alignment must be compatible between both types.
+ return sync_mutex_trylock(&M) == ZX_OK;
+}
+
+void HybridMutex::lockSlow() __TA_NO_THREAD_SAFETY_ANALYSIS {
+ sync_mutex_lock(&M);
+}
+
+void HybridMutex::unlock() __TA_NO_THREAD_SAFETY_ANALYSIS {
+ sync_mutex_unlock(&M);
+}
+
+u64 getMonotonicTime() { return _zx_clock_get_monotonic(); }
+
+u32 getNumberOfCPUs() { return _zx_system_get_num_cpus(); }
+
+bool getRandom(void *Buffer, uptr Length, UNUSED bool Blocking) {
+ COMPILER_CHECK(MaxRandomLength <= ZX_CPRNG_DRAW_MAX_LEN);
+ if (UNLIKELY(!Buffer || !Length || Length > MaxRandomLength))
+ return false;
+ _zx_cprng_draw(Buffer, Length);
+ return true;
+}
+
+void outputRaw(const char *Buffer) {
+ __sanitizer_log_write(Buffer, strlen(Buffer));
+}
+
+void setAbortMessage(const char *Message) {}
+
+} // namespace scudo
+
+#endif // SCUDO_FUCHSIA
diff --git a/compiler-rt/lib/scudo/standalone/fuchsia.h b/compiler-rt/lib/scudo/standalone/fuchsia.h
new file mode 100644
index 000000000000..d6993f892140
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/fuchsia.h
@@ -0,0 +1,31 @@
+//===-- fuchsia.h -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_FUCHSIA_H_
+#define SCUDO_FUCHSIA_H_
+
+#include "platform.h"
+
+#if SCUDO_FUCHSIA
+
+#include <zircon/process.h>
+
+namespace scudo {
+
+struct MapPlatformData {
+ zx_handle_t Vmar;
+ zx_handle_t Vmo;
+ uintptr_t VmarBase;
+ uint64_t VmoSize;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_FUCHSIA
+
+#endif // SCUDO_FUCHSIA_H_
diff --git a/compiler-rt/lib/scudo/standalone/interface.h b/compiler-rt/lib/scudo/standalone/interface.h
new file mode 100644
index 000000000000..e2639823f426
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/interface.h
@@ -0,0 +1,29 @@
+//===-- interface.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_INTERFACE_H_
+#define SCUDO_INTERFACE_H_
+
+#include "internal_defs.h"
+
+extern "C" {
+
+WEAK INTERFACE const char *__scudo_default_options();
+
+// Post-allocation & pre-deallocation hooks.
+// They must be thread-safe and not use heap related functions.
+WEAK INTERFACE void __scudo_allocate_hook(void *ptr, size_t size);
+WEAK INTERFACE void __scudo_deallocate_hook(void *ptr);
+
+WEAK INTERFACE void __scudo_print_stats(void);
+
+typedef void (*iterate_callback)(uintptr_t base, size_t size, void *arg);
+
+} // extern "C"
+
+#endif // SCUDO_INTERFACE_H_
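Since these are weak symbols, a client binary may provide strong definitions to customize the allocator; a sketch of what that could look like (the logging is only illustrative, and real hooks must remain heap-free and thread-safe as noted above):

    #include <cstddef>
    #include <cstdio>

    extern "C" const char *__scudo_default_options() {
      return "delete_size_mismatch=true:quarantine_size_kb=64";
    }

    extern "C" void __scudo_allocate_hook(void *Ptr, size_t Size) {
      fprintf(stderr, "alloc %p (%zu)\n", Ptr, Size);
    }

    extern "C" void __scudo_deallocate_hook(void *Ptr) {
      fprintf(stderr, "free  %p\n", Ptr);
    }
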
diff --git a/compiler-rt/lib/scudo/standalone/internal_defs.h b/compiler-rt/lib/scudo/standalone/internal_defs.h
new file mode 100644
index 000000000000..64ed238ebfec
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/internal_defs.h
@@ -0,0 +1,133 @@
+//===-- internal_defs.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_INTERNAL_DEFS_H_
+#define SCUDO_INTERNAL_DEFS_H_
+
+#include "platform.h"
+
+#include <stdint.h>
+
+#ifndef SCUDO_DEBUG
+#define SCUDO_DEBUG 0
+#endif
+
+#define ARRAY_SIZE(A) (sizeof(A) / sizeof((A)[0]))
+
+// String related macros.
+
+#define STRINGIFY_(S) #S
+#define STRINGIFY(S) STRINGIFY_(S)
+#define CONCATENATE_(S, C) S##C
+#define CONCATENATE(S, C) CONCATENATE_(S, C)
+
+// Attributes & builtins related macros.
+
+#define INTERFACE __attribute__((visibility("default")))
+#define WEAK __attribute__((weak))
+#define INLINE inline
+#define ALWAYS_INLINE inline __attribute__((always_inline))
+#define ALIAS(X) __attribute__((alias(X)))
+// Please only use the ALIGNED macro before the type. Using ALIGNED after the
+// variable declaration is not portable.
+#define ALIGNED(X) __attribute__((aligned(X)))
+#define FORMAT(F, A) __attribute__((format(printf, F, A)))
+#define NOINLINE __attribute__((noinline))
+#define NORETURN __attribute__((noreturn))
+#define THREADLOCAL __thread
+#define LIKELY(X) __builtin_expect(!!(X), 1)
+#define UNLIKELY(X) __builtin_expect(!!(X), 0)
+#if defined(__i386__) || defined(__x86_64__)
+// __builtin_prefetch(X) generates prefetcht0 on x86
+#define PREFETCH(X) __asm__("prefetchnta (%0)" : : "r"(X))
+#else
+#define PREFETCH(X) __builtin_prefetch(X)
+#endif
+#define UNUSED __attribute__((unused))
+#define USED __attribute__((used))
+#define NOEXCEPT noexcept
+
+namespace scudo {
+
+typedef unsigned long uptr;
+typedef unsigned char u8;
+typedef unsigned short u16;
+typedef unsigned int u32;
+typedef unsigned long long u64;
+typedef signed long sptr;
+typedef signed char s8;
+typedef signed short s16;
+typedef signed int s32;
+typedef signed long long s64;
+
+// The following two functions have platform specific implementations.
+void outputRaw(const char *Buffer);
+void NORETURN die();
+
+#define RAW_CHECK_MSG(Expr, Msg) \
+ do { \
+ if (UNLIKELY(!(Expr))) { \
+ outputRaw(Msg); \
+ die(); \
+ } \
+ } while (false)
+
+#define RAW_CHECK(Expr) RAW_CHECK_MSG(Expr, #Expr)
+
+void NORETURN reportCheckFailed(const char *File, int Line,
+ const char *Condition, u64 Value1, u64 Value2);
+
+#define CHECK_IMPL(C1, Op, C2) \
+ do { \
+ u64 V1 = (u64)(C1); \
+ u64 V2 = (u64)(C2); \
+ if (UNLIKELY(!(V1 Op V2))) { \
+ reportCheckFailed(__FILE__, __LINE__, "(" #C1 ") " #Op " (" #C2 ")", V1, \
+ V2); \
+ die(); \
+ } \
+ } while (false)
+
+#define CHECK(A) CHECK_IMPL((A), !=, 0)
+#define CHECK_EQ(A, B) CHECK_IMPL((A), ==, (B))
+#define CHECK_NE(A, B) CHECK_IMPL((A), !=, (B))
+#define CHECK_LT(A, B) CHECK_IMPL((A), <, (B))
+#define CHECK_LE(A, B) CHECK_IMPL((A), <=, (B))
+#define CHECK_GT(A, B) CHECK_IMPL((A), >, (B))
+#define CHECK_GE(A, B) CHECK_IMPL((A), >=, (B))
+
+#if SCUDO_DEBUG
+#define DCHECK(A) CHECK(A)
+#define DCHECK_EQ(A, B) CHECK_EQ(A, B)
+#define DCHECK_NE(A, B) CHECK_NE(A, B)
+#define DCHECK_LT(A, B) CHECK_LT(A, B)
+#define DCHECK_LE(A, B) CHECK_LE(A, B)
+#define DCHECK_GT(A, B) CHECK_GT(A, B)
+#define DCHECK_GE(A, B) CHECK_GE(A, B)
+#else
+#define DCHECK(A)
+#define DCHECK_EQ(A, B)
+#define DCHECK_NE(A, B)
+#define DCHECK_LT(A, B)
+#define DCHECK_LE(A, B)
+#define DCHECK_GT(A, B)
+#define DCHECK_GE(A, B)
+#endif
+
+// The superfluous die() call effectively makes this macro NORETURN.
+#define UNREACHABLE(Msg) \
+ do { \
+ CHECK(0 && Msg); \
+ die(); \
+ } while (0)
+
+#define COMPILER_CHECK(Pred) static_assert(Pred, "")
+
+} // namespace scudo
+
+#endif // SCUDO_INTERNAL_DEFS_H_
diff --git a/compiler-rt/lib/scudo/standalone/linux.cpp b/compiler-rt/lib/scudo/standalone/linux.cpp
new file mode 100644
index 000000000000..8266a528f42c
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/linux.cpp
@@ -0,0 +1,171 @@
+//===-- linux.cpp -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+#if SCUDO_LINUX
+
+#include "common.h"
+#include "linux.h"
+#include "mutex.h"
+#include "string_utils.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/futex.h>
+#include <sched.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+#include <time.h>
+#include <unistd.h>
+
+#if SCUDO_ANDROID
+#include <sys/prctl.h>
+// Definitions of prctl arguments to set a vma name in Android kernels.
+#define ANDROID_PR_SET_VMA 0x53564d41
+#define ANDROID_PR_SET_VMA_ANON_NAME 0
+#endif
+
+namespace scudo {
+
+uptr getPageSize() { return static_cast<uptr>(sysconf(_SC_PAGESIZE)); }
+
+void NORETURN die() { abort(); }
+
+void *map(void *Addr, uptr Size, UNUSED const char *Name, uptr Flags,
+ UNUSED MapPlatformData *Data) {
+ int MmapFlags = MAP_PRIVATE | MAP_ANONYMOUS;
+ int MmapProt;
+ if (Flags & MAP_NOACCESS) {
+ MmapFlags |= MAP_NORESERVE;
+ MmapProt = PROT_NONE;
+ } else {
+ MmapProt = PROT_READ | PROT_WRITE;
+ }
+ if (Addr) {
+ // Currently no scenario for a noaccess mapping with a fixed address.
+ DCHECK_EQ(Flags & MAP_NOACCESS, 0);
+ MmapFlags |= MAP_FIXED;
+ }
+ void *P = mmap(Addr, Size, MmapProt, MmapFlags, -1, 0);
+ if (P == MAP_FAILED) {
+ if (!(Flags & MAP_ALLOWNOMEM) || errno != ENOMEM)
+ dieOnMapUnmapError(errno == ENOMEM);
+ return nullptr;
+ }
+#if SCUDO_ANDROID
+ if (!(Flags & MAP_NOACCESS))
+ prctl(ANDROID_PR_SET_VMA, ANDROID_PR_SET_VMA_ANON_NAME, P, Size, Name);
+#endif
+ return P;
+}
+
+void unmap(void *Addr, uptr Size, UNUSED uptr Flags,
+ UNUSED MapPlatformData *Data) {
+ if (munmap(Addr, Size) != 0)
+ dieOnMapUnmapError();
+}
+
+void releasePagesToOS(uptr BaseAddress, uptr Offset, uptr Size,
+ UNUSED MapPlatformData *Data) {
+ void *Addr = reinterpret_cast<void *>(BaseAddress + Offset);
+ while (madvise(Addr, Size, MADV_DONTNEED) == -1 && errno == EAGAIN) {
+ }
+}
+
+// Calling getenv should be fine (c)(tm) at any time.
+const char *getEnv(const char *Name) { return getenv(Name); }
+
+namespace {
+enum State : u32 { Unlocked = 0, Locked = 1, Sleeping = 2 };
+}
+
+bool HybridMutex::tryLock() {
+ return atomic_compare_exchange(&M, Unlocked, Locked) == Unlocked;
+}
+
+// The following is based on https://akkadia.org/drepper/futex.pdf.
+void HybridMutex::lockSlow() {
+ u32 V = atomic_compare_exchange(&M, Unlocked, Locked);
+ if (V == Unlocked)
+ return;
+ if (V != Sleeping)
+ V = atomic_exchange(&M, Sleeping, memory_order_acquire);
+ while (V != Unlocked) {
+ syscall(SYS_futex, reinterpret_cast<uptr>(&M), FUTEX_WAIT_PRIVATE, Sleeping,
+ nullptr, nullptr, 0);
+ V = atomic_exchange(&M, Sleeping, memory_order_acquire);
+ }
+}
+
+void HybridMutex::unlock() {
+ if (atomic_fetch_sub(&M, 1U, memory_order_release) != Locked) {
+ atomic_store(&M, Unlocked, memory_order_release);
+ syscall(SYS_futex, reinterpret_cast<uptr>(&M), FUTEX_WAKE_PRIVATE, 1,
+ nullptr, nullptr, 0);
+ }
+}
+
+u64 getMonotonicTime() {
+ timespec TS;
+ clock_gettime(CLOCK_MONOTONIC, &TS);
+ return static_cast<u64>(TS.tv_sec) * (1000ULL * 1000 * 1000) +
+ static_cast<u64>(TS.tv_nsec);
+}
+
+u32 getNumberOfCPUs() {
+ cpu_set_t CPUs;
+ CHECK_EQ(sched_getaffinity(0, sizeof(cpu_set_t), &CPUs), 0);
+ return static_cast<u32>(CPU_COUNT(&CPUs));
+}
+
+// Blocking is possibly unused if the getrandom block is not compiled in.
+bool getRandom(void *Buffer, uptr Length, UNUSED bool Blocking) {
+ if (!Buffer || !Length || Length > MaxRandomLength)
+ return false;
+ ssize_t ReadBytes;
+#if defined(SYS_getrandom)
+#if !defined(GRND_NONBLOCK)
+#define GRND_NONBLOCK 1
+#endif
+ // Up to 256 bytes, getrandom will not be interrupted.
+ ReadBytes =
+ syscall(SYS_getrandom, Buffer, Length, Blocking ? 0 : GRND_NONBLOCK);
+ if (ReadBytes == static_cast<ssize_t>(Length))
+ return true;
+#endif // defined(SYS_getrandom)
+ // Up to 256 bytes, a read off /dev/urandom will not be interrupted.
+ // Blocking is moot here, O_NONBLOCK has no effect when opening /dev/urandom.
+ const int FileDesc = open("/dev/urandom", O_RDONLY);
+ if (FileDesc == -1)
+ return false;
+ ReadBytes = read(FileDesc, Buffer, Length);
+ close(FileDesc);
+ return (ReadBytes == static_cast<ssize_t>(Length));
+}
+
+void outputRaw(const char *Buffer) {
+ static HybridMutex Mutex;
+ ScopedLock L(Mutex);
+ write(2, Buffer, strlen(Buffer));
+}
+
+extern "C" WEAK void android_set_abort_message(const char *);
+
+void setAbortMessage(const char *Message) {
+ if (&android_set_abort_message)
+ android_set_abort_message(Message);
+}
+
+} // namespace scudo
+
+#endif // SCUDO_LINUX
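
On this platform, map() returns nullptr only when MAP_ALLOWNOMEM was passed
and the failure was ENOMEM; any other failure ends up in dieOnMapUnmapError().
A minimal sketch of the expected calling pattern (the helper is illustrative):

  void *reserveScratch(scudo::uptr Size) {
    void *P = scudo::map(nullptr, Size, "scudo:scratch", MAP_ALLOWNOMEM);
    if (!P)
      return nullptr;  // ENOMEM was tolerated; the caller decides what to do.
    return P;          // Release later with scudo::unmap(P, Size).
  }
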
diff --git a/compiler-rt/lib/scudo/standalone/linux.h b/compiler-rt/lib/scudo/standalone/linux.h
new file mode 100644
index 000000000000..c8e41484c851
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/linux.h
@@ -0,0 +1,70 @@
+//===-- linux.h -------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_LINUX_H_
+#define SCUDO_LINUX_H_
+
+#include "platform.h"
+
+#if SCUDO_LINUX
+
+namespace scudo {
+
+// MapPlatformData is unused on Linux; define it as a minimally sized structure.
+struct MapPlatformData {};
+
+#if SCUDO_ANDROID
+
+#if defined(__aarch64__)
+#define __get_tls() \
+ ({ \
+ void **__v; \
+ __asm__("mrs %0, tpidr_el0" : "=r"(__v)); \
+ __v; \
+ })
+#elif defined(__arm__)
+#define __get_tls() \
+ ({ \
+ void **__v; \
+ __asm__("mrc p15, 0, %0, c13, c0, 3" : "=r"(__v)); \
+ __v; \
+ })
+#elif defined(__i386__)
+#define __get_tls() \
+ ({ \
+ void **__v; \
+ __asm__("movl %%gs:0, %0" : "=r"(__v)); \
+ __v; \
+ })
+#elif defined(__x86_64__)
+#define __get_tls() \
+ ({ \
+ void **__v; \
+ __asm__("mov %%fs:0, %0" : "=r"(__v)); \
+ __v; \
+ })
+#else
+#error "Unsupported architecture."
+#endif
+
+// The Android Bionic team has allocated a TLS slot for sanitizers starting
+// with Q, given that Android currently doesn't support ELF TLS. It is used to
+// store sanitizer thread specific data.
+static const int TLS_SLOT_SANITIZER = 6;
+
+ALWAYS_INLINE uptr *getAndroidTlsPtr() {
+ return reinterpret_cast<uptr *>(&__get_tls()[TLS_SLOT_SANITIZER]);
+}
+
+#endif // SCUDO_ANDROID
+
+} // namespace scudo
+
+#endif // SCUDO_LINUX
+
+#endif // SCUDO_LINUX_H_
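
On Android, getAndroidTlsPtr() returns the address of Bionic's sanitizer TLS
slot, and the TSD code stores a per-thread pointer there. A minimal sketch of
that pattern (ThreadData is a placeholder type, not defined in this patch):

  struct ThreadData;  // Placeholder for the real per-thread structure.
  inline void setThreadData(ThreadData *TD) {
    *scudo::getAndroidTlsPtr() = reinterpret_cast<scudo::uptr>(TD);
  }
  inline ThreadData *getThreadData() {
    return reinterpret_cast<ThreadData *>(*scudo::getAndroidTlsPtr());
  }
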
diff --git a/compiler-rt/lib/scudo/standalone/list.h b/compiler-rt/lib/scudo/standalone/list.h
new file mode 100644
index 000000000000..6a7b9bd747a7
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/list.h
@@ -0,0 +1,156 @@
+//===-- list.h --------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_LIST_H_
+#define SCUDO_LIST_H_
+
+#include "internal_defs.h"
+
+namespace scudo {
+
+// Intrusive POD singly-linked list.
+// An object with all zero fields should represent a valid empty list. clear()
+// should be called on all non-zero-initialized objects before use.
+template <class Item> struct IntrusiveList {
+ friend class Iterator;
+
+ void clear() {
+ First = Last = nullptr;
+ Size = 0;
+ }
+
+ bool empty() const { return Size == 0; }
+ uptr size() const { return Size; }
+
+ void push_back(Item *X) {
+ if (empty()) {
+ X->Next = nullptr;
+ First = Last = X;
+ Size = 1;
+ } else {
+ X->Next = nullptr;
+ Last->Next = X;
+ Last = X;
+ Size++;
+ }
+ }
+
+ void push_front(Item *X) {
+ if (empty()) {
+ X->Next = nullptr;
+ First = Last = X;
+ Size = 1;
+ } else {
+ X->Next = First;
+ First = X;
+ Size++;
+ }
+ }
+
+ void pop_front() {
+ DCHECK(!empty());
+ First = First->Next;
+ if (!First)
+ Last = nullptr;
+ Size--;
+ }
+
+ void extract(Item *Prev, Item *X) {
+ DCHECK(!empty());
+ DCHECK_NE(Prev, nullptr);
+ DCHECK_NE(X, nullptr);
+ DCHECK_EQ(Prev->Next, X);
+ Prev->Next = X->Next;
+ if (Last == X)
+ Last = Prev;
+ Size--;
+ }
+
+ Item *front() { return First; }
+ const Item *front() const { return First; }
+ Item *back() { return Last; }
+ const Item *back() const { return Last; }
+
+ void append_front(IntrusiveList<Item> *L) {
+ DCHECK_NE(this, L);
+ if (L->empty())
+ return;
+ if (empty()) {
+ *this = *L;
+    } else {
+ L->Last->Next = First;
+ First = L->First;
+ Size += L->size();
+ }
+ L->clear();
+ }
+
+ void append_back(IntrusiveList<Item> *L) {
+ DCHECK_NE(this, L);
+ if (L->empty())
+ return;
+ if (empty()) {
+ *this = *L;
+ } else {
+ Last->Next = L->First;
+ Last = L->Last;
+ Size += L->size();
+ }
+ L->clear();
+ }
+
+ void checkConsistency() {
+ if (Size == 0) {
+ CHECK_EQ(First, nullptr);
+ CHECK_EQ(Last, nullptr);
+ } else {
+ uptr Count = 0;
+ for (Item *I = First;; I = I->Next) {
+ Count++;
+ if (I == Last)
+ break;
+ }
+ CHECK_EQ(size(), Count);
+ CHECK_EQ(Last->Next, nullptr);
+ }
+ }
+
+ template <class ItemT> class IteratorBase {
+ public:
+ explicit IteratorBase(ItemT *CurrentItem) : Current(CurrentItem) {}
+ IteratorBase &operator++() {
+ Current = Current->Next;
+ return *this;
+ }
+ bool operator!=(IteratorBase Other) const {
+ return Current != Other.Current;
+ }
+ ItemT &operator*() { return *Current; }
+
+ private:
+ ItemT *Current;
+ };
+
+ typedef IteratorBase<Item> Iterator;
+ typedef IteratorBase<const Item> ConstIterator;
+
+ Iterator begin() { return Iterator(First); }
+ Iterator end() { return Iterator(nullptr); }
+
+ ConstIterator begin() const { return ConstIterator(First); }
+ ConstIterator end() const { return ConstIterator(nullptr); }
+
+private:
+ uptr Size;
+ Item *First;
+ Item *Last;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_LIST_H_
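
The list is intrusive: the element type supplies its own Next pointer and the
list never allocates. A minimal usage sketch (Node is illustrative):

  struct Node {
    Node *Next;
    scudo::uptr Payload;
  };

  void listExample() {
    scudo::IntrusiveList<Node> List;
    List.clear();  // Required unless the object was zero-initialized.
    static Node N1 = {nullptr, 1}, N2 = {nullptr, 2};
    List.push_back(&N1);
    List.push_front(&N2);  // The list is now N2 -> N1.
    for (Node &N : List)
      (void)N.Payload;
    List.checkConsistency();
  }
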
diff --git a/compiler-rt/lib/scudo/standalone/local_cache.h b/compiler-rt/lib/scudo/standalone/local_cache.h
new file mode 100644
index 000000000000..b08abd3e5d9b
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/local_cache.h
@@ -0,0 +1,181 @@
+//===-- local_cache.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_LOCAL_CACHE_H_
+#define SCUDO_LOCAL_CACHE_H_
+
+#include "internal_defs.h"
+#include "report.h"
+#include "stats.h"
+
+namespace scudo {
+
+template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
+ typedef typename SizeClassAllocator::SizeClassMap SizeClassMap;
+
+ struct TransferBatch {
+ static const u32 MaxNumCached = SizeClassMap::MaxNumCachedHint;
+ void setFromArray(void **Array, u32 N) {
+ DCHECK_LE(N, MaxNumCached);
+ Count = N;
+ memcpy(Batch, Array, sizeof(void *) * Count);
+ }
+ void clear() { Count = 0; }
+ void add(void *P) {
+ DCHECK_LT(Count, MaxNumCached);
+ Batch[Count++] = P;
+ }
+ void copyToArray(void **Array) const {
+ memcpy(Array, Batch, sizeof(void *) * Count);
+ }
+ u32 getCount() const { return Count; }
+ void *get(u32 I) const {
+      DCHECK_LT(I, Count);
+ return Batch[I];
+ }
+ static u32 getMaxCached(uptr Size) {
+ return Min(MaxNumCached, SizeClassMap::getMaxCachedHint(Size));
+ }
+ TransferBatch *Next;
+
+ private:
+ u32 Count;
+ void *Batch[MaxNumCached];
+ };
+
+ void initLinkerInitialized(GlobalStats *S, SizeClassAllocator *A) {
+ Stats.initLinkerInitialized();
+ if (LIKELY(S))
+ S->link(&Stats);
+ Allocator = A;
+ }
+
+ void init(GlobalStats *S, SizeClassAllocator *A) {
+ memset(this, 0, sizeof(*this));
+ initLinkerInitialized(S, A);
+ }
+
+ void destroy(GlobalStats *S) {
+ drain();
+ if (LIKELY(S))
+ S->unlink(&Stats);
+ }
+
+ void *allocate(uptr ClassId) {
+ DCHECK_LT(ClassId, NumClasses);
+ PerClass *C = &PerClassArray[ClassId];
+ if (C->Count == 0) {
+ if (UNLIKELY(!refill(C, ClassId)))
+ return nullptr;
+ DCHECK_GT(C->Count, 0);
+ }
+ // We read ClassSize first before accessing Chunks because it's adjacent to
+ // Count, while Chunks might be further off (depending on Count). That keeps
+ // the memory accesses in close quarters.
+ const uptr ClassSize = C->ClassSize;
+ void *P = C->Chunks[--C->Count];
+ // The jury is still out as to whether any kind of PREFETCH here increases
+ // performance. It definitely decreases performance on Android though.
+ // if (!SCUDO_ANDROID) PREFETCH(P);
+ Stats.add(StatAllocated, ClassSize);
+ Stats.sub(StatFree, ClassSize);
+ return P;
+ }
+
+ void deallocate(uptr ClassId, void *P) {
+ CHECK_LT(ClassId, NumClasses);
+ PerClass *C = &PerClassArray[ClassId];
+ // We still have to initialize the cache in the event that the first heap
+ // operation in a thread is a deallocation.
+ initCacheMaybe(C);
+ if (C->Count == C->MaxCount)
+ drain(C, ClassId);
+ // See comment in allocate() about memory accesses.
+ const uptr ClassSize = C->ClassSize;
+ C->Chunks[C->Count++] = P;
+ Stats.sub(StatAllocated, ClassSize);
+ Stats.add(StatFree, ClassSize);
+ }
+
+ void drain() {
+ for (uptr I = 0; I < NumClasses; I++) {
+ PerClass *C = &PerClassArray[I];
+ while (C->Count > 0)
+ drain(C, I);
+ }
+ }
+
+ TransferBatch *createBatch(uptr ClassId, void *B) {
+ if (ClassId != SizeClassMap::BatchClassId)
+ B = allocate(SizeClassMap::BatchClassId);
+ return reinterpret_cast<TransferBatch *>(B);
+ }
+
+ LocalStats &getStats() { return Stats; }
+
+private:
+ static const uptr NumClasses = SizeClassMap::NumClasses;
+ struct PerClass {
+ u32 Count;
+ u32 MaxCount;
+ uptr ClassSize;
+ void *Chunks[2 * TransferBatch::MaxNumCached];
+ };
+ PerClass PerClassArray[NumClasses];
+ LocalStats Stats;
+ SizeClassAllocator *Allocator;
+
+ ALWAYS_INLINE void initCacheMaybe(PerClass *C) {
+ if (LIKELY(C->MaxCount))
+ return;
+ initCache();
+ DCHECK_NE(C->MaxCount, 0U);
+ }
+
+ NOINLINE void initCache() {
+ for (uptr I = 0; I < NumClasses; I++) {
+ PerClass *P = &PerClassArray[I];
+ const uptr Size = SizeClassAllocator::getSizeByClassId(I);
+ P->MaxCount = 2 * TransferBatch::getMaxCached(Size);
+ P->ClassSize = Size;
+ }
+ }
+
+ void destroyBatch(uptr ClassId, void *B) {
+ if (ClassId != SizeClassMap::BatchClassId)
+ deallocate(SizeClassMap::BatchClassId, B);
+ }
+
+ NOINLINE bool refill(PerClass *C, uptr ClassId) {
+ initCacheMaybe(C);
+ TransferBatch *B = Allocator->popBatch(this, ClassId);
+ if (UNLIKELY(!B))
+ return false;
+ DCHECK_GT(B->getCount(), 0);
+ C->Count = B->getCount();
+ B->copyToArray(C->Chunks);
+ destroyBatch(ClassId, B);
+ return true;
+ }
+
+ NOINLINE void drain(PerClass *C, uptr ClassId) {
+ const u32 Count = Min(C->MaxCount / 2, C->Count);
+ const uptr FirstIndexToDrain = C->Count - Count;
+ TransferBatch *B = createBatch(ClassId, C->Chunks[FirstIndexToDrain]);
+ if (UNLIKELY(!B))
+ reportOutOfMemory(
+ SizeClassAllocator::getSizeByClassId(SizeClassMap::BatchClassId));
+ B->setFromArray(&C->Chunks[FirstIndexToDrain], Count);
+ C->Count -= Count;
+ Allocator->pushBatch(ClassId, B);
+ }
+};
+
+} // namespace scudo
+
+#endif // SCUDO_LOCAL_CACHE_H_
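
The cache always sits on top of a primary allocator and a GlobalStats
instance, and allocate()/deallocate() take a class id rather than a size. A
minimal sketch, where Primary stands in for one of the SizeClassAllocator
variants:

  template <class Primary>
  void cacheExample(scudo::GlobalStats *Stats, Primary *Allocator) {
    scudo::SizeClassAllocatorLocalCache<Primary> Cache;
    Cache.init(Stats, Allocator);
    const scudo::uptr ClassId = 1;  // Any valid non-batch class id.
    void *P = Cache.allocate(ClassId);
    if (P)
      Cache.deallocate(ClassId, P);
    Cache.destroy(Stats);  // Drains the cached chunks back to the primary.
  }
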
diff --git a/compiler-rt/lib/scudo/standalone/mutex.h b/compiler-rt/lib/scudo/standalone/mutex.h
new file mode 100644
index 000000000000..b26b2df06627
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/mutex.h
@@ -0,0 +1,73 @@
+//===-- mutex.h -------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_MUTEX_H_
+#define SCUDO_MUTEX_H_
+
+#include "atomic_helpers.h"
+#include "common.h"
+
+#include <string.h>
+
+#if SCUDO_FUCHSIA
+#include <lib/sync/mutex.h> // for sync_mutex_t
+#endif
+
+namespace scudo {
+
+class HybridMutex {
+public:
+ void init() { memset(this, 0, sizeof(*this)); }
+ bool tryLock();
+ NOINLINE void lock() {
+ if (LIKELY(tryLock()))
+ return;
+ // The compiler may try to fully unroll the loop, ending up in a
+ // NumberOfTries*NumberOfYields block of pauses mixed with tryLocks. This
+    // is large, ugly and unneeded; a compact loop is better for our purpose
+ // here. Use a pragma to tell the compiler not to unroll the loop.
+#ifdef __clang__
+#pragma nounroll
+#endif
+ for (u8 I = 0U; I < NumberOfTries; I++) {
+ yieldProcessor(NumberOfYields);
+ if (tryLock())
+ return;
+ }
+ lockSlow();
+ }
+ void unlock();
+
+private:
+ static constexpr u8 NumberOfTries = 8U;
+ static constexpr u8 NumberOfYields = 8U;
+
+#if SCUDO_LINUX
+ atomic_u32 M;
+#elif SCUDO_FUCHSIA
+ sync_mutex_t M;
+#endif
+
+ void lockSlow();
+};
+
+class ScopedLock {
+public:
+ explicit ScopedLock(HybridMutex &M) : Mutex(M) { Mutex.lock(); }
+ ~ScopedLock() { Mutex.unlock(); }
+
+private:
+ HybridMutex &Mutex;
+
+ ScopedLock(const ScopedLock &) = delete;
+ void operator=(const ScopedLock &) = delete;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_MUTEX_H_
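
HybridMutex spins for NumberOfTries rounds of NumberOfYields pauses before
falling back to the platform specific lockSlow(); ScopedLock is the intended
way to use it. A minimal sketch:

  class Counter {
  public:
    void init() { M.init(); }
    void inc() {
      scudo::ScopedLock L(M);  // Released automatically at end of scope.
      Value++;
    }

  private:
    scudo::HybridMutex M;
    scudo::uptr Value = 0;
  };
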
diff --git a/compiler-rt/lib/scudo/standalone/platform.h b/compiler-rt/lib/scudo/standalone/platform.h
new file mode 100644
index 000000000000..a897a566f9bf
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/platform.h
@@ -0,0 +1,70 @@
+//===-- platform.h ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_PLATFORM_H_
+#define SCUDO_PLATFORM_H_
+
+#if defined(__linux__)
+#define SCUDO_LINUX 1
+#else
+#define SCUDO_LINUX 0
+#endif
+
+#if defined(__ANDROID__)
+#define SCUDO_ANDROID 1
+#else
+#define SCUDO_ANDROID 0
+#endif
+
+#if defined(__Fuchsia__)
+#define SCUDO_FUCHSIA 1
+#else
+#define SCUDO_FUCHSIA 0
+#endif
+
+#if __LP64__
+#define SCUDO_WORDSIZE 64U
+#else
+#define SCUDO_WORDSIZE 32U
+#endif
+
+#if SCUDO_WORDSIZE == 64U
+#define FIRST_32_SECOND_64(a, b) (b)
+#else
+#define FIRST_32_SECOND_64(a, b) (a)
+#endif
+
+#ifndef SCUDO_CAN_USE_PRIMARY64
+#define SCUDO_CAN_USE_PRIMARY64 (SCUDO_WORDSIZE == 64U)
+#endif
+
+#ifndef SCUDO_MIN_ALIGNMENT_LOG
+// We force malloc-type functions to be aligned to std::max_align_t, but there
+// is no reason why the minimum alignment for all other functions can't be 8
+// bytes. Except obviously for applications making incorrect assumptions.
+// TODO(kostyak): define SCUDO_MIN_ALIGNMENT_LOG 3
+#define SCUDO_MIN_ALIGNMENT_LOG FIRST_32_SECOND_64(3, 4)
+#endif
+
+#if defined(__aarch64__)
+#define SCUDO_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 48)
+#else
+#define SCUDO_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
+#endif
+
+// Older gcc versions have issues aligning to a constexpr, and require an
+// integer. See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=56859 among others.
+#if defined(__powerpc__) || defined(__powerpc64__)
+#define SCUDO_CACHE_LINE_SIZE 128
+#else
+#define SCUDO_CACHE_LINE_SIZE 64
+#endif
+
+#define SCUDO_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 12)
+
+#endif // SCUDO_PLATFORM_H_
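
FIRST_32_SECOND_64 picks its first argument on 32-bit targets and its second
on 64-bit ones, which is how the alignment and mmap range defaults above are
derived. For instance, on an x86_64 Linux target the macros resolve as follows
(the values differ on other targets):

  static_assert(SCUDO_WORDSIZE == 64U, "");
  static_assert(FIRST_32_SECOND_64(3, 4) == 4, "");          // Min alignment log.
  static_assert(SCUDO_MMAP_RANGE_SIZE == (1ULL << 47), "");  // Non-AArch64 case.
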
diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
new file mode 100644
index 000000000000..9123d07b49b9
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -0,0 +1,411 @@
+//===-- primary32.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_PRIMARY32_H_
+#define SCUDO_PRIMARY32_H_
+
+#include "bytemap.h"
+#include "common.h"
+#include "list.h"
+#include "local_cache.h"
+#include "release.h"
+#include "report.h"
+#include "stats.h"
+#include "string_utils.h"
+
+namespace scudo {
+
+// SizeClassAllocator32 is an allocator for 32- or 64-bit address spaces.
+//
+// It maps Regions of 2^RegionSizeLog bytes aligned on a 2^RegionSizeLog bytes
+// boundary, and keeps a bytemap of the mappable address space to track the
+// size class each Region is associated with.
+//
+// Mapped regions are split into equally sized Blocks according to the size
+// class they belong to, and the associated pointers are shuffled to prevent
+// any predictable address pattern (the predictability increases with the
+// block size).
+//
+// Regions for size class 0 are special and used to hold TransferBatches,
+// which allow the transfer of arrays of pointers from the global size class
+// freelist to the thread specific freelist for said class, and back.
+//
+// Memory used by this allocator is never unmapped but can be partially
+// reclaimed if the platform allows for it.
+
+template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator32 {
+public:
+ typedef SizeClassMapT SizeClassMap;
+ // Regions should be large enough to hold the largest Block.
+ COMPILER_CHECK((1UL << RegionSizeLog) >= SizeClassMap::MaxSize);
+ typedef SizeClassAllocator32<SizeClassMapT, RegionSizeLog> ThisT;
+ typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
+ typedef typename CacheT::TransferBatch TransferBatch;
+
+ static uptr getSizeByClassId(uptr ClassId) {
+ return (ClassId == SizeClassMap::BatchClassId)
+ ? sizeof(TransferBatch)
+ : SizeClassMap::getSizeByClassId(ClassId);
+ }
+
+ static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }
+
+ void initLinkerInitialized(s32 ReleaseToOsInterval) {
+ if (SCUDO_FUCHSIA)
+ reportError("SizeClassAllocator32 is not supported on Fuchsia");
+
+ PossibleRegions.initLinkerInitialized();
+ MinRegionIndex = NumRegions; // MaxRegionIndex is already initialized to 0.
+
+ u32 Seed;
+ if (UNLIKELY(!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed))))
+ Seed =
+ static_cast<u32>(getMonotonicTime() ^
+ (reinterpret_cast<uptr>(SizeClassInfoArray) >> 6));
+ const uptr PageSize = getPageSizeCached();
+ for (uptr I = 0; I < NumClasses; I++) {
+ SizeClassInfo *Sci = getSizeClassInfo(I);
+ Sci->RandState = getRandomU32(&Seed);
+ // See comment in the 64-bit primary about releasing smaller size classes.
+ Sci->CanRelease = (ReleaseToOsInterval >= 0) &&
+ (I != SizeClassMap::BatchClassId) &&
+ (getSizeByClassId(I) >= (PageSize / 32));
+ }
+ ReleaseToOsIntervalMs = ReleaseToOsInterval;
+ }
+ void init(s32 ReleaseToOsInterval) {
+ memset(this, 0, sizeof(*this));
+ initLinkerInitialized(ReleaseToOsInterval);
+ }
+
+ void unmapTestOnly() {
+ while (NumberOfStashedRegions > 0)
+ unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
+ RegionSize);
+ // TODO(kostyak): unmap the TransferBatch regions as well.
+ for (uptr I = 0; I < NumRegions; I++)
+ if (PossibleRegions[I])
+ unmap(reinterpret_cast<void *>(I * RegionSize), RegionSize);
+ PossibleRegions.unmapTestOnly();
+ }
+
+ TransferBatch *popBatch(CacheT *C, uptr ClassId) {
+ DCHECK_LT(ClassId, NumClasses);
+ SizeClassInfo *Sci = getSizeClassInfo(ClassId);
+ ScopedLock L(Sci->Mutex);
+ TransferBatch *B = Sci->FreeList.front();
+ if (B) {
+ Sci->FreeList.pop_front();
+ } else {
+ B = populateFreeList(C, ClassId, Sci);
+ if (UNLIKELY(!B))
+ return nullptr;
+ }
+ DCHECK_GT(B->getCount(), 0);
+ Sci->Stats.PoppedBlocks += B->getCount();
+ return B;
+ }
+
+ void pushBatch(uptr ClassId, TransferBatch *B) {
+ DCHECK_LT(ClassId, NumClasses);
+ DCHECK_GT(B->getCount(), 0);
+ SizeClassInfo *Sci = getSizeClassInfo(ClassId);
+ ScopedLock L(Sci->Mutex);
+ Sci->FreeList.push_front(B);
+ Sci->Stats.PushedBlocks += B->getCount();
+ if (Sci->CanRelease)
+ releaseToOSMaybe(Sci, ClassId);
+ }
+
+ void disable() {
+ for (uptr I = 0; I < NumClasses; I++)
+ getSizeClassInfo(I)->Mutex.lock();
+ }
+
+ void enable() {
+ for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--)
+ getSizeClassInfo(static_cast<uptr>(I))->Mutex.unlock();
+ }
+
+ template <typename F> void iterateOverBlocks(F Callback) {
+ for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
+ if (PossibleRegions[I]) {
+ const uptr BlockSize = getSizeByClassId(PossibleRegions[I]);
+ const uptr From = I * RegionSize;
+ const uptr To = From + (RegionSize / BlockSize) * BlockSize;
+ for (uptr Block = From; Block < To; Block += BlockSize)
+ Callback(Block);
+ }
+ }
+
+ void getStats(ScopedString *Str) {
+ // TODO(kostyak): get the RSS per region.
+ uptr TotalMapped = 0;
+ uptr PoppedBlocks = 0;
+ uptr PushedBlocks = 0;
+ for (uptr I = 0; I < NumClasses; I++) {
+ SizeClassInfo *Sci = getSizeClassInfo(I);
+ TotalMapped += Sci->AllocatedUser;
+ PoppedBlocks += Sci->Stats.PoppedBlocks;
+ PushedBlocks += Sci->Stats.PushedBlocks;
+ }
+ Str->append("Stats: SizeClassAllocator32: %zuM mapped in %zu allocations; "
+ "remains %zu\n",
+ TotalMapped >> 20, PoppedBlocks, PoppedBlocks - PushedBlocks);
+ for (uptr I = 0; I < NumClasses; I++)
+ getStats(Str, I, 0);
+ }
+
+ uptr releaseToOS() {
+ uptr TotalReleasedBytes = 0;
+ for (uptr I = 0; I < NumClasses; I++) {
+ if (I == SizeClassMap::BatchClassId)
+ continue;
+ SizeClassInfo *Sci = getSizeClassInfo(I);
+ ScopedLock L(Sci->Mutex);
+ TotalReleasedBytes += releaseToOSMaybe(Sci, I, /*Force=*/true);
+ }
+ return TotalReleasedBytes;
+ }
+
+private:
+ static const uptr NumClasses = SizeClassMap::NumClasses;
+ static const uptr RegionSize = 1UL << RegionSizeLog;
+ static const uptr NumRegions = SCUDO_MMAP_RANGE_SIZE >> RegionSizeLog;
+#if SCUDO_WORDSIZE == 32U
+ typedef FlatByteMap<NumRegions> ByteMap;
+#else
+ typedef TwoLevelByteMap<(NumRegions >> 12), 1UL << 12> ByteMap;
+#endif
+
+ struct SizeClassStats {
+ uptr PoppedBlocks;
+ uptr PushedBlocks;
+ };
+
+ struct ReleaseToOsInfo {
+ uptr PushedBlocksAtLastRelease;
+ uptr RangesReleased;
+ uptr LastReleasedBytes;
+ u64 LastReleaseAtNs;
+ };
+
+ struct ALIGNED(SCUDO_CACHE_LINE_SIZE) SizeClassInfo {
+ HybridMutex Mutex;
+ IntrusiveList<TransferBatch> FreeList;
+ SizeClassStats Stats;
+ bool CanRelease;
+ u32 RandState;
+ uptr AllocatedUser;
+ ReleaseToOsInfo ReleaseInfo;
+ };
+ COMPILER_CHECK(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0);
+
+ uptr computeRegionId(uptr Mem) {
+ const uptr Id = Mem >> RegionSizeLog;
+ CHECK_LT(Id, NumRegions);
+ return Id;
+ }
+
+ uptr allocateRegionSlow() {
+ uptr MapSize = 2 * RegionSize;
+ const uptr MapBase = reinterpret_cast<uptr>(
+ map(nullptr, MapSize, "scudo:primary", MAP_ALLOWNOMEM));
+ if (UNLIKELY(!MapBase))
+ return 0;
+ const uptr MapEnd = MapBase + MapSize;
+ uptr Region = MapBase;
+ if (isAligned(Region, RegionSize)) {
+ ScopedLock L(RegionsStashMutex);
+ if (NumberOfStashedRegions < MaxStashedRegions)
+ RegionsStash[NumberOfStashedRegions++] = MapBase + RegionSize;
+ else
+ MapSize = RegionSize;
+ } else {
+ Region = roundUpTo(MapBase, RegionSize);
+ unmap(reinterpret_cast<void *>(MapBase), Region - MapBase);
+ MapSize = RegionSize;
+ }
+ const uptr End = Region + MapSize;
+ if (End != MapEnd)
+ unmap(reinterpret_cast<void *>(End), MapEnd - End);
+ return Region;
+ }
+
+ uptr allocateRegion(uptr ClassId) {
+ DCHECK_LT(ClassId, NumClasses);
+ uptr Region = 0;
+ {
+ ScopedLock L(RegionsStashMutex);
+ if (NumberOfStashedRegions > 0)
+ Region = RegionsStash[--NumberOfStashedRegions];
+ }
+ if (!Region)
+ Region = allocateRegionSlow();
+ if (LIKELY(Region)) {
+ if (ClassId) {
+ const uptr RegionIndex = computeRegionId(Region);
+ if (RegionIndex < MinRegionIndex)
+ MinRegionIndex = RegionIndex;
+ if (RegionIndex > MaxRegionIndex)
+ MaxRegionIndex = RegionIndex;
+ PossibleRegions.set(RegionIndex, static_cast<u8>(ClassId));
+ }
+ }
+ return Region;
+ }
+
+ SizeClassInfo *getSizeClassInfo(uptr ClassId) {
+ DCHECK_LT(ClassId, NumClasses);
+ return &SizeClassInfoArray[ClassId];
+ }
+
+ bool populateBatches(CacheT *C, SizeClassInfo *Sci, uptr ClassId,
+ TransferBatch **CurrentBatch, u32 MaxCount,
+ void **PointersArray, u32 Count) {
+ if (ClassId != SizeClassMap::BatchClassId)
+ shuffle(PointersArray, Count, &Sci->RandState);
+ TransferBatch *B = *CurrentBatch;
+ for (uptr I = 0; I < Count; I++) {
+ if (B && B->getCount() == MaxCount) {
+ Sci->FreeList.push_back(B);
+ B = nullptr;
+ }
+ if (!B) {
+ B = C->createBatch(ClassId, PointersArray[I]);
+ if (UNLIKELY(!B))
+ return false;
+ B->clear();
+ }
+ B->add(PointersArray[I]);
+ }
+ *CurrentBatch = B;
+ return true;
+ }
+
+ NOINLINE TransferBatch *populateFreeList(CacheT *C, uptr ClassId,
+ SizeClassInfo *Sci) {
+ const uptr Region = allocateRegion(ClassId);
+ if (UNLIKELY(!Region))
+ return nullptr;
+ C->getStats().add(StatMapped, RegionSize);
+ const uptr Size = getSizeByClassId(ClassId);
+ const u32 MaxCount = TransferBatch::getMaxCached(Size);
+ DCHECK_GT(MaxCount, 0);
+ const uptr NumberOfBlocks = RegionSize / Size;
+ DCHECK_GT(NumberOfBlocks, 0);
+ TransferBatch *B = nullptr;
+ constexpr uptr ShuffleArraySize = 48;
+ void *ShuffleArray[ShuffleArraySize];
+ u32 Count = 0;
+ const uptr AllocatedUser = NumberOfBlocks * Size;
+ for (uptr I = Region; I < Region + AllocatedUser; I += Size) {
+ ShuffleArray[Count++] = reinterpret_cast<void *>(I);
+ if (Count == ShuffleArraySize) {
+ if (UNLIKELY(!populateBatches(C, Sci, ClassId, &B, MaxCount,
+ ShuffleArray, Count)))
+ return nullptr;
+ Count = 0;
+ }
+ }
+ if (Count) {
+ if (UNLIKELY(!populateBatches(C, Sci, ClassId, &B, MaxCount, ShuffleArray,
+ Count)))
+ return nullptr;
+ }
+ DCHECK(B);
+ DCHECK_GT(B->getCount(), 0);
+
+ C->getStats().add(StatFree, AllocatedUser);
+ Sci->AllocatedUser += AllocatedUser;
+ if (Sci->CanRelease)
+ Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
+ return B;
+ }
+
+ void getStats(ScopedString *Str, uptr ClassId, uptr Rss) {
+ SizeClassInfo *Sci = getSizeClassInfo(ClassId);
+ if (Sci->AllocatedUser == 0)
+ return;
+ const uptr InUse = Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks;
+ const uptr AvailableChunks = Sci->AllocatedUser / getSizeByClassId(ClassId);
+ Str->append(" %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
+ "inuse: %6zu avail: %6zu rss: %6zuK\n",
+ ClassId, getSizeByClassId(ClassId), Sci->AllocatedUser >> 10,
+ Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks, InUse,
+ AvailableChunks, Rss >> 10);
+ }
+
+ NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
+ bool Force = false) {
+ const uptr BlockSize = getSizeByClassId(ClassId);
+ const uptr PageSize = getPageSizeCached();
+
+ CHECK_GE(Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks);
+ const uptr BytesInFreeList =
+ Sci->AllocatedUser -
+ (Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks) * BlockSize;
+ if (BytesInFreeList < PageSize)
+ return 0; // No chance to release anything.
+ if ((Sci->Stats.PushedBlocks - Sci->ReleaseInfo.PushedBlocksAtLastRelease) *
+ BlockSize <
+ PageSize) {
+ return 0; // Nothing new to release.
+ }
+
+ if (!Force) {
+ const s32 IntervalMs = ReleaseToOsIntervalMs;
+ if (IntervalMs < 0)
+ return 0;
+ if (Sci->ReleaseInfo.LastReleaseAtNs +
+ static_cast<uptr>(IntervalMs) * 1000000ULL >
+ getMonotonicTime()) {
+ return 0; // Memory was returned recently.
+ }
+ }
+
+ // TODO(kostyak): currently not ideal as we loop over all regions and
+ // iterate multiple times over the same freelist if a ClassId spans multiple
+ // regions. But it will have to do for now.
+ uptr TotalReleasedBytes = 0;
+ for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++) {
+ if (PossibleRegions[I] == ClassId) {
+ ReleaseRecorder Recorder(I * RegionSize);
+ releaseFreeMemoryToOS(&Sci->FreeList, I * RegionSize,
+ RegionSize / PageSize, BlockSize, &Recorder);
+ if (Recorder.getReleasedRangesCount() > 0) {
+ Sci->ReleaseInfo.PushedBlocksAtLastRelease = Sci->Stats.PushedBlocks;
+ Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
+ Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
+ TotalReleasedBytes += Sci->ReleaseInfo.LastReleasedBytes;
+ }
+ }
+ }
+ Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
+ return TotalReleasedBytes;
+ }
+
+ SizeClassInfo SizeClassInfoArray[NumClasses];
+
+ ByteMap PossibleRegions;
+ // Keep track of the lowest & highest regions allocated to avoid looping
+ // through the whole NumRegions.
+ uptr MinRegionIndex;
+ uptr MaxRegionIndex;
+ s32 ReleaseToOsIntervalMs;
+ // Unless several threads request regions simultaneously from different size
+ // classes, the stash rarely contains more than 1 entry.
+ static constexpr uptr MaxStashedRegions = 4;
+ HybridMutex RegionsStashMutex;
+ uptr NumberOfStashedRegions;
+ uptr RegionsStash[MaxStashedRegions];
+};
+
+} // namespace scudo
+
+#endif // SCUDO_PRIMARY32_H_
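
The two template parameters are the size class map and the log2 of the region
size, and the COMPILER_CHECK at the top of the class requires a region to fit
the largest block. A minimal instantiation sketch, where MySizeClassMap is a
placeholder for one of the maps defined in size_class_map.h (not shown here):

  // 2^20 byte (1M) regions; MySizeClassMap is a placeholder, not in the patch.
  using Primary32 = scudo::SizeClassAllocator32<MySizeClassMap, 20U>;

  void primary32Example(Primary32 *P, Primary32::CacheT *C) {
    P->init(/*ReleaseToOsInterval=*/1000);  // Release at most every 1000ms.
    if (auto *B = P->popBatch(C, /*ClassId=*/1))
      P->pushBatch(/*ClassId=*/1, B);
  }
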
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
new file mode 100644
index 000000000000..8f443ea7fa3f
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -0,0 +1,392 @@
+//===-- primary64.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_PRIMARY64_H_
+#define SCUDO_PRIMARY64_H_
+
+#include "bytemap.h"
+#include "common.h"
+#include "list.h"
+#include "local_cache.h"
+#include "release.h"
+#include "stats.h"
+#include "string_utils.h"
+
+namespace scudo {
+
+// SizeClassAllocator64 is an allocator tuned for 64-bit address space.
+//
+// It starts by reserving NumClasses * 2^RegionSizeLog bytes, equally divided
+// in Regions, specific to each size class. Note that the base of that mapping
+// is random (based on the platform specific map() capabilities), and that
+// each Region actually starts at a random offset from its base.
+//
+// Regions are mapped incrementally on demand to fulfill allocation requests,
+// those mappings being split into equally sized Blocks based on the size class
+// they belong to. The Blocks created are shuffled to prevent predictable
+// address patterns (the predictability increases with the size of the Blocks).
+//
+// The 1st Region (for size class 0) holds the TransferBatches. This is a
+// structure used to transfer arrays of available pointers from the size class
+// freelist to the thread specific freelist, and back.
+//
+// The memory used by this allocator is never unmapped, but can be partially
+// released if the platform allows for it.
+
+template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator64 {
+public:
+ typedef SizeClassMapT SizeClassMap;
+ typedef SizeClassAllocator64<SizeClassMap, RegionSizeLog> ThisT;
+ typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
+ typedef typename CacheT::TransferBatch TransferBatch;
+
+ static uptr getSizeByClassId(uptr ClassId) {
+ return (ClassId == SizeClassMap::BatchClassId)
+ ? sizeof(TransferBatch)
+ : SizeClassMap::getSizeByClassId(ClassId);
+ }
+
+ static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }
+
+ void initLinkerInitialized(s32 ReleaseToOsInterval) {
+ // Reserve the space required for the Primary.
+ PrimaryBase = reinterpret_cast<uptr>(
+ map(nullptr, PrimarySize, "scudo:primary", MAP_NOACCESS, &Data));
+
+ RegionInfoArray = reinterpret_cast<RegionInfo *>(
+ map(nullptr, sizeof(RegionInfo) * NumClasses, "scudo:regioninfo"));
+ DCHECK_EQ(reinterpret_cast<uptr>(RegionInfoArray) % SCUDO_CACHE_LINE_SIZE,
+ 0);
+
+ u32 Seed;
+ if (UNLIKELY(!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed))))
+ Seed = static_cast<u32>(getMonotonicTime() ^ (PrimaryBase >> 12));
+ const uptr PageSize = getPageSizeCached();
+ for (uptr I = 0; I < NumClasses; I++) {
+ RegionInfo *Region = getRegionInfo(I);
+      // The actual start of a region is offset by a random number of pages.
+ Region->RegionBeg =
+ getRegionBaseByClassId(I) + (getRandomModN(&Seed, 16) + 1) * PageSize;
+      // Releasing smaller size classes doesn't necessarily yield a
+      // meaningful RSS impact: there are more blocks per page, and they are
+      // randomized around, so pages are less likely to be entirely empty.
+      // On top of this, attempting to release those requires more iterations
+      // and memory accesses, which ends up being fairly costly. The current
+      // lower limit is mostly arbitrary and based on empirical observations.
+ // TODO(kostyak): make the lower limit a runtime option
+ Region->CanRelease = (ReleaseToOsInterval >= 0) &&
+ (I != SizeClassMap::BatchClassId) &&
+ (getSizeByClassId(I) >= (PageSize / 32));
+ Region->RandState = getRandomU32(&Seed);
+ }
+ ReleaseToOsIntervalMs = ReleaseToOsInterval;
+ }
+ void init(s32 ReleaseToOsInterval) {
+ memset(this, 0, sizeof(*this));
+ initLinkerInitialized(ReleaseToOsInterval);
+ }
+
+ void unmapTestOnly() {
+ unmap(reinterpret_cast<void *>(PrimaryBase), PrimarySize, UNMAP_ALL, &Data);
+ unmap(reinterpret_cast<void *>(RegionInfoArray),
+ sizeof(RegionInfo) * NumClasses);
+ }
+
+ TransferBatch *popBatch(CacheT *C, uptr ClassId) {
+ DCHECK_LT(ClassId, NumClasses);
+ RegionInfo *Region = getRegionInfo(ClassId);
+ ScopedLock L(Region->Mutex);
+ TransferBatch *B = Region->FreeList.front();
+ if (B) {
+ Region->FreeList.pop_front();
+ } else {
+ B = populateFreeList(C, ClassId, Region);
+ if (UNLIKELY(!B))
+ return nullptr;
+ }
+ DCHECK_GT(B->getCount(), 0);
+ Region->Stats.PoppedBlocks += B->getCount();
+ return B;
+ }
+
+ void pushBatch(uptr ClassId, TransferBatch *B) {
+ DCHECK_GT(B->getCount(), 0);
+ RegionInfo *Region = getRegionInfo(ClassId);
+ ScopedLock L(Region->Mutex);
+ Region->FreeList.push_front(B);
+ Region->Stats.PushedBlocks += B->getCount();
+ if (Region->CanRelease)
+ releaseToOSMaybe(Region, ClassId);
+ }
+
+ void disable() {
+ for (uptr I = 0; I < NumClasses; I++)
+ getRegionInfo(I)->Mutex.lock();
+ }
+
+ void enable() {
+ for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--)
+ getRegionInfo(static_cast<uptr>(I))->Mutex.unlock();
+ }
+
+ template <typename F> void iterateOverBlocks(F Callback) const {
+ for (uptr I = 0; I < NumClasses; I++) {
+ if (I == SizeClassMap::BatchClassId)
+ continue;
+ const RegionInfo *Region = getRegionInfo(I);
+ const uptr BlockSize = getSizeByClassId(I);
+ const uptr From = Region->RegionBeg;
+ const uptr To = From + Region->AllocatedUser;
+ for (uptr Block = From; Block < To; Block += BlockSize)
+ Callback(Block);
+ }
+ }
+
+ void getStats(ScopedString *Str) const {
+ // TODO(kostyak): get the RSS per region.
+ uptr TotalMapped = 0;
+ uptr PoppedBlocks = 0;
+ uptr PushedBlocks = 0;
+ for (uptr I = 0; I < NumClasses; I++) {
+ RegionInfo *Region = getRegionInfo(I);
+ if (Region->MappedUser)
+ TotalMapped += Region->MappedUser;
+ PoppedBlocks += Region->Stats.PoppedBlocks;
+ PushedBlocks += Region->Stats.PushedBlocks;
+ }
+ Str->append("Stats: SizeClassAllocator64: %zuM mapped (%zuM rss) in %zu "
+ "allocations; remains %zu\n",
+                TotalMapped >> 20, static_cast<uptr>(0), PoppedBlocks,
+ PoppedBlocks - PushedBlocks);
+
+ for (uptr I = 0; I < NumClasses; I++)
+ getStats(Str, I, 0);
+ }
+
+ uptr releaseToOS() {
+ uptr TotalReleasedBytes = 0;
+ for (uptr I = 0; I < NumClasses; I++) {
+ if (I == SizeClassMap::BatchClassId)
+ continue;
+ RegionInfo *Region = getRegionInfo(I);
+ ScopedLock L(Region->Mutex);
+ TotalReleasedBytes += releaseToOSMaybe(Region, I, /*Force=*/true);
+ }
+ return TotalReleasedBytes;
+ }
+
+private:
+ static const uptr RegionSize = 1UL << RegionSizeLog;
+ static const uptr NumClasses = SizeClassMap::NumClasses;
+ static const uptr PrimarySize = RegionSize * NumClasses;
+
+ // Call map for user memory with at least this size.
+ static const uptr MapSizeIncrement = 1UL << 17;
+
+ struct RegionStats {
+ uptr PoppedBlocks;
+ uptr PushedBlocks;
+ };
+
+ struct ReleaseToOsInfo {
+ uptr PushedBlocksAtLastRelease;
+ uptr RangesReleased;
+ uptr LastReleasedBytes;
+ u64 LastReleaseAtNs;
+ };
+
+ struct ALIGNED(SCUDO_CACHE_LINE_SIZE) RegionInfo {
+ HybridMutex Mutex;
+ IntrusiveList<TransferBatch> FreeList;
+ RegionStats Stats;
+ bool CanRelease;
+ bool Exhausted;
+ u32 RandState;
+ uptr RegionBeg;
+ uptr MappedUser; // Bytes mapped for user memory.
+ uptr AllocatedUser; // Bytes allocated for user memory.
+ MapPlatformData Data;
+ ReleaseToOsInfo ReleaseInfo;
+ };
+ COMPILER_CHECK(sizeof(RegionInfo) % SCUDO_CACHE_LINE_SIZE == 0);
+
+ uptr PrimaryBase;
+ RegionInfo *RegionInfoArray;
+ MapPlatformData Data;
+ s32 ReleaseToOsIntervalMs;
+
+ RegionInfo *getRegionInfo(uptr ClassId) const {
+ DCHECK_LT(ClassId, NumClasses);
+ return &RegionInfoArray[ClassId];
+ }
+
+ uptr getRegionBaseByClassId(uptr ClassId) const {
+ return PrimaryBase + (ClassId << RegionSizeLog);
+ }
+
+ bool populateBatches(CacheT *C, RegionInfo *Region, uptr ClassId,
+ TransferBatch **CurrentBatch, u32 MaxCount,
+ void **PointersArray, u32 Count) {
+    // No need to shuffle the pointers of the batch size class.
+ if (ClassId != SizeClassMap::BatchClassId)
+ shuffle(PointersArray, Count, &Region->RandState);
+ TransferBatch *B = *CurrentBatch;
+ for (uptr I = 0; I < Count; I++) {
+ if (B && B->getCount() == MaxCount) {
+ Region->FreeList.push_back(B);
+ B = nullptr;
+ }
+ if (!B) {
+ B = C->createBatch(ClassId, PointersArray[I]);
+ if (UNLIKELY(!B))
+ return false;
+ B->clear();
+ }
+ B->add(PointersArray[I]);
+ }
+ *CurrentBatch = B;
+ return true;
+ }
+
+ NOINLINE TransferBatch *populateFreeList(CacheT *C, uptr ClassId,
+ RegionInfo *Region) {
+ const uptr Size = getSizeByClassId(ClassId);
+ const u32 MaxCount = TransferBatch::getMaxCached(Size);
+
+ const uptr RegionBeg = Region->RegionBeg;
+ const uptr MappedUser = Region->MappedUser;
+ const uptr TotalUserBytes = Region->AllocatedUser + MaxCount * Size;
+ // Map more space for blocks, if necessary.
+ if (TotalUserBytes > MappedUser) {
+ // Do the mmap for the user memory.
+ const uptr UserMapSize =
+ roundUpTo(TotalUserBytes - MappedUser, MapSizeIncrement);
+ const uptr RegionBase = RegionBeg - getRegionBaseByClassId(ClassId);
+ if (UNLIKELY(RegionBase + MappedUser + UserMapSize > RegionSize)) {
+ if (!Region->Exhausted) {
+ Region->Exhausted = true;
+ ScopedString Str(1024);
+ getStats(&Str);
+ Str.append(
+ "Scudo OOM: The process has Exhausted %zuM for size class %zu.\n",
+ RegionSize >> 20, Size);
+ Str.output();
+ }
+ return nullptr;
+ }
+ if (UNLIKELY(MappedUser == 0))
+ Region->Data = Data;
+ if (UNLIKELY(!map(reinterpret_cast<void *>(RegionBeg + MappedUser),
+ UserMapSize, "scudo:primary",
+ MAP_ALLOWNOMEM | MAP_RESIZABLE, &Region->Data)))
+ return nullptr;
+ Region->MappedUser += UserMapSize;
+ C->getStats().add(StatMapped, UserMapSize);
+ }
+
+ const uptr NumberOfBlocks = Min(
+ 8UL * MaxCount, (Region->MappedUser - Region->AllocatedUser) / Size);
+ DCHECK_GT(NumberOfBlocks, 0);
+
+ TransferBatch *B = nullptr;
+ constexpr uptr ShuffleArraySize = 48;
+ void *ShuffleArray[ShuffleArraySize];
+ u32 Count = 0;
+ const uptr P = RegionBeg + Region->AllocatedUser;
+ const uptr AllocatedUser = NumberOfBlocks * Size;
+ for (uptr I = P; I < P + AllocatedUser; I += Size) {
+ ShuffleArray[Count++] = reinterpret_cast<void *>(I);
+ if (Count == ShuffleArraySize) {
+ if (UNLIKELY(!populateBatches(C, Region, ClassId, &B, MaxCount,
+ ShuffleArray, Count)))
+ return nullptr;
+ Count = 0;
+ }
+ }
+ if (Count) {
+ if (UNLIKELY(!populateBatches(C, Region, ClassId, &B, MaxCount,
+ ShuffleArray, Count)))
+ return nullptr;
+ }
+ DCHECK(B);
+ DCHECK_GT(B->getCount(), 0);
+
+ C->getStats().add(StatFree, AllocatedUser);
+ Region->AllocatedUser += AllocatedUser;
+ Region->Exhausted = false;
+ if (Region->CanRelease)
+ Region->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
+
+ return B;
+ }
+
+ void getStats(ScopedString *Str, uptr ClassId, uptr Rss) const {
+ RegionInfo *Region = getRegionInfo(ClassId);
+ if (Region->MappedUser == 0)
+ return;
+ const uptr InUse = Region->Stats.PoppedBlocks - Region->Stats.PushedBlocks;
+ const uptr TotalChunks = Region->AllocatedUser / getSizeByClassId(ClassId);
+ Str->append("%s %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
+ "inuse: %6zu total: %6zu rss: %6zuK releases: %6zu last "
+ "released: %6zuK region: 0x%zx (0x%zx)\n",
+ Region->Exhausted ? "F" : " ", ClassId,
+ getSizeByClassId(ClassId), Region->MappedUser >> 10,
+ Region->Stats.PoppedBlocks, Region->Stats.PushedBlocks, InUse,
+ TotalChunks, Rss >> 10, Region->ReleaseInfo.RangesReleased,
+ Region->ReleaseInfo.LastReleasedBytes >> 10, Region->RegionBeg,
+ getRegionBaseByClassId(ClassId));
+ }
+
+ NOINLINE uptr releaseToOSMaybe(RegionInfo *Region, uptr ClassId,
+ bool Force = false) {
+ const uptr BlockSize = getSizeByClassId(ClassId);
+ const uptr PageSize = getPageSizeCached();
+
+ CHECK_GE(Region->Stats.PoppedBlocks, Region->Stats.PushedBlocks);
+ const uptr BytesInFreeList =
+ Region->AllocatedUser -
+ (Region->Stats.PoppedBlocks - Region->Stats.PushedBlocks) * BlockSize;
+ if (BytesInFreeList < PageSize)
+ return 0; // No chance to release anything.
+ if ((Region->Stats.PushedBlocks -
+ Region->ReleaseInfo.PushedBlocksAtLastRelease) *
+ BlockSize <
+ PageSize) {
+ return 0; // Nothing new to release.
+ }
+
+ if (!Force) {
+ const s32 IntervalMs = ReleaseToOsIntervalMs;
+ if (IntervalMs < 0)
+ return 0;
+ if (Region->ReleaseInfo.LastReleaseAtNs +
+ static_cast<uptr>(IntervalMs) * 1000000ULL >
+ getMonotonicTime()) {
+ return 0; // Memory was returned recently.
+ }
+ }
+
+ ReleaseRecorder Recorder(Region->RegionBeg, &Region->Data);
+ releaseFreeMemoryToOS(&Region->FreeList, Region->RegionBeg,
+ roundUpTo(Region->AllocatedUser, PageSize) / PageSize,
+ BlockSize, &Recorder);
+
+ if (Recorder.getReleasedRangesCount() > 0) {
+ Region->ReleaseInfo.PushedBlocksAtLastRelease =
+ Region->Stats.PushedBlocks;
+ Region->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
+ Region->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
+ }
+ Region->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
+ return Recorder.getReleasedBytes();
+ }
+};
+
+} // namespace scudo
+
+#endif // SCUDO_PRIMARY64_H_
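
Since the whole primary is reserved up front as NumClasses regions of
2^RegionSizeLog bytes, the base of a class's region is simply
PrimaryBase + (ClassId << RegionSizeLog); only RegionBeg is randomized within
it. A small sketch of that layout arithmetic (the concrete numbers are
illustrative):

  constexpr scudo::uptr RegionSizeLog = 30;      // 1G regions, for example.
  constexpr scudo::uptr RegionSize = 1UL << RegionSizeLog;
  constexpr scudo::uptr NumClasses = 33;         // Depends on the class map.
  constexpr scudo::uptr PrimarySize = RegionSize * NumClasses;

  constexpr scudo::uptr regionBase(scudo::uptr PrimaryBase, scudo::uptr ClassId) {
    return PrimaryBase + (ClassId << RegionSizeLog);
  }
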
diff --git a/compiler-rt/lib/scudo/standalone/quarantine.h b/compiler-rt/lib/scudo/standalone/quarantine.h
new file mode 100644
index 000000000000..35fd0bc197ea
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/quarantine.h
@@ -0,0 +1,289 @@
+//===-- quarantine.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_QUARANTINE_H_
+#define SCUDO_QUARANTINE_H_
+
+#include "list.h"
+#include "mutex.h"
+#include "string_utils.h"
+
+namespace scudo {
+
+struct QuarantineBatch {
+ // With the following count, a batch (and the header that protects it) occupy
+ // 4096 bytes on 32-bit platforms, and 8192 bytes on 64-bit.
+ static const u32 MaxCount = 1019;
+ QuarantineBatch *Next;
+ uptr Size;
+ u32 Count;
+ void *Batch[MaxCount];
+
+ void init(void *Ptr, uptr Size) {
+ Count = 1;
+ Batch[0] = Ptr;
+ this->Size = Size + sizeof(QuarantineBatch); // Account for the Batch Size.
+ }
+
+ // The total size of quarantined nodes recorded in this batch.
+ uptr getQuarantinedSize() const { return Size - sizeof(QuarantineBatch); }
+
+ void push_back(void *Ptr, uptr Size) {
+ DCHECK_LT(Count, MaxCount);
+ Batch[Count++] = Ptr;
+ this->Size += Size;
+ }
+
+ bool canMerge(const QuarantineBatch *const From) const {
+ return Count + From->Count <= MaxCount;
+ }
+
+ void merge(QuarantineBatch *const From) {
+ DCHECK_LE(Count + From->Count, MaxCount);
+ DCHECK_GE(Size, sizeof(QuarantineBatch));
+
+ for (uptr I = 0; I < From->Count; ++I)
+ Batch[Count + I] = From->Batch[I];
+ Count += From->Count;
+ Size += From->getQuarantinedSize();
+
+ From->Count = 0;
+ From->Size = sizeof(QuarantineBatch);
+ }
+
+ void shuffle(u32 State) { ::scudo::shuffle(Batch, Count, &State); }
+};
+
+COMPILER_CHECK(sizeof(QuarantineBatch) <= (1U << 13)); // 8Kb.
+
+// Per-thread cache of memory blocks.
+template <typename Callback> class QuarantineCache {
+public:
+ void initLinkerInitialized() {}
+ void init() {
+ memset(this, 0, sizeof(*this));
+ initLinkerInitialized();
+ }
+
+ // Total memory used, including internal accounting.
+ uptr getSize() const { return atomic_load_relaxed(&Size); }
+ // Memory used for internal accounting.
+ uptr getOverheadSize() const { return List.size() * sizeof(QuarantineBatch); }
+
+ void enqueue(Callback Cb, void *Ptr, uptr Size) {
+ if (List.empty() || List.back()->Count == QuarantineBatch::MaxCount) {
+ QuarantineBatch *B =
+ reinterpret_cast<QuarantineBatch *>(Cb.allocate(sizeof(*B)));
+ DCHECK(B);
+ B->init(Ptr, Size);
+ enqueueBatch(B);
+ } else {
+ List.back()->push_back(Ptr, Size);
+ addToSize(Size);
+ }
+ }
+
+ void transfer(QuarantineCache *From) {
+ List.append_back(&From->List);
+ addToSize(From->getSize());
+ atomic_store_relaxed(&From->Size, 0);
+ }
+
+ void enqueueBatch(QuarantineBatch *B) {
+ List.push_back(B);
+ addToSize(B->Size);
+ }
+
+ QuarantineBatch *dequeueBatch() {
+ if (List.empty())
+ return nullptr;
+ QuarantineBatch *B = List.front();
+ List.pop_front();
+ subFromSize(B->Size);
+ return B;
+ }
+
+ void mergeBatches(QuarantineCache *ToDeallocate) {
+ uptr ExtractedSize = 0;
+ QuarantineBatch *Current = List.front();
+ while (Current && Current->Next) {
+ if (Current->canMerge(Current->Next)) {
+ QuarantineBatch *Extracted = Current->Next;
+ // Move all the chunks into the current batch.
+ Current->merge(Extracted);
+ DCHECK_EQ(Extracted->Count, 0);
+ DCHECK_EQ(Extracted->Size, sizeof(QuarantineBatch));
+        // Remove the next batch from the list and account for its Size.
+ List.extract(Current, Extracted);
+ ExtractedSize += Extracted->Size;
+ // Add it to deallocation list.
+ ToDeallocate->enqueueBatch(Extracted);
+ } else {
+ Current = Current->Next;
+ }
+ }
+ subFromSize(ExtractedSize);
+ }
+
+ void getStats(ScopedString *Str) const {
+ uptr BatchCount = 0;
+ uptr TotalOverheadBytes = 0;
+ uptr TotalBytes = 0;
+ uptr TotalQuarantineChunks = 0;
+ for (const QuarantineBatch &Batch : List) {
+ BatchCount++;
+ TotalBytes += Batch.Size;
+ TotalOverheadBytes += Batch.Size - Batch.getQuarantinedSize();
+ TotalQuarantineChunks += Batch.Count;
+ }
+ const uptr QuarantineChunksCapacity =
+ BatchCount * QuarantineBatch::MaxCount;
+ const uptr ChunksUsagePercent =
+ (QuarantineChunksCapacity == 0)
+ ? 0
+ : TotalQuarantineChunks * 100 / QuarantineChunksCapacity;
+ const uptr TotalQuarantinedBytes = TotalBytes - TotalOverheadBytes;
+ const uptr MemoryOverheadPercent =
+ (TotalQuarantinedBytes == 0)
+ ? 0
+ : TotalOverheadBytes * 100 / TotalQuarantinedBytes;
+ Str->append(
+ "Stats: Quarantine: batches: %zu; bytes: %zu (user: %zu); chunks: %zu "
+ "(capacity: %zu); %zu%% chunks used; %zu%% memory overhead\n",
+ BatchCount, TotalBytes, TotalQuarantinedBytes, TotalQuarantineChunks,
+ QuarantineChunksCapacity, ChunksUsagePercent, MemoryOverheadPercent);
+ }
+
+private:
+ IntrusiveList<QuarantineBatch> List;
+ atomic_uptr Size;
+
+ void addToSize(uptr add) { atomic_store_relaxed(&Size, getSize() + add); }
+ void subFromSize(uptr sub) { atomic_store_relaxed(&Size, getSize() - sub); }
+};
+
+// The callback interface is:
+// void Callback::recycle(Node *Ptr);
+// void *Callback::allocate(uptr Size);
+// void Callback::deallocate(void *Ptr);
+template <typename Callback, typename Node> class GlobalQuarantine {
+public:
+ typedef QuarantineCache<Callback> CacheT;
+
+ void initLinkerInitialized(uptr Size, uptr CacheSize) {
+ // Thread local quarantine size can be zero only when global quarantine size
+ // is zero (it allows us to perform just one atomic read per put() call).
+ CHECK((Size == 0 && CacheSize == 0) || CacheSize != 0);
+
+ atomic_store_relaxed(&MaxSize, Size);
+ atomic_store_relaxed(&MinSize, Size / 10 * 9); // 90% of max size.
+ atomic_store_relaxed(&MaxCacheSize, CacheSize);
+
+ Cache.initLinkerInitialized();
+ }
+ void init(uptr Size, uptr CacheSize) {
+ memset(this, 0, sizeof(*this));
+ initLinkerInitialized(Size, CacheSize);
+ }
+
+ uptr getMaxSize() const { return atomic_load_relaxed(&MaxSize); }
+ uptr getCacheSize() const { return atomic_load_relaxed(&MaxCacheSize); }
+
+ void put(CacheT *C, Callback Cb, Node *Ptr, uptr Size) {
+ C->enqueue(Cb, Ptr, Size);
+ if (C->getSize() > getCacheSize())
+ drain(C, Cb);
+ }
+
+ void NOINLINE drain(CacheT *C, Callback Cb) {
+ {
+ ScopedLock L(CacheMutex);
+ Cache.transfer(C);
+ }
+    if (Cache.getSize() > getMaxSize() && RecycleMutex.tryLock())
+ recycle(atomic_load_relaxed(&MinSize), Cb);
+ }
+
+ void NOINLINE drainAndRecycle(CacheT *C, Callback Cb) {
+ {
+ ScopedLock L(CacheMutex);
+ Cache.transfer(C);
+ }
+    RecycleMutex.lock();
+ recycle(0, Cb);
+ }
+
+ void getStats(ScopedString *Str) const {
+    // This assumes that the world is stopped, just like the allocator's
+    // printStats.
+ Cache.getStats(Str);
+ Str->append("Quarantine limits: global: %zuK; thread local: %zuK\n",
+ getMaxSize() >> 10, getCacheSize() >> 10);
+ }
+
+private:
+ // Read-only data.
+ alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex CacheMutex;
+ CacheT Cache;
+  alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex RecycleMutex;
+ atomic_uptr MinSize;
+ atomic_uptr MaxSize;
+ alignas(SCUDO_CACHE_LINE_SIZE) atomic_uptr MaxCacheSize;
+
+ void NOINLINE recycle(uptr MinSize, Callback Cb) {
+ CacheT Tmp;
+ Tmp.init();
+ {
+ ScopedLock L(CacheMutex);
+      // Go over the batches and merge partially filled ones to save some
+      // memory; otherwise the batches themselves (since the memory they use
+      // is counted against the quarantine limit) can outgrow the actual
+      // user's quarantined chunks, which defeats the purpose of the
+      // quarantine.
+ const uptr CacheSize = Cache.getSize();
+ const uptr OverheadSize = Cache.getOverheadSize();
+ DCHECK_GE(CacheSize, OverheadSize);
+      // Do the merge only when the overhead exceeds this predefined limit
+      // (which might require some tuning). It saves us a merge attempt when
+      // the batch list is unlikely to contain batches suitable for merging.
+ constexpr uptr OverheadThresholdPercents = 100;
+ if (CacheSize > OverheadSize &&
+ OverheadSize * (100 + OverheadThresholdPercents) >
+ CacheSize * OverheadThresholdPercents) {
+ Cache.mergeBatches(&Tmp);
+ }
+ // Extract enough chunks from the quarantine to get below the max
+ // quarantine size and leave some leeway for the newly quarantined chunks.
+ while (Cache.getSize() > MinSize)
+ Tmp.enqueueBatch(Cache.dequeueBatch());
+ }
+    RecycleMutex.unlock();
+ doRecycle(&Tmp, Cb);
+ }
+
+ void NOINLINE doRecycle(CacheT *C, Callback Cb) {
+ while (QuarantineBatch *B = C->dequeueBatch()) {
+ const u32 Seed = static_cast<u32>(
+ (reinterpret_cast<uptr>(B) ^ reinterpret_cast<uptr>(C)) >> 4);
+ B->shuffle(Seed);
+ constexpr uptr NumberOfPrefetch = 8UL;
+ CHECK(NumberOfPrefetch <= ARRAY_SIZE(B->Batch));
+ for (uptr I = 0; I < NumberOfPrefetch; I++)
+ PREFETCH(B->Batch[I]);
+ for (uptr I = 0, Count = B->Count; I < Count; I++) {
+ if (I + NumberOfPrefetch < Count)
+ PREFETCH(B->Batch[I + NumberOfPrefetch]);
+ Cb.recycle(reinterpret_cast<Node *>(B->Batch[I]));
+ }
+ Cb.deallocate(B);
+ }
+ }
+};
+
+} // namespace scudo
+
+#endif // SCUDO_QUARANTINE_H_
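
The Callback contract is exactly the three functions listed in the comment
above GlobalQuarantine. A minimal conforming callback, forwarding to a
hypothetical Backing allocator (Backing, Chunk and their methods are
placeholders, not part of the patch):

  struct QuarantineCallback {
    explicit QuarantineCallback(Backing *B) : B(B) {}
    void recycle(Chunk *Ptr) { B->releaseChunk(Ptr); }  // Back to the freelist.
    void *allocate(scudo::uptr Size) { return B->allocateInternal(Size); }
    void deallocate(void *Ptr) { B->deallocateInternal(Ptr); }
    Backing *B;
  };
  // Typical use: scudo::GlobalQuarantine<QuarantineCallback, Chunk> Quarantine;
  //              Quarantine.init(MaxSize, ThreadLocalCacheSize);
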
diff --git a/compiler-rt/lib/scudo/standalone/release.h b/compiler-rt/lib/scudo/standalone/release.h
new file mode 100644
index 000000000000..4fe29fde4bde
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/release.h
@@ -0,0 +1,262 @@
+//===-- release.h -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_RELEASE_H_
+#define SCUDO_RELEASE_H_
+
+#include "common.h"
+#include "list.h"
+
+namespace scudo {
+
+class ReleaseRecorder {
+public:
+ ReleaseRecorder(uptr BaseAddress, MapPlatformData *Data = nullptr)
+ : BaseAddress(BaseAddress), Data(Data) {}
+
+ uptr getReleasedRangesCount() const { return ReleasedRangesCount; }
+
+ uptr getReleasedBytes() const { return ReleasedBytes; }
+
+ // Releases [From, To) range of pages back to OS.
+ void releasePageRangeToOS(uptr From, uptr To) {
+ const uptr Size = To - From;
+ releasePagesToOS(BaseAddress, From, Size, Data);
+ ReleasedRangesCount++;
+ ReleasedBytes += Size;
+ }
+
+private:
+ uptr ReleasedRangesCount = 0;
+ uptr ReleasedBytes = 0;
+ uptr BaseAddress = 0;
+ MapPlatformData *Data = nullptr;
+};
+
+// A packed array of Counters. Each counter occupies 2^N bits, enough to store
+// the counter's MaxValue. The constructor will try to allocate the required
+// Buffer via map(), and the caller is expected to check whether the
+// initialization was successful via isAllocated(). For the sake of performance,
+// none of the accessors check the validity of the arguments: it is assumed that
+// Index is always in the [0, N) range and that the value is not incremented
+// past MaxValue.
+class PackedCounterArray {
+public:
+ PackedCounterArray(uptr NumCounters, uptr MaxValue) : N(NumCounters) {
+ CHECK_GT(NumCounters, 0);
+ CHECK_GT(MaxValue, 0);
+ constexpr uptr MaxCounterBits = sizeof(*Buffer) * 8UL;
+ // Rounding the counter storage size up to a power of two allows for using
+ // bit shifts to calculate a particular counter's Index and offset.
+ const uptr CounterSizeBits =
+ roundUpToPowerOfTwo(getMostSignificantSetBitIndex(MaxValue) + 1);
+ CHECK_LE(CounterSizeBits, MaxCounterBits);
+ CounterSizeBitsLog = getLog2(CounterSizeBits);
+ CounterMask = ~(static_cast<uptr>(0)) >> (MaxCounterBits - CounterSizeBits);
+
+ const uptr PackingRatio = MaxCounterBits >> CounterSizeBitsLog;
+ CHECK_GT(PackingRatio, 0);
+ PackingRatioLog = getLog2(PackingRatio);
+ BitOffsetMask = PackingRatio - 1;
+
+ BufferSize = (roundUpTo(N, static_cast<uptr>(1U) << PackingRatioLog) >>
+ PackingRatioLog) *
+ sizeof(*Buffer);
+ Buffer = reinterpret_cast<uptr *>(
+ map(nullptr, BufferSize, "scudo:counters", MAP_ALLOWNOMEM));
+ }
+ ~PackedCounterArray() {
+ if (isAllocated())
+ unmap(reinterpret_cast<void *>(Buffer), BufferSize);
+ }
+
+ bool isAllocated() const { return !!Buffer; }
+
+ uptr getCount() const { return N; }
+
+ uptr get(uptr I) const {
+ DCHECK_LT(I, N);
+ const uptr Index = I >> PackingRatioLog;
+ const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
+ return (Buffer[Index] >> BitOffset) & CounterMask;
+ }
+
+ void inc(uptr I) const {
+ DCHECK_LT(get(I), CounterMask);
+ const uptr Index = I >> PackingRatioLog;
+ const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
+ DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
+ Buffer[Index] += static_cast<uptr>(1U) << BitOffset;
+ }
+
+ void incRange(uptr From, uptr To) const {
+ DCHECK_LE(From, To);
+ for (uptr I = From; I <= To; I++)
+ inc(I);
+ }
+
+ uptr getBufferSize() const { return BufferSize; }
+
+private:
+ const uptr N;
+ uptr CounterSizeBitsLog;
+ uptr CounterMask;
+ uptr PackingRatioLog;
+ uptr BitOffsetMask;
+
+ uptr BufferSize;
+ uptr *Buffer;
+};
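+
+// Editor's note: a hypothetical, illustrative sketch (not part of the original
+// patch) of how the packed counters behave. With MaxValue == 3, each counter
+// is packed into 2 bits of an uptr word.
+inline void packedCounterArrayUsageExample() {
+ PackedCounterArray Counters(/*NumCounters=*/512, /*MaxValue=*/3);
+ if (!Counters.isAllocated())
+ return; // map() can legitimately fail with MAP_ALLOWNOMEM.
+ Counters.inc(0); // Counter 0 becomes 1.
+ Counters.incRange(4, 7); // Counters 4 through 7 each become 1.
+ DCHECK_EQ(Counters.get(0), 1U);
+}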
+
+template <class ReleaseRecorderT> class FreePagesRangeTracker {
+public:
+ explicit FreePagesRangeTracker(ReleaseRecorderT *Recorder)
+ : Recorder(Recorder), PageSizeLog(getLog2(getPageSizeCached())) {}
+
+ void processNextPage(bool Freed) {
+ if (Freed) {
+ if (!InRange) {
+ CurrentRangeStatePage = CurrentPage;
+ InRange = true;
+ }
+ } else {
+ closeOpenedRange();
+ }
+ CurrentPage++;
+ }
+
+ void finish() { closeOpenedRange(); }
+
+private:
+ void closeOpenedRange() {
+ if (InRange) {
+ Recorder->releasePageRangeToOS((CurrentRangeStatePage << PageSizeLog),
+ (CurrentPage << PageSizeLog));
+ InRange = false;
+ }
+ }
+
+ ReleaseRecorderT *const Recorder;
+ const uptr PageSizeLog;
+ bool InRange = false;
+ uptr CurrentPage = 0;
+ uptr CurrentRangeStatePage = 0;
+};
+
+template <class TransferBatchT, class ReleaseRecorderT>
+NOINLINE void
+releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> *FreeList, uptr Base,
+ uptr AllocatedPagesCount, uptr BlockSize,
+ ReleaseRecorderT *Recorder) {
+ const uptr PageSize = getPageSizeCached();
+
+ // Figure out the number of chunks per page and whether we can take a fast
+ // path (the number of chunks per page is the same for all pages).
+ uptr FullPagesBlockCountMax;
+ bool SameBlockCountPerPage;
+ if (BlockSize <= PageSize) {
+ if (PageSize % BlockSize == 0) {
+ // Same number of chunks per page, no crossovers.
+ FullPagesBlockCountMax = PageSize / BlockSize;
+ SameBlockCountPerPage = true;
+ } else if (BlockSize % (PageSize % BlockSize) == 0) {
+ // Some chunks are crossing page boundaries, which means that the page
+ // contains one or two partial chunks, but all pages contain the same
+ // number of chunks.
+ FullPagesBlockCountMax = PageSize / BlockSize + 1;
+ SameBlockCountPerPage = true;
+ } else {
+ // Some chunks are crossing page boundaries, which means that the page
+ // contains one or two partial chunks.
+ FullPagesBlockCountMax = PageSize / BlockSize + 2;
+ SameBlockCountPerPage = false;
+ }
+ } else {
+ if (BlockSize % PageSize == 0) {
+ // One chunk covers multiple pages, no crossovers.
+ FullPagesBlockCountMax = 1;
+ SameBlockCountPerPage = true;
+ } else {
+ // One chunk covers multiple pages, and some chunks are crossing page
+ // boundaries. Some pages contain one chunk, some contain two.
+ FullPagesBlockCountMax = 2;
+ SameBlockCountPerPage = false;
+ }
+ }
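+ // Editor's illustration: with PageSize == 4096, BlockSize == 48 yields
+ // 4096 % 48 == 16 and 48 % 16 == 0, so FullPagesBlockCountMax == 86 with the
+ // same block count on every page; BlockSize == 96 leaves a remainder of 64
+ // and 96 % 64 != 0, so FullPagesBlockCountMax == 44 and the slow path is used.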
+
+ PackedCounterArray Counters(AllocatedPagesCount, FullPagesBlockCountMax);
+ if (!Counters.isAllocated())
+ return;
+
+ const uptr PageSizeLog = getLog2(PageSize);
+ const uptr End = Base + AllocatedPagesCount * PageSize;
+
+ // Iterate over free chunks and count how many free chunks affect each
+ // allocated page.
+ if (BlockSize <= PageSize && PageSize % BlockSize == 0) {
+ // Each chunk affects one page only.
+ for (auto It = FreeList->begin(); It != FreeList->end(); ++It) {
+ for (u32 I = 0; I < (*It).getCount(); I++) {
+ const uptr P = reinterpret_cast<uptr>((*It).get(I));
+ if (P >= Base && P < End)
+ Counters.inc((P - Base) >> PageSizeLog);
+ }
+ }
+ } else {
+ // In all other cases chunks might affect more than one page.
+ for (auto It = FreeList->begin(); It != FreeList->end(); ++It) {
+ for (u32 I = 0; I < (*It).getCount(); I++) {
+ const uptr P = reinterpret_cast<uptr>((*It).get(I));
+ if (P >= Base && P < End)
+ Counters.incRange((P - Base) >> PageSizeLog,
+ (P - Base + BlockSize - 1) >> PageSizeLog);
+ }
+ }
+ }
+
+ // Iterate over pages detecting ranges of pages with chunk Counters equal
+ // to the expected number of chunks for the particular page.
+ FreePagesRangeTracker<ReleaseRecorderT> RangeTracker(Recorder);
+ if (SameBlockCountPerPage) {
+ // Fast path, every page has the same number of chunks affecting it.
+ for (uptr I = 0; I < Counters.getCount(); I++)
+ RangeTracker.processNextPage(Counters.get(I) == FullPagesBlockCountMax);
+ } else {
+ // Slow path: go through the pages, keeping count of how many chunks affect
+ // each page.
+ const uptr Pn = BlockSize < PageSize ? PageSize / BlockSize : 1;
+ const uptr Pnc = Pn * BlockSize;
+ // The idea is to increment the current page pointer by the first chunk
+ // size, middle portion size (the portion of the page covered by chunks
+ // except the first and the last one) and then the last chunk size, adding
+ // up the number of chunks on the current page and checking on every step
+ // whether the page boundary was crossed.
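+ // Editor's illustration: with PageSize == 4096 and BlockSize == 1536
+ // (Pn == 2, Pnc == 3072), the loop computes BlocksPerPage == 3 for page 0
+ // (two full blocks plus a leading partial) and 4 for page 1 (a trailing
+ // partial, two full blocks, and a leading partial).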
+ uptr PrevPageBoundary = 0;
+ uptr CurrentBoundary = 0;
+ for (uptr I = 0; I < Counters.getCount(); I++) {
+ const uptr PageBoundary = PrevPageBoundary + PageSize;
+ uptr BlocksPerPage = Pn;
+ if (CurrentBoundary < PageBoundary) {
+ if (CurrentBoundary > PrevPageBoundary)
+ BlocksPerPage++;
+ CurrentBoundary += Pnc;
+ if (CurrentBoundary < PageBoundary) {
+ BlocksPerPage++;
+ CurrentBoundary += BlockSize;
+ }
+ }
+ PrevPageBoundary = PageBoundary;
+
+ RangeTracker.processNextPage(Counters.get(I) == BlocksPerPage);
+ }
+ }
+ RangeTracker.finish();
+}
+
+} // namespace scudo
+
+#endif // SCUDO_RELEASE_H_
diff --git a/compiler-rt/lib/scudo/standalone/report.cpp b/compiler-rt/lib/scudo/standalone/report.cpp
new file mode 100644
index 000000000000..12d851ff019a
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/report.cpp
@@ -0,0 +1,192 @@
+//===-- report.cpp ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "report.h"
+
+#include "atomic_helpers.h"
+#include "string_utils.h"
+
+#include <stdarg.h>
+
+namespace scudo {
+
+class ScopedErrorReport {
+public:
+ ScopedErrorReport() : Message(512) { Message.append("Scudo ERROR: "); }
+ void append(const char *Format, ...) {
+ va_list Args;
+ va_start(Args, Format);
+ Message.append(Format, Args);
+ va_end(Args);
+ }
+ NORETURN ~ScopedErrorReport() {
+ outputRaw(Message.data());
+ setAbortMessage(Message.data());
+ die();
+ }
+
+private:
+ ScopedString Message;
+};
+
+INLINE void NORETURN trap() { __builtin_trap(); }
+
+// This could potentially be called recursively if a CHECK fails in the reports.
+void NORETURN reportCheckFailed(const char *File, int Line,
+ const char *Condition, u64 Value1, u64 Value2) {
+ static atomic_u32 NumberOfCalls;
+ if (atomic_fetch_add(&NumberOfCalls, 1, memory_order_relaxed) > 2) {
+ // TODO(kostyak): maybe sleep here?
+ trap();
+ }
+ ScopedErrorReport Report;
+ Report.append("CHECK failed @ %s:%d %s (%llu, %llu)\n", File, Line, Condition,
+ Value1, Value2);
+}
+
+// Generic string fatal error message.
+void NORETURN reportError(const char *Message) {
+ ScopedErrorReport Report;
+ Report.append("%s\n", Message);
+}
+
+void NORETURN reportInvalidFlag(const char *FlagType, const char *Value) {
+ ScopedErrorReport Report;
+ Report.append("invalid value for %s option: '%s'\n", FlagType, Value);
+}
+
+// The checksum of a chunk header is invalid. This could be caused by an
+// {over,under}write of the header, or a pointer that is not an actual chunk.
+void NORETURN reportHeaderCorruption(void *Ptr) {
+ ScopedErrorReport Report;
+ Report.append("corrupted chunk header at address %p\n", Ptr);
+}
+
+// Two threads have attempted to modify a chunk header at the same time. This is
+// symptomatic of a race-condition in the application code, or general lack of
+// proper locking.
+void NORETURN reportHeaderRace(void *Ptr) {
+ ScopedErrorReport Report;
+ Report.append("race on chunk header at address %p\n", Ptr);
+}
+
+// The allocator was compiled with parameters that conflict with field size
+// requirements.
+void NORETURN reportSanityCheckError(const char *Field) {
+ ScopedErrorReport Report;
+ Report.append("maximum possible %s doesn't fit in header\n", Field);
+}
+
+// We enforce a maximum alignment, to keep fields smaller and generally prevent
+// integer overflows, or unexpected corner cases.
+void NORETURN reportAlignmentTooBig(uptr Alignment, uptr MaxAlignment) {
+ ScopedErrorReport Report;
+ Report.append("invalid allocation alignment: %zu exceeds maximum supported "
+ "alignment of %zu\n",
+ Alignment, MaxAlignment);
+}
+
+// See above, we also enforce a maximum size.
+void NORETURN reportAllocationSizeTooBig(uptr UserSize, uptr TotalSize,
+ uptr MaxSize) {
+ ScopedErrorReport Report;
+ Report.append("requested allocation size %zu (%zu after adjustments) exceeds "
+ "maximum supported size of %zu\n",
+ UserSize, TotalSize, MaxSize);
+}
+
+void NORETURN reportOutOfMemory(uptr RequestedSize) {
+ ScopedErrorReport Report;
+ Report.append("out of memory trying to allocate %zu bytes\n", RequestedSize);
+}
+
+static const char *stringifyAction(AllocatorAction Action) {
+ switch (Action) {
+ case AllocatorAction::Recycling:
+ return "recycling";
+ case AllocatorAction::Deallocating:
+ return "deallocating";
+ case AllocatorAction::Reallocating:
+ return "reallocating";
+ case AllocatorAction::Sizing:
+ return "sizing";
+ }
+ return "<invalid action>";
+}
+
+// The chunk is not in a state congruent with the operation we want to perform.
+// This is usually the case with a double-free or a realloc of a freed pointer.
+void NORETURN reportInvalidChunkState(AllocatorAction Action, void *Ptr) {
+ ScopedErrorReport Report;
+ Report.append("invalid chunk state when %s address %p\n",
+ stringifyAction(Action), Ptr);
+}
+
+void NORETURN reportMisalignedPointer(AllocatorAction Action, void *Ptr) {
+ ScopedErrorReport Report;
+ Report.append("misaligned pointer when %s address %p\n",
+ stringifyAction(Action), Ptr);
+}
+
+// The deallocation function used is at odds with the one used to allocate the
+// chunk (eg: new[]/delete or malloc/delete, and so on).
+void NORETURN reportDeallocTypeMismatch(AllocatorAction Action, void *Ptr,
+ u8 TypeA, u8 TypeB) {
+ ScopedErrorReport Report;
+ Report.append("allocation type mismatch when %s address %p (%d vs %d)\n",
+ stringifyAction(Action), Ptr, TypeA, TypeB);
+}
+
+// The size specified to the delete operator does not match the one that was
+// passed to new when allocating the chunk.
+void NORETURN reportDeleteSizeMismatch(void *Ptr, uptr Size,
+ uptr ExpectedSize) {
+ ScopedErrorReport Report;
+ Report.append(
+ "invalid sized delete when deallocating address %p (%zu vs %zu)\n", Ptr,
+ Size, ExpectedSize);
+}
+
+void NORETURN reportAlignmentNotPowerOfTwo(uptr Alignment) {
+ ScopedErrorReport Report;
+ Report.append(
+ "invalid allocation alignment: %zu, alignment must be a power of two\n",
+ Alignment);
+}
+
+void NORETURN reportCallocOverflow(uptr Count, uptr Size) {
+ ScopedErrorReport Report;
+ Report.append("calloc parameters overflow: count * size (%zu * %zu) cannot "
+ "be represented with type size_t\n",
+ Count, Size);
+}
+
+void NORETURN reportInvalidPosixMemalignAlignment(uptr Alignment) {
+ ScopedErrorReport Report;
+ Report.append(
+ "invalid alignment requested in posix_memalign: %zu, alignment must be a "
+ "power of two and a multiple of sizeof(void *) == %zu\n",
+ Alignment, sizeof(void *));
+}
+
+void NORETURN reportPvallocOverflow(uptr Size) {
+ ScopedErrorReport Report;
+ Report.append("pvalloc parameters overflow: size %zu rounded up to system "
+ "page size %zu cannot be represented in type size_t\n",
+ Size, getPageSizeCached());
+}
+
+void NORETURN reportInvalidAlignedAllocAlignment(uptr Alignment, uptr Size) {
+ ScopedErrorReport Report;
+ Report.append("invalid alignment requested in aligned_alloc: %zu, alignment "
+ "must be a power of two and the requested size %zu must be a "
+ "multiple of alignment\n",
+ Alignment, Size);
+}
+
+} // namespace scudo
diff --git a/compiler-rt/lib/scudo/standalone/report.h b/compiler-rt/lib/scudo/standalone/report.h
new file mode 100644
index 000000000000..14e4e799b736
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/report.h
@@ -0,0 +1,57 @@
+//===-- report.h ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_REPORT_H_
+#define SCUDO_REPORT_H_
+
+#include "internal_defs.h"
+
+namespace scudo {
+
+// Reports are *fatal* unless stated otherwise.
+
+// Generic error.
+void NORETURN reportError(const char *Message);
+
+// Flags related errors.
+void NORETURN reportInvalidFlag(const char *FlagType, const char *Value);
+
+// Chunk header related errors.
+void NORETURN reportHeaderCorruption(void *Ptr);
+void NORETURN reportHeaderRace(void *Ptr);
+
+// Sanity checks related error.
+void NORETURN reportSanityCheckError(const char *Field);
+
+// Combined allocator errors.
+void NORETURN reportAlignmentTooBig(uptr Alignment, uptr MaxAlignment);
+void NORETURN reportAllocationSizeTooBig(uptr UserSize, uptr TotalSize,
+ uptr MaxSize);
+void NORETURN reportOutOfMemory(uptr RequestedSize);
+enum class AllocatorAction : u8 {
+ Recycling,
+ Deallocating,
+ Reallocating,
+ Sizing,
+};
+void NORETURN reportInvalidChunkState(AllocatorAction Action, void *Ptr);
+void NORETURN reportMisalignedPointer(AllocatorAction Action, void *Ptr);
+void NORETURN reportDeallocTypeMismatch(AllocatorAction Action, void *Ptr,
+ u8 TypeA, u8 TypeB);
+void NORETURN reportDeleteSizeMismatch(void *Ptr, uptr Size, uptr ExpectedSize);
+
+// C wrappers errors.
+void NORETURN reportAlignmentNotPowerOfTwo(uptr Alignment);
+void NORETURN reportInvalidPosixMemalignAlignment(uptr Alignment);
+void NORETURN reportCallocOverflow(uptr Count, uptr Size);
+void NORETURN reportPvallocOverflow(uptr Size);
+void NORETURN reportInvalidAlignedAllocAlignment(uptr Alignment, uptr Size);
+
+} // namespace scudo
+
+#endif // SCUDO_REPORT_H_
diff --git a/compiler-rt/lib/scudo/standalone/secondary.cpp b/compiler-rt/lib/scudo/standalone/secondary.cpp
new file mode 100644
index 000000000000..db7361d7134a
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/secondary.cpp
@@ -0,0 +1,135 @@
+//===-- secondary.cpp -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "secondary.h"
+
+#include "string_utils.h"
+
+namespace scudo {
+
+// As with the Primary, the size passed to this function includes any desired
+// alignment, so that the frontend can align the user allocation. The hint
+// parameter allows us to unmap spurious memory when dealing with larger
+// (greater than a page) alignments on 32-bit platforms.
+// Due to the scarcity of address space available on those platforms, requesting
+// an allocation from the Secondary with a large alignment would end up wasting
+// VA space (even though we are not committing the whole thing), hence the need
+// to trim off some of the reserved space.
+// For allocations requested with an alignment greater than or equal to a page,
+// the committed memory will amount to something close to Size - AlignmentHint
+// (pending rounding and headers).
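+// Editor's illustration (hypothetical addresses): with PageSize == 4096 and
+// AlignmentHint == 16384, a MapBase of 0x5000 gives CommitBase ==
+// roundUpTo(0x6001, 0x4000) - 0x1000 == 0x7000; the aligned user pointer lands
+// at 0x8000 and the header occupies the page right below it, while the
+// [0x5000, 0x6000) slice can be trimmed on 32-bit platforms.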
+void *MapAllocator::allocate(uptr Size, uptr AlignmentHint, uptr *BlockEnd) {
+ DCHECK_GT(Size, AlignmentHint);
+ const uptr PageSize = getPageSizeCached();
+ const uptr MapSize =
+ roundUpTo(Size + LargeBlock::getHeaderSize(), PageSize) + 2 * PageSize;
+ MapPlatformData Data = {};
+ uptr MapBase =
+ reinterpret_cast<uptr>(map(nullptr, MapSize, "scudo:secondary",
+ MAP_NOACCESS | MAP_ALLOWNOMEM, &Data));
+ if (UNLIKELY(!MapBase))
+ return nullptr;
+ uptr CommitBase = MapBase + PageSize;
+ uptr MapEnd = MapBase + MapSize;
+
+ // In the unlikely event of alignments larger than a page, adjust the amount
+ // of memory we want to commit, and trim the extra memory.
+ if (UNLIKELY(AlignmentHint >= PageSize)) {
+ // For alignments greater than or equal to a page, the user pointer (e.g. the
+ // pointer that is returned by the C or C++ allocation APIs) ends up on a
+ // page boundary, and our headers will live in the preceding page.
+ CommitBase = roundUpTo(MapBase + PageSize + 1, AlignmentHint) - PageSize;
+ const uptr NewMapBase = CommitBase - PageSize;
+ DCHECK_GE(NewMapBase, MapBase);
+ // We only trim the extra memory on 32-bit platforms: 64-bit platforms
+ // are less constrained memory wise, and that saves us two syscalls.
+ if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
+ unmap(reinterpret_cast<void *>(MapBase), NewMapBase - MapBase, 0, &Data);
+ MapBase = NewMapBase;
+ }
+ const uptr NewMapEnd = CommitBase + PageSize +
+ roundUpTo((Size - AlignmentHint), PageSize) +
+ PageSize;
+ DCHECK_LE(NewMapEnd, MapEnd);
+ if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
+ unmap(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd, 0, &Data);
+ MapEnd = NewMapEnd;
+ }
+ }
+
+ const uptr CommitSize = MapEnd - PageSize - CommitBase;
+ const uptr Ptr =
+ reinterpret_cast<uptr>(map(reinterpret_cast<void *>(CommitBase),
+ CommitSize, "scudo:secondary", 0, &Data));
+ LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(Ptr);
+ H->MapBase = MapBase;
+ H->MapSize = MapEnd - MapBase;
+ H->BlockEnd = CommitBase + CommitSize;
+ H->Data = Data;
+ {
+ ScopedLock L(Mutex);
+ if (LIKELY(Tail)) {
+ Tail->Next = H;
+ H->Prev = Tail;
+ }
+ Tail = H;
+ AllocatedBytes += CommitSize;
+ if (LargestSize < CommitSize)
+ LargestSize = CommitSize;
+ NumberOfAllocs++;
+ Stats.add(StatAllocated, CommitSize);
+ Stats.add(StatMapped, H->MapSize);
+ }
+ if (BlockEnd)
+ *BlockEnd = CommitBase + CommitSize;
+ return reinterpret_cast<void *>(Ptr + LargeBlock::getHeaderSize());
+}
+
+void MapAllocator::deallocate(void *Ptr) {
+ LargeBlock::Header *H = LargeBlock::getHeader(Ptr);
+ {
+ ScopedLock L(Mutex);
+ LargeBlock::Header *Prev = H->Prev;
+ LargeBlock::Header *Next = H->Next;
+ if (Prev) {
+ CHECK_EQ(Prev->Next, H);
+ Prev->Next = Next;
+ }
+ if (Next) {
+ CHECK_EQ(Next->Prev, H);
+ Next->Prev = Prev;
+ }
+ if (UNLIKELY(Tail == H)) {
+ CHECK(!Next);
+ Tail = Prev;
+ } else {
+ CHECK(Next);
+ }
+ const uptr CommitSize = H->BlockEnd - reinterpret_cast<uptr>(H);
+ FreedBytes += CommitSize;
+ NumberOfFrees++;
+ Stats.sub(StatAllocated, CommitSize);
+ Stats.sub(StatMapped, H->MapSize);
+ }
+ void *Addr = reinterpret_cast<void *>(H->MapBase);
+ const uptr Size = H->MapSize;
+ MapPlatformData Data;
+ Data = H->Data;
+ unmap(Addr, Size, UNMAP_ALL, &Data);
+}
+
+void MapAllocator::getStats(ScopedString *Str) const {
+ Str->append(
+ "Stats: MapAllocator: allocated %zu times (%zuK), freed %zu times "
+ "(%zuK), remains %zu (%zuK) max %zuM\n",
+ NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees, FreedBytes >> 10,
+ NumberOfAllocs - NumberOfFrees, (AllocatedBytes - FreedBytes) >> 10,
+ LargestSize >> 20);
+}
+
+} // namespace scudo
diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
new file mode 100644
index 000000000000..9d074a57c772
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -0,0 +1,98 @@
+//===-- secondary.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_SECONDARY_H_
+#define SCUDO_SECONDARY_H_
+
+#include "common.h"
+#include "mutex.h"
+#include "stats.h"
+#include "string_utils.h"
+
+namespace scudo {
+
+// This allocator wraps the platform allocation primitives, and as such is on
+// the slower side and should preferably be used for larger sized allocations.
+// Blocks allocated will be preceded and followed by a guard page, and hold
+// their own header that is not checksummed: the guard pages and the Combined
+// header should be enough for our purpose.
+
+namespace LargeBlock {
+
+struct Header {
+ LargeBlock::Header *Prev;
+ LargeBlock::Header *Next;
+ uptr BlockEnd;
+ uptr MapBase;
+ uptr MapSize;
+ MapPlatformData Data;
+};
+
+constexpr uptr getHeaderSize() {
+ return roundUpTo(sizeof(Header), 1U << SCUDO_MIN_ALIGNMENT_LOG);
+}
+
+static Header *getHeader(uptr Ptr) {
+ return reinterpret_cast<Header *>(Ptr - getHeaderSize());
+}
+
+static Header *getHeader(const void *Ptr) {
+ return getHeader(reinterpret_cast<uptr>(Ptr));
+}
+
+} // namespace LargeBlock
+
+class MapAllocator {
+public:
+ void initLinkerInitialized(GlobalStats *S) {
+ Stats.initLinkerInitialized();
+ if (LIKELY(S))
+ S->link(&Stats);
+ }
+ void init(GlobalStats *S) {
+ memset(this, 0, sizeof(*this));
+ initLinkerInitialized(S);
+ }
+
+ void *allocate(uptr Size, uptr AlignmentHint = 0, uptr *BlockEnd = nullptr);
+
+ void deallocate(void *Ptr);
+
+ static uptr getBlockEnd(void *Ptr) {
+ return LargeBlock::getHeader(Ptr)->BlockEnd;
+ }
+
+ static uptr getBlockSize(void *Ptr) {
+ return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
+ }
+
+ void getStats(ScopedString *Str) const;
+
+ void disable() { Mutex.lock(); }
+
+ void enable() { Mutex.unlock(); }
+
+ template <typename F> void iterateOverBlocks(F Callback) const {
+ for (LargeBlock::Header *H = Tail; H != nullptr; H = H->Prev)
+ Callback(reinterpret_cast<uptr>(H) + LargeBlock::getHeaderSize());
+ }
+
+private:
+ HybridMutex Mutex;
+ LargeBlock::Header *Tail;
+ uptr AllocatedBytes;
+ uptr FreedBytes;
+ uptr LargestSize;
+ u32 NumberOfAllocs;
+ u32 NumberOfFrees;
+ LocalStats Stats;
+};
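+
+// Editor's note: a hypothetical usage sketch, not part of the original patch.
+// It assumes the platform map()/unmap() primitives are functional; the helper
+// name is made up for illustration.
+inline void mapAllocatorUsageExample() {
+ GlobalStats Stats;
+ Stats.init();
+ MapAllocator Secondary;
+ Secondary.init(&Stats); // Zero-initializes the allocator and links its stats.
+ uptr BlockEnd;
+ void *P = Secondary.allocate(1U << 20, /*AlignmentHint=*/0, &BlockEnd);
+ if (P) {
+ // The usable block extends from P up to BlockEnd.
+ Secondary.deallocate(P); // Unmaps the whole reserved region.
+ }
+}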
+
+} // namespace scudo
+
+#endif // SCUDO_SECONDARY_H_
diff --git a/compiler-rt/lib/scudo/standalone/size_class_map.h b/compiler-rt/lib/scudo/standalone/size_class_map.h
new file mode 100644
index 000000000000..dfef0865b9d9
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/size_class_map.h
@@ -0,0 +1,151 @@
+//===-- size_class_map.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_SIZE_CLASS_MAP_H_
+#define SCUDO_SIZE_CLASS_MAP_H_
+
+#include "common.h"
+#include "string_utils.h"
+
+namespace scudo {
+
+// SizeClassMap maps allocation sizes into size classes and back, in an
+// efficient table-free manner.
+//
+// Class 0 is a special class that doesn't abide by the same rules as other
+// classes. The allocator uses it to hold batches.
+//
+// The other sizes are controlled by the template parameters:
+// - MinSizeLog: defines the first class as 2^MinSizeLog bytes.
+// - MaxSizeLog: defines the last class as 2^MaxSizeLog bytes.
+// - MidSizeLog: classes increase with step 2^MinSizeLog from 2^MinSizeLog to
+// 2^MidSizeLog bytes.
+// - NumBits: the number of non-zero bits in sizes after 2^MidSizeLog.
+//   e.g. with NumBits==3 all size classes after 2^MidSizeLog look like
+//   0b1xx0..0 (where x is either 0 or 1).
+//
+// This class also gives a hint to a thread-caching allocator about the number
+// of chunks that can be cached per thread:
+// - MaxNumCachedHint is a hint for the max number of chunks cached per class.
+// - 2^MaxBytesCachedLog is the max number of bytes cached per class.
+
+template <u8 NumBits, u8 MinSizeLog, u8 MidSizeLog, u8 MaxSizeLog,
+ u32 MaxNumCachedHintT, u8 MaxBytesCachedLog>
+class SizeClassMap {
+ static const uptr MinSize = 1UL << MinSizeLog;
+ static const uptr MidSize = 1UL << MidSizeLog;
+ static const uptr MidClass = MidSize / MinSize;
+ static const u8 S = NumBits - 1;
+ static const uptr M = (1UL << S) - 1;
+
+public:
+ static const u32 MaxNumCachedHint = MaxNumCachedHintT;
+
+ static const uptr MaxSize = 1UL << MaxSizeLog;
+ static const uptr NumClasses =
+ MidClass + ((MaxSizeLog - MidSizeLog) << S) + 1;
+ COMPILER_CHECK(NumClasses <= 256);
+ static const uptr LargestClassId = NumClasses - 1;
+ static const uptr BatchClassId = 0;
+
+ static uptr getSizeByClassId(uptr ClassId) {
+ DCHECK_NE(ClassId, BatchClassId);
+ if (ClassId <= MidClass)
+ return ClassId << MinSizeLog;
+ ClassId -= MidClass;
+ const uptr T = MidSize << (ClassId >> S);
+ return T + (T >> S) * (ClassId & M);
+ }
+
+ static uptr getClassIdBySize(uptr Size) {
+ DCHECK_LE(Size, MaxSize);
+ if (Size <= MidSize)
+ return (Size + MinSize - 1) >> MinSizeLog;
+ const uptr L = getMostSignificantSetBitIndex(Size);
+ const uptr HBits = (Size >> (L - S)) & M;
+ const uptr LBits = Size & ((1UL << (L - S)) - 1);
+ const uptr L1 = L - MidSizeLog;
+ return MidClass + (L1 << S) + HBits + (LBits > 0);
+ }
+
+ static u32 getMaxCachedHint(uptr Size) {
+ DCHECK_LE(Size, MaxSize);
+ DCHECK_NE(Size, 0);
+ u32 N;
+ // Force a 32-bit division if the template parameters allow for it.
+ if (MaxBytesCachedLog > 31 || MaxSizeLog > 31)
+ N = static_cast<u32>((1UL << MaxBytesCachedLog) / Size);
+ else
+ N = (1U << MaxBytesCachedLog) / static_cast<u32>(Size);
+ return Max(1U, Min(MaxNumCachedHint, N));
+ }
+
+ static void print() {
+ ScopedString Buffer(1024);
+ uptr PrevS = 0;
+ uptr TotalCached = 0;
+ for (uptr I = 0; I < NumClasses; I++) {
+ if (I == BatchClassId)
+ continue;
+ const uptr S = getSizeByClassId(I);
+ if (S >= MidSize / 2 && (S & (S - 1)) == 0)
+ Buffer.append("\n");
+ const uptr D = S - PrevS;
+ const uptr P = PrevS ? (D * 100 / PrevS) : 0;
+ const uptr L = S ? getMostSignificantSetBitIndex(S) : 0;
+ const uptr Cached = getMaxCachedHint(S) * S;
+ Buffer.append(
+ "C%02zu => S: %zu diff: +%zu %02zu%% L %zu Cached: %zu %zu; id %zu\n",
+ I, getSizeByClassId(I), D, P, L, getMaxCachedHint(S), Cached,
+ getClassIdBySize(S));
+ TotalCached += Cached;
+ PrevS = S;
+ }
+ Buffer.append("Total Cached: %zu\n", TotalCached);
+ Buffer.output();
+ }
+
+ static void validate() {
+ for (uptr C = 0; C < NumClasses; C++) {
+ if (C == BatchClassId)
+ continue;
+ const uptr S = getSizeByClassId(C);
+ CHECK_NE(S, 0U);
+ CHECK_EQ(getClassIdBySize(S), C);
+ if (C < LargestClassId)
+ CHECK_EQ(getClassIdBySize(S + 1), C + 1);
+ CHECK_EQ(getClassIdBySize(S - 1), C);
+ CHECK_GT(getSizeByClassId(C), getSizeByClassId(C - 1));
+ }
+ // Do not perform the loop if the maximum size is too large.
+ if (MaxSizeLog > 19)
+ return;
+ for (uptr S = 1; S <= MaxSize; S++) {
+ const uptr C = getClassIdBySize(S);
+ CHECK_LT(C, NumClasses);
+ CHECK_GE(getSizeByClassId(C), S);
+ if (C > 0)
+ CHECK_LT(getSizeByClassId(C - 1), S);
+ }
+ }
+};
+
+typedef SizeClassMap<3, 5, 8, 17, 8, 10> DefaultSizeClassMap;
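+// Editor's illustration, using DefaultSizeClassMap (MinSizeLog 5, MidSizeLog 8,
+// MaxSizeLog 17, NumBits 3): a request of 100 bytes maps to class 4, whose size
+// is 128; a request of 1000 bytes maps to class 16, whose size is 1024.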
+
+// TODO(kostyak): further tune class maps for Android & Fuchsia.
+#if SCUDO_WORDSIZE == 64U
+typedef SizeClassMap<4, 4, 8, 14, 4, 10> SvelteSizeClassMap;
+typedef SizeClassMap<3, 5, 8, 17, 14, 14> AndroidSizeClassMap;
+#else
+typedef SizeClassMap<4, 3, 7, 14, 5, 10> SvelteSizeClassMap;
+typedef SizeClassMap<3, 5, 8, 17, 14, 14> AndroidSizeClassMap;
+#endif
+
+} // namespace scudo
+
+#endif // SCUDO_SIZE_CLASS_MAP_H_
diff --git a/compiler-rt/lib/scudo/standalone/stats.h b/compiler-rt/lib/scudo/standalone/stats.h
new file mode 100644
index 000000000000..16ef5b89b854
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/stats.h
@@ -0,0 +1,105 @@
+//===-- stats.h -------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_STATS_H_
+#define SCUDO_STATS_H_
+
+#include "atomic_helpers.h"
+#include "mutex.h"
+
+#include <string.h>
+
+namespace scudo {
+
+// Memory allocator statistics
+enum StatType { StatAllocated, StatFree, StatMapped, StatCount };
+
+typedef uptr StatCounters[StatCount];
+
+// Per-thread stats, live in the per-thread cache. We use atomics so that the
+// numbers themselves are consistent. But we don't use atomic_{add|sub} or a
+// lock, because those are expensive operations, and we only need the stats to
+// be "somewhat" correct: e.g. if we call GlobalStats::get while a thread is
+// LocalStats::add'ing, this is OK, we will still get a meaningful number.
+class LocalStats {
+public:
+ void initLinkerInitialized() {}
+ void init() { memset(this, 0, sizeof(*this)); }
+
+ void add(StatType I, uptr V) {
+ V += atomic_load_relaxed(&StatsArray[I]);
+ atomic_store_relaxed(&StatsArray[I], V);
+ }
+
+ void sub(StatType I, uptr V) {
+ V = atomic_load_relaxed(&StatsArray[I]) - V;
+ atomic_store_relaxed(&StatsArray[I], V);
+ }
+
+ void set(StatType I, uptr V) { atomic_store_relaxed(&StatsArray[I], V); }
+
+ uptr get(StatType I) const { return atomic_load_relaxed(&StatsArray[I]); }
+
+private:
+ friend class GlobalStats;
+ atomic_uptr StatsArray[StatCount];
+ LocalStats *Next;
+ LocalStats *Prev;
+};
+
+// Global stats, used for aggregation and querying.
+class GlobalStats : public LocalStats {
+public:
+ void initLinkerInitialized() {
+ Next = this;
+ Prev = this;
+ }
+ void init() {
+ memset(this, 0, sizeof(*this));
+ initLinkerInitialized();
+ }
+
+ void link(LocalStats *S) {
+ ScopedLock L(Mutex);
+ S->Next = Next;
+ S->Prev = this;
+ Next->Prev = S;
+ Next = S;
+ }
+
+ void unlink(LocalStats *S) {
+ ScopedLock L(Mutex);
+ S->Prev->Next = S->Next;
+ S->Next->Prev = S->Prev;
+ for (uptr I = 0; I < StatCount; I++)
+ add(static_cast<StatType>(I), S->get(static_cast<StatType>(I)));
+ }
+
+ void get(uptr *S) const {
+ memset(S, 0, StatCount * sizeof(uptr));
+ ScopedLock L(Mutex);
+ const LocalStats *Stats = this;
+ for (;;) {
+ for (uptr I = 0; I < StatCount; I++)
+ S[I] += Stats->get(static_cast<StatType>(I));
+ Stats = Stats->Next;
+ if (Stats == this)
+ break;
+ }
+ // All stats must be non-negative.
+ for (uptr I = 0; I < StatCount; I++)
+ S[I] = static_cast<sptr>(S[I]) >= 0 ? S[I] : 0;
+ }
+
+private:
+ mutable HybridMutex Mutex;
+};
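+
+// Editor's note: a hypothetical usage sketch, not part of the original patch,
+// showing how a frontend is expected to wire the two classes together.
+inline void statsUsageExample() {
+ GlobalStats Global;
+ Global.init();
+ LocalStats Local;
+ Local.init();
+ Global.link(&Local); // Register a per-thread stats block.
+ Local.add(StatAllocated, 64U); // Cheap relaxed update on the fast path.
+ uptr Totals[StatCount];
+ Global.get(Totals); // Aggregate over all the linked LocalStats.
+ Global.unlink(&Local); // Fold the remainder back into the global numbers.
+}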
+
+} // namespace scudo
+
+#endif // SCUDO_STATS_H_
diff --git a/compiler-rt/lib/scudo/standalone/string_utils.cpp b/compiler-rt/lib/scudo/standalone/string_utils.cpp
new file mode 100644
index 000000000000..5de8b57bfcd1
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/string_utils.cpp
@@ -0,0 +1,244 @@
+//===-- string_utils.cpp ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "string_utils.h"
+#include "common.h"
+
+#include <stdarg.h>
+#include <string.h>
+
+namespace scudo {
+
+static int appendChar(char **Buffer, const char *BufferEnd, char C) {
+ if (*Buffer < BufferEnd) {
+ **Buffer = C;
+ (*Buffer)++;
+ }
+ return 1;
+}
+
+// Appends a number in a given Base to the buffer. If its length is less than
+// |MinNumberLength|, it is padded with leading zeroes or spaces, depending
+// on the value of |PadWithZero|.
+static int appendNumber(char **Buffer, const char *BufferEnd, u64 AbsoluteValue,
+ u8 Base, u8 MinNumberLength, bool PadWithZero,
+ bool Negative, bool Upper) {
+ constexpr uptr MaxLen = 30;
+ RAW_CHECK(Base == 10 || Base == 16);
+ RAW_CHECK(Base == 10 || !Negative);
+ RAW_CHECK(AbsoluteValue || !Negative);
+ RAW_CHECK(MinNumberLength < MaxLen);
+ int Res = 0;
+ if (Negative && MinNumberLength)
+ --MinNumberLength;
+ if (Negative && PadWithZero)
+ Res += appendChar(Buffer, BufferEnd, '-');
+ uptr NumBuffer[MaxLen];
+ int Pos = 0;
+ do {
+ RAW_CHECK_MSG(static_cast<uptr>(Pos) < MaxLen,
+ "appendNumber buffer overflow");
+ NumBuffer[Pos++] = static_cast<uptr>(AbsoluteValue % Base);
+ AbsoluteValue /= Base;
+ } while (AbsoluteValue > 0);
+ if (Pos < MinNumberLength) {
+ memset(&NumBuffer[Pos], 0,
+ sizeof(NumBuffer[0]) * static_cast<uptr>(MinNumberLength - Pos));
+ Pos = MinNumberLength;
+ }
+ RAW_CHECK(Pos > 0);
+ Pos--;
+ for (; Pos >= 0 && NumBuffer[Pos] == 0; Pos--) {
+ char c = (PadWithZero || Pos == 0) ? '0' : ' ';
+ Res += appendChar(Buffer, BufferEnd, c);
+ }
+ if (Negative && !PadWithZero)
+ Res += appendChar(Buffer, BufferEnd, '-');
+ for (; Pos >= 0; Pos--) {
+ char Digit = static_cast<char>(NumBuffer[Pos]);
+ Digit = static_cast<char>((Digit < 10) ? '0' + Digit
+ : (Upper ? 'A' : 'a') + Digit - 10);
+ Res += appendChar(Buffer, BufferEnd, Digit);
+ }
+ return Res;
+}
+
+static int appendUnsigned(char **Buffer, const char *BufferEnd, u64 Num,
+ u8 Base, u8 MinNumberLength, bool PadWithZero,
+ bool Upper) {
+ return appendNumber(Buffer, BufferEnd, Num, Base, MinNumberLength,
+ PadWithZero, /*Negative=*/false, Upper);
+}
+
+static int appendSignedDecimal(char **Buffer, const char *BufferEnd, s64 Num,
+ u8 MinNumberLength, bool PadWithZero) {
+ const bool Negative = (Num < 0);
+ return appendNumber(Buffer, BufferEnd,
+ static_cast<u64>(Negative ? -Num : Num), 10,
+ MinNumberLength, PadWithZero, Negative,
+ /*Upper=*/false);
+}
+
+// Use the fact that explicitly requesting 0 Width (%0s) results in UB and
+// interpret Width == 0 as "no Width requested":
+// Width == 0 - no Width requested
+// Width < 0 - left-justify S within and pad it to -Width chars, if necessary
+// Width > 0 - right-justify S, not implemented yet
+static int appendString(char **Buffer, const char *BufferEnd, int Width,
+ int MaxChars, const char *S) {
+ if (!S)
+ S = "<null>";
+ int Res = 0;
+ for (; *S; S++) {
+ if (MaxChars >= 0 && Res >= MaxChars)
+ break;
+ Res += appendChar(Buffer, BufferEnd, *S);
+ }
+ // Only left-justified strings are supported.
+ while (Width < -Res)
+ Res += appendChar(Buffer, BufferEnd, ' ');
+ return Res;
+}
+
+static int appendPointer(char **Buffer, const char *BufferEnd, u64 ptr_value) {
+ int Res = 0;
+ Res += appendString(Buffer, BufferEnd, 0, -1, "0x");
+ Res += appendUnsigned(Buffer, BufferEnd, ptr_value, 16,
+ SCUDO_POINTER_FORMAT_LENGTH, /*PadWithZero=*/true,
+ /*Upper=*/false);
+ return Res;
+}
+
+int formatString(char *Buffer, uptr BufferLength, const char *Format,
+ va_list Args) {
+ static const char *PrintfFormatsHelp =
+ "Supported formatString formats: %([0-9]*)?(z|ll)?{d,u,x,X}; %p; "
+ "%[-]([0-9]*)?(\\.\\*)?s; %c\n";
+ RAW_CHECK(Format);
+ RAW_CHECK(BufferLength > 0);
+ const char *BufferEnd = &Buffer[BufferLength - 1];
+ const char *Cur = Format;
+ int Res = 0;
+ for (; *Cur; Cur++) {
+ if (*Cur != '%') {
+ Res += appendChar(&Buffer, BufferEnd, *Cur);
+ continue;
+ }
+ Cur++;
+ const bool LeftJustified = *Cur == '-';
+ if (LeftJustified)
+ Cur++;
+ bool HaveWidth = (*Cur >= '0' && *Cur <= '9');
+ const bool PadWithZero = (*Cur == '0');
+ u8 Width = 0;
+ if (HaveWidth) {
+ while (*Cur >= '0' && *Cur <= '9')
+ Width = static_cast<u8>(Width * 10 + *Cur++ - '0');
+ }
+ const bool HavePrecision = (Cur[0] == '.' && Cur[1] == '*');
+ int Precision = -1;
+ if (HavePrecision) {
+ Cur += 2;
+ Precision = va_arg(Args, int);
+ }
+ const bool HaveZ = (*Cur == 'z');
+ Cur += HaveZ;
+ const bool HaveLL = !HaveZ && (Cur[0] == 'l' && Cur[1] == 'l');
+ Cur += HaveLL * 2;
+ s64 DVal;
+ u64 UVal;
+ const bool HaveLength = HaveZ || HaveLL;
+ const bool HaveFlags = HaveWidth || HaveLength;
+ // At the moment only %s supports precision and left-justification.
+ CHECK(!((Precision >= 0 || LeftJustified) && *Cur != 's'));
+ switch (*Cur) {
+ case 'd': {
+ DVal = HaveLL ? va_arg(Args, s64)
+ : HaveZ ? va_arg(Args, sptr) : va_arg(Args, int);
+ Res += appendSignedDecimal(&Buffer, BufferEnd, DVal, Width, PadWithZero);
+ break;
+ }
+ case 'u':
+ case 'x':
+ case 'X': {
+ UVal = HaveLL ? va_arg(Args, u64)
+ : HaveZ ? va_arg(Args, uptr) : va_arg(Args, unsigned);
+ const bool Upper = (*Cur == 'X');
+ Res += appendUnsigned(&Buffer, BufferEnd, UVal, (*Cur == 'u') ? 10 : 16,
+ Width, PadWithZero, Upper);
+ break;
+ }
+ case 'p': {
+ RAW_CHECK_MSG(!HaveFlags, PrintfFormatsHelp);
+ Res += appendPointer(&Buffer, BufferEnd, va_arg(Args, uptr));
+ break;
+ }
+ case 's': {
+ RAW_CHECK_MSG(!HaveLength, PrintfFormatsHelp);
+ // Only left-justified Width is supported.
+ CHECK(!HaveWidth || LeftJustified);
+ Res += appendString(&Buffer, BufferEnd, LeftJustified ? -Width : Width,
+ Precision, va_arg(Args, char *));
+ break;
+ }
+ case 'c': {
+ RAW_CHECK_MSG(!HaveFlags, PrintfFormatsHelp);
+ Res +=
+ appendChar(&Buffer, BufferEnd, static_cast<char>(va_arg(Args, int)));
+ break;
+ }
+ case '%': {
+ RAW_CHECK_MSG(!HaveFlags, PrintfFormatsHelp);
+ Res += appendChar(&Buffer, BufferEnd, '%');
+ break;
+ }
+ default: {
+ RAW_CHECK_MSG(false, PrintfFormatsHelp);
+ }
+ }
+ }
+ RAW_CHECK(Buffer <= BufferEnd);
+ appendChar(&Buffer, BufferEnd + 1, '\0');
+ return Res;
+}
+
+void ScopedString::append(const char *Format, va_list Args) {
+ DCHECK_LT(Length, String.size());
+ va_list ArgsCopy;
+ va_copy(ArgsCopy, Args);
+ // formatString doesn't currently support a null buffer or zero buffer length,
+ // so in order to get the resulting formatted string length, we use a one-char
+ // buffer.
+ char C[1];
+ const uptr AdditionalLength =
+ static_cast<uptr>(formatString(C, sizeof(C), Format, Args)) + 1;
+ String.resize(Length + AdditionalLength);
+ formatString(String.data() + Length, AdditionalLength, Format, ArgsCopy);
+ Length = strlen(String.data());
+ CHECK_LT(Length, String.size());
+}
+
+FORMAT(2, 3)
+void ScopedString::append(const char *Format, ...) {
+ va_list Args;
+ va_start(Args, Format);
+ append(Format, Args);
+ va_end(Args);
+}
+
+FORMAT(1, 2)
+void Printf(const char *Format, ...) {
+ va_list Args;
+ va_start(Args, Format);
+ ScopedString Msg(1024);
+ Msg.append(Format, Args);
+ outputRaw(Msg.data());
+ va_end(Args);
+}
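+
+// Editor's note: a hypothetical sketch, not part of the original patch,
+// exercising the format specifiers documented in formatString above.
+inline void stringUtilsUsageExample() {
+ ScopedString Str(128);
+ Str.append("allocated %zu bytes (%d chunks) at %p [%-8s]\n",
+ static_cast<uptr>(4096), 2,
+ reinterpret_cast<void *>(static_cast<uptr>(0x1000)), "primary");
+ Printf("%s", Str.data());
+}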
+
+} // namespace scudo
diff --git a/compiler-rt/lib/scudo/standalone/string_utils.h b/compiler-rt/lib/scudo/standalone/string_utils.h
new file mode 100644
index 000000000000..acd60bda9d8d
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/string_utils.h
@@ -0,0 +1,43 @@
+//===-- string_utils.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_STRING_UTILS_H_
+#define SCUDO_STRING_UTILS_H_
+
+#include "internal_defs.h"
+#include "vector.h"
+
+#include <stdarg.h>
+
+namespace scudo {
+
+class ScopedString {
+public:
+ explicit ScopedString(uptr MaxLength) : String(MaxLength), Length(0) {
+ String[0] = '\0';
+ }
+ uptr length() { return Length; }
+ const char *data() { return String.data(); }
+ void clear() {
+ String[0] = '\0';
+ Length = 0;
+ }
+ void append(const char *Format, va_list Args);
+ void append(const char *Format, ...);
+ void output() const { outputRaw(String.data()); }
+
+private:
+ Vector<char> String;
+ uptr Length;
+};
+
+void Printf(const char *Format, ...);
+
+} // namespace scudo
+
+#endif // SCUDO_STRING_UTILS_H_
diff --git a/compiler-rt/lib/scudo/standalone/tsd.h b/compiler-rt/lib/scudo/standalone/tsd.h
new file mode 100644
index 000000000000..f24ff01960fb
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/tsd.h
@@ -0,0 +1,66 @@
+//===-- tsd.h ---------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_TSD_H_
+#define SCUDO_TSD_H_
+
+#include "atomic_helpers.h"
+#include "common.h"
+#include "mutex.h"
+
+#include <limits.h> // for PTHREAD_DESTRUCTOR_ITERATIONS
+
+// With some build setups, this might still not be defined.
+#ifndef PTHREAD_DESTRUCTOR_ITERATIONS
+#define PTHREAD_DESTRUCTOR_ITERATIONS 4
+#endif
+
+namespace scudo {
+
+template <class Allocator> struct ALIGNED(SCUDO_CACHE_LINE_SIZE) TSD {
+ typename Allocator::CacheT Cache;
+ typename Allocator::QuarantineCacheT QuarantineCache;
+ u8 DestructorIterations;
+
+ void initLinkerInitialized(Allocator *Instance) {
+ Instance->initCache(&Cache);
+ DestructorIterations = PTHREAD_DESTRUCTOR_ITERATIONS;
+ }
+ void init(Allocator *Instance) {
+ memset(this, 0, sizeof(*this));
+ initLinkerInitialized(Instance);
+ }
+
+ void commitBack(Allocator *Instance) { Instance->commitBack(this); }
+
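+ // If the lock is contended, record a coarse timestamp in Precedence: the
+ // shared TSD registry's slow path uses it to prefer the TSD with the oldest
+ // (lowest non-zero) contention time (see getTSDAndLockSlow in tsd_shared.h).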
+ INLINE bool tryLock() {
+ if (Mutex.tryLock()) {
+ atomic_store_relaxed(&Precedence, 0);
+ return true;
+ }
+ if (atomic_load_relaxed(&Precedence) == 0)
+ atomic_store_relaxed(
+ &Precedence,
+ static_cast<uptr>(getMonotonicTime() >> FIRST_32_SECOND_64(16, 0)));
+ return false;
+ }
+ INLINE void lock() {
+ atomic_store_relaxed(&Precedence, 0);
+ Mutex.lock();
+ }
+ INLINE void unlock() { Mutex.unlock(); }
+ INLINE uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
+
+private:
+ HybridMutex Mutex;
+ atomic_uptr Precedence;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_TSD_H_
diff --git a/compiler-rt/lib/scudo/standalone/tsd_exclusive.h b/compiler-rt/lib/scudo/standalone/tsd_exclusive.h
new file mode 100644
index 000000000000..971ae4857fca
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/tsd_exclusive.h
@@ -0,0 +1,118 @@
+//===-- tsd_exclusive.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_TSD_EXCLUSIVE_H_
+#define SCUDO_TSD_EXCLUSIVE_H_
+
+#include "tsd.h"
+
+#include <pthread.h>
+
+namespace scudo {
+
+enum class ThreadState : u8 {
+ NotInitialized = 0,
+ Initialized,
+ TornDown,
+};
+
+template <class Allocator> void teardownThread(void *Ptr);
+
+template <class Allocator> struct TSDRegistryExT {
+ void initLinkerInitialized(Allocator *Instance) {
+ Instance->initLinkerInitialized();
+ CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread<Allocator>), 0);
+ FallbackTSD = reinterpret_cast<TSD<Allocator> *>(
+ map(nullptr, sizeof(TSD<Allocator>), "scudo:tsd"));
+ FallbackTSD->initLinkerInitialized(Instance);
+ Initialized = true;
+ }
+ void init(Allocator *Instance) {
+ memset(this, 0, sizeof(*this));
+ initLinkerInitialized(Instance);
+ }
+
+ void unmapTestOnly() {
+ unmap(reinterpret_cast<void *>(FallbackTSD), sizeof(TSD<Allocator>));
+ }
+
+ ALWAYS_INLINE void initThreadMaybe(Allocator *Instance, bool MinimalInit) {
+ if (LIKELY(State != ThreadState::NotInitialized))
+ return;
+ initThread(Instance, MinimalInit);
+ }
+
+ ALWAYS_INLINE TSD<Allocator> *getTSDAndLock(bool *UnlockRequired) {
+ if (LIKELY(State == ThreadState::Initialized)) {
+ *UnlockRequired = false;
+ return &ThreadTSD;
+ }
+ DCHECK(FallbackTSD);
+ FallbackTSD->lock();
+ *UnlockRequired = true;
+ return FallbackTSD;
+ }
+
+private:
+ void initOnceMaybe(Allocator *Instance) {
+ ScopedLock L(Mutex);
+ if (LIKELY(Initialized))
+ return;
+ initLinkerInitialized(Instance); // Sets Initialized.
+ }
+
+ // Using minimal initialization allows for global initialization while keeping
+ // the thread specific structure untouched. The fallback structure will be
+ // used instead.
+ NOINLINE void initThread(Allocator *Instance, bool MinimalInit) {
+ initOnceMaybe(Instance);
+ if (UNLIKELY(MinimalInit))
+ return;
+ CHECK_EQ(
+ pthread_setspecific(PThreadKey, reinterpret_cast<void *>(Instance)), 0);
+ ThreadTSD.initLinkerInitialized(Instance);
+ State = ThreadState::Initialized;
+ }
+
+ pthread_key_t PThreadKey;
+ bool Initialized;
+ TSD<Allocator> *FallbackTSD;
+ HybridMutex Mutex;
+ static THREADLOCAL ThreadState State;
+ static THREADLOCAL TSD<Allocator> ThreadTSD;
+
+ friend void teardownThread<Allocator>(void *Ptr);
+};
+
+template <class Allocator>
+THREADLOCAL TSD<Allocator> TSDRegistryExT<Allocator>::ThreadTSD;
+template <class Allocator>
+THREADLOCAL ThreadState TSDRegistryExT<Allocator>::State;
+
+template <class Allocator> void teardownThread(void *Ptr) {
+ typedef TSDRegistryExT<Allocator> TSDRegistryT;
+ Allocator *Instance = reinterpret_cast<Allocator *>(Ptr);
+ // The glibc POSIX thread-local-storage deallocation routine calls
+ // user-provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS.
+ // We want to be called last since other destructors might call free and the
+ // like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the
+ // quarantine and swallowing the cache.
+ if (TSDRegistryT::ThreadTSD.DestructorIterations > 1) {
+ TSDRegistryT::ThreadTSD.DestructorIterations--;
+ // If pthread_setspecific fails, we will go ahead with the teardown.
+ if (LIKELY(pthread_setspecific(Instance->getTSDRegistry()->PThreadKey,
+ Ptr) == 0))
+ return;
+ }
+ TSDRegistryT::ThreadTSD.commitBack(Instance);
+ TSDRegistryT::State = ThreadState::TornDown;
+}
+
+} // namespace scudo
+
+#endif // SCUDO_TSD_EXCLUSIVE_H_
diff --git a/compiler-rt/lib/scudo/standalone/tsd_shared.h b/compiler-rt/lib/scudo/standalone/tsd_shared.h
new file mode 100644
index 000000000000..da88a897b8f5
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/tsd_shared.h
@@ -0,0 +1,168 @@
+//===-- tsd_shared.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_TSD_SHARED_H_
+#define SCUDO_TSD_SHARED_H_
+
+#include "linux.h" // for getAndroidTlsPtr()
+#include "tsd.h"
+
+#include <pthread.h>
+
+namespace scudo {
+
+template <class Allocator, u32 MaxTSDCount> struct TSDRegistrySharedT {
+ void initLinkerInitialized(Allocator *Instance) {
+ Instance->initLinkerInitialized();
+ CHECK_EQ(pthread_key_create(&PThreadKey, nullptr), 0); // For non-TLS
+ NumberOfTSDs = Min(Max(1U, getNumberOfCPUs()), MaxTSDCount);
+ TSDs = reinterpret_cast<TSD<Allocator> *>(
+ map(nullptr, sizeof(TSD<Allocator>) * NumberOfTSDs, "scudo:tsd"));
+ for (u32 I = 0; I < NumberOfTSDs; I++)
+ TSDs[I].initLinkerInitialized(Instance);
+ // Compute all the coprimes of NumberOfTSDs. This will be used to walk the
+ // array of TSDs in a random order. For details, see:
+ // https://lemire.me/blog/2017/09/18/visiting-all-values-in-an-array-exactly-once-in-random-order/
+ for (u32 I = 0; I < NumberOfTSDs; I++) {
+ u32 A = I + 1;
+ u32 B = NumberOfTSDs;
+ // Find the GCD between I + 1 and NumberOfTSDs. If 1, they are coprimes.
+ while (B != 0) {
+ const u32 T = A;
+ A = B;
+ B = T % B;
+ }
+ if (A == 1)
+ CoPrimes[NumberOfCoPrimes++] = I + 1;
+ }
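+ // For example, with NumberOfTSDs == 8 the coprimes are {1, 3, 5, 7}: starting
+ // at any index and repeatedly adding one of them modulo 8 visits every TSD
+ // exactly once before repeating.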
+ Initialized = true;
+ }
+ void init(Allocator *Instance) {
+ memset(this, 0, sizeof(*this));
+ initLinkerInitialized(Instance);
+ }
+
+ void unmapTestOnly() {
+ unmap(reinterpret_cast<void *>(TSDs),
+ sizeof(TSD<Allocator>) * NumberOfTSDs);
+ }
+
+ ALWAYS_INLINE void initThreadMaybe(Allocator *Instance,
+ UNUSED bool MinimalInit) {
+ if (LIKELY(getCurrentTSD()))
+ return;
+ initThread(Instance);
+ }
+
+ ALWAYS_INLINE TSD<Allocator> *getTSDAndLock(bool *UnlockRequired) {
+ TSD<Allocator> *TSD = getCurrentTSD();
+ DCHECK(TSD);
+ *UnlockRequired = true;
+ // Try to lock the currently associated context.
+ if (TSD->tryLock())
+ return TSD;
+ // If that fails, go down the slow path.
+ return getTSDAndLockSlow(TSD);
+ }
+
+private:
+ ALWAYS_INLINE void setCurrentTSD(TSD<Allocator> *CurrentTSD) {
+#if SCUDO_ANDROID
+ *getAndroidTlsPtr() = reinterpret_cast<uptr>(CurrentTSD);
+#elif SCUDO_LINUX
+ ThreadTSD = CurrentTSD;
+#else
+ CHECK_EQ(
+ pthread_setspecific(PThreadKey, reinterpret_cast<void *>(CurrentTSD)),
+ 0);
+#endif
+ }
+
+ ALWAYS_INLINE TSD<Allocator> *getCurrentTSD() {
+#if SCUDO_ANDROID
+ return reinterpret_cast<TSD<Allocator> *>(*getAndroidTlsPtr());
+#elif SCUDO_LINUX
+ return ThreadTSD;
+#else
+ return reinterpret_cast<TSD<Allocator> *>(pthread_getspecific(PThreadKey));
+#endif
+ }
+
+ void initOnceMaybe(Allocator *Instance) {
+ ScopedLock L(Mutex);
+ if (LIKELY(Initialized))
+ return;
+ initLinkerInitialized(Instance); // Sets Initialized.
+ }
+
+ NOINLINE void initThread(Allocator *Instance) {
+ initOnceMaybe(Instance);
+ // Initial context assignment is done in a plain round-robin fashion.
+ const u32 Index = atomic_fetch_add(&CurrentIndex, 1U, memory_order_relaxed);
+ setCurrentTSD(&TSDs[Index % NumberOfTSDs]);
+ }
+
+ NOINLINE TSD<Allocator> *getTSDAndLockSlow(TSD<Allocator> *CurrentTSD) {
+ if (MaxTSDCount > 1U && NumberOfTSDs > 1U) {
+ // Use the Precedence of the current TSD as our random seed. Since we are
+ // in the slow path, it means that tryLock failed, and as a result it's
+ // very likely that said Precedence is non-zero.
+ const u32 R = static_cast<u32>(CurrentTSD->getPrecedence());
+ const u32 Inc = CoPrimes[R % NumberOfCoPrimes];
+ u32 Index = R % NumberOfTSDs;
+ uptr LowestPrecedence = UINTPTR_MAX;
+ TSD<Allocator> *CandidateTSD = nullptr;
+ // Go randomly through at most 4 contexts and find a candidate.
+ for (u32 I = 0; I < Min(4U, NumberOfTSDs); I++) {
+ if (TSDs[Index].tryLock()) {
+ setCurrentTSD(&TSDs[Index]);
+ return &TSDs[Index];
+ }
+ const uptr Precedence = TSDs[Index].getPrecedence();
+ // A 0 precedence here means another thread just locked this TSD.
+ if (Precedence && Precedence < LowestPrecedence) {
+ CandidateTSD = &TSDs[Index];
+ LowestPrecedence = Precedence;
+ }
+ Index += Inc;
+ if (Index >= NumberOfTSDs)
+ Index -= NumberOfTSDs;
+ }
+ if (CandidateTSD) {
+ CandidateTSD->lock();
+ setCurrentTSD(CandidateTSD);
+ return CandidateTSD;
+ }
+ }
+ // Last resort, stick with the current one.
+ CurrentTSD->lock();
+ return CurrentTSD;
+ }
+
+ pthread_key_t PThreadKey;
+ atomic_u32 CurrentIndex;
+ u32 NumberOfTSDs;
+ TSD<Allocator> *TSDs;
+ u32 NumberOfCoPrimes;
+ u32 CoPrimes[MaxTSDCount];
+ bool Initialized;
+ HybridMutex Mutex;
+#if SCUDO_LINUX && !SCUDO_ANDROID
+ static THREADLOCAL TSD<Allocator> *ThreadTSD;
+#endif
+};
+
+#if SCUDO_LINUX && !SCUDO_ANDROID
+template <class Allocator, u32 MaxTSDCount>
+THREADLOCAL TSD<Allocator>
+ *TSDRegistrySharedT<Allocator, MaxTSDCount>::ThreadTSD;
+#endif
+
+} // namespace scudo
+
+#endif // SCUDO_TSD_SHARED_H_
diff --git a/compiler-rt/lib/scudo/standalone/vector.h b/compiler-rt/lib/scudo/standalone/vector.h
new file mode 100644
index 000000000000..3cb4005ed29c
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/vector.h
@@ -0,0 +1,118 @@
+//===-- vector.h ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_VECTOR_H_
+#define SCUDO_VECTOR_H_
+
+#include "common.h"
+
+#include <string.h>
+
+namespace scudo {
+
+// A low-level vector based on map. May incur a significant memory overhead for
+// small vectors. The current implementation supports only POD types.
+template <typename T> class VectorNoCtor {
+public:
+ void init(uptr InitialCapacity) {
+ CapacityBytes = 0;
+ Size = 0;
+ Data = nullptr;
+ reserve(InitialCapacity);
+ }
+ void destroy() {
+ if (Data)
+ unmap(Data, CapacityBytes);
+ }
+ T &operator[](uptr I) {
+ DCHECK_LT(I, Size);
+ return Data[I];
+ }
+ const T &operator[](uptr I) const {
+ DCHECK_LT(I, Size);
+ return Data[I];
+ }
+ void push_back(const T &Element) {
+ DCHECK_LE(Size, capacity());
+ if (Size == capacity()) {
+ const uptr NewCapacity = roundUpToPowerOfTwo(Size + 1);
+ reallocate(NewCapacity);
+ }
+ memcpy(&Data[Size++], &Element, sizeof(T));
+ }
+ T &back() {
+ DCHECK_GT(Size, 0);
+ return Data[Size - 1];
+ }
+ void pop_back() {
+ DCHECK_GT(Size, 0);
+ Size--;
+ }
+ uptr size() const { return Size; }
+ const T *data() const { return Data; }
+ T *data() { return Data; }
+ uptr capacity() const { return CapacityBytes / sizeof(T); }
+ void reserve(uptr NewSize) {
+ // Never downsize internal buffer.
+ if (NewSize > capacity())
+ reallocate(NewSize);
+ }
+ void resize(uptr NewSize) {
+ if (NewSize > Size) {
+ reserve(NewSize);
+ memset(&Data[Size], 0, sizeof(T) * (NewSize - Size));
+ }
+ Size = NewSize;
+ }
+
+ void clear() { Size = 0; }
+ bool empty() const { return size() == 0; }
+
+ const T *begin() const { return data(); }
+ T *begin() { return data(); }
+ const T *end() const { return data() + size(); }
+ T *end() { return data() + size(); }
+
+private:
+ void reallocate(uptr NewCapacity) {
+ DCHECK_GT(NewCapacity, 0);
+ DCHECK_LE(Size, NewCapacity);
+ const uptr NewCapacityBytes =
+ roundUpTo(NewCapacity * sizeof(T), getPageSizeCached());
+ T *NewData = (T *)map(nullptr, NewCapacityBytes, "scudo:vector");
+ if (Data) {
+ memcpy(NewData, Data, Size * sizeof(T));
+ unmap(Data, CapacityBytes);
+ }
+ Data = NewData;
+ CapacityBytes = NewCapacityBytes;
+ }
+
+ T *Data;
+ uptr CapacityBytes;
+ uptr Size;
+};
+
+template <typename T> class Vector : public VectorNoCtor<T> {
+public:
+ Vector() { VectorNoCtor<T>::init(1); }
+ explicit Vector(uptr Count) {
+ VectorNoCtor<T>::init(Count);
+ this->resize(Count);
+ }
+ ~Vector() { VectorNoCtor<T>::destroy(); }
+ // Disallow copies and moves.
+ Vector(const Vector &) = delete;
+ Vector &operator=(const Vector &) = delete;
+ Vector(Vector &&) = delete;
+ Vector &operator=(Vector &&) = delete;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_VECTOR_H_
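
A minimal usage sketch of the interface above, assuming it is built within the
scudo standalone sources (scudo::uptr and the map()/unmap() primitives come
from the internal headers); element types must be POD, since push_back() copies
them with memcpy():

  #include "vector.h"

  void vectorExample() {
    scudo::Vector<scudo::uptr> V;    // Constructor maps an initial page of capacity.
    for (scudo::uptr I = 0; I < 4; I++)
      V.push_back(I);                // Remaps to a larger buffer when capacity is exhausted.
    scudo::uptr Sum = 0;
    for (const scudo::uptr &E : V)   // begin()/end() enable range-based loops.
      Sum += E;
    (void)Sum;                       // Sum == 6.
    V.resize(8);                     // New trailing elements are zero-initialized.
    V.clear();                       // Resets the size, keeps the mapping.
  }                                  // ~Vector() unmaps the backing pages.
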
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c.cpp b/compiler-rt/lib/scudo/standalone/wrappers_c.cpp
new file mode 100644
index 000000000000..dffd7cc26fe8
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c.cpp
@@ -0,0 +1,39 @@
+//===-- wrappers_c.cpp ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+// Skip this compilation unit if compiled as part of Bionic.
+#if !SCUDO_ANDROID || !_BIONIC
+
+#include "allocator_config.h"
+#include "wrappers_c.h"
+#include "wrappers_c_checks.h"
+
+#include <stdint.h>
+#include <stdio.h>
+
+static scudo::Allocator<scudo::Config> Allocator;
+// Pointer to the static allocator so that the C++ wrappers can access it.
+// Technically we could have a completely separate heap for C & C++, but in
+// practice the amount of cross-pollination between the two is staggering.
+scudo::Allocator<scudo::Config> *AllocatorPtr = &Allocator;
+
+extern "C" {
+
+#define SCUDO_PREFIX(name) name
+#define SCUDO_ALLOCATOR Allocator
+#include "wrappers_c.inc"
+#undef SCUDO_ALLOCATOR
+#undef SCUDO_PREFIX
+
+INTERFACE void __scudo_print_stats(void) { Allocator.printStats(); }
+
+} // extern "C"
+
+#endif // !SCUDO_ANDROID || !_BIONIC
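
Since SCUDO_PREFIX(name) expands to the bare name and SCUDO_ALLOCATOR to
Allocator in this translation unit, including wrappers_c.inc emits the standard
C symbols directly; the malloc definition from the .inc file, for instance,
expands roughly to:

  INTERFACE WEAK void *malloc(size_t size) {
    return scudo::setErrnoOnNull(Allocator.allocate(
        size, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT));
  }
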
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c.h b/compiler-rt/lib/scudo/standalone/wrappers_c.h
new file mode 100644
index 000000000000..33a0c53cec03
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c.h
@@ -0,0 +1,52 @@
+//===-- wrappers_c.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_WRAPPERS_C_H_
+#define SCUDO_WRAPPERS_C_H_
+
+#include "platform.h"
+#include "stats.h"
+
+// Bionic's struct mallinfo uses size_t fields (mallinfo(3) uses int).
+#if SCUDO_ANDROID
+typedef size_t __scudo_mallinfo_data_t;
+#else
+typedef int __scudo_mallinfo_data_t;
+#endif
+
+struct __scudo_mallinfo {
+ __scudo_mallinfo_data_t arena;
+ __scudo_mallinfo_data_t ordblks;
+ __scudo_mallinfo_data_t smblks;
+ __scudo_mallinfo_data_t hblks;
+ __scudo_mallinfo_data_t hblkhd;
+ __scudo_mallinfo_data_t usmblks;
+ __scudo_mallinfo_data_t fsmblks;
+ __scudo_mallinfo_data_t uordblks;
+ __scudo_mallinfo_data_t fordblks;
+ __scudo_mallinfo_data_t keepcost;
+};
+
+// Android sometimes includes malloc.h no matter what, which leads to
+// conflicting return types for mallinfo() if we use our own structure. So if
+// struct mallinfo is declared (#define courtesy of malloc.h), use it directly.
+#if STRUCT_MALLINFO_DECLARED
+#define SCUDO_MALLINFO mallinfo
+#else
+#define SCUDO_MALLINFO __scudo_mallinfo
+#endif
+
+#ifndef M_DECAY_TIME
+#define M_DECAY_TIME -100
+#endif
+
+#ifndef M_PURGE
+#define M_PURGE -101
+#endif
+
+#endif // SCUDO_WRAPPERS_C_H_
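
A caller-side sketch of the resulting interface, assuming a non-Bionic Linux
build where the fields are plain int and the wrapper is exported as mallinfo();
the fields read here are the ones populated in wrappers_c.inc:

  #include <malloc.h>
  #include <stdio.h>

  void printHeapStats(void) {
    struct mallinfo Info = mallinfo();
    // uordblks: total allocated space, hblkhd: space in mapped regions,
    // fordblks: total free space.
    printf("allocated=%d mapped=%d free=%d\n", Info.uordblks, Info.hblkhd,
           Info.fordblks);
  }
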
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c.inc b/compiler-rt/lib/scudo/standalone/wrappers_c.inc
new file mode 100644
index 000000000000..a9adbc83588b
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c.inc
@@ -0,0 +1,186 @@
+//===-- wrappers_c.inc ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_PREFIX
+#error "Define SCUDO_PREFIX prior to including this file!"
+#endif
+
+// Memory returned by malloc-type functions has to be aligned to
+// std::max_align_t. This is distinct from (1U << SCUDO_MIN_ALIGNMENT_LOG),
+// since C++ new-type functions do not have to abide by the same requirement.
+#ifndef SCUDO_MALLOC_ALIGNMENT
+#define SCUDO_MALLOC_ALIGNMENT FIRST_32_SECOND_64(8U, 16U)
+#endif
+
+INTERFACE WEAK void *SCUDO_PREFIX(calloc)(size_t nmemb, size_t size) {
+ scudo::uptr Product;
+ if (UNLIKELY(scudo::checkForCallocOverflow(size, nmemb, &Product))) {
+ if (SCUDO_ALLOCATOR.canReturnNull()) {
+ errno = ENOMEM;
+ return nullptr;
+ }
+ scudo::reportCallocOverflow(nmemb, size);
+ }
+ return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
+ Product, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT, true));
+}
+
+INTERFACE WEAK void SCUDO_PREFIX(free)(void *ptr) {
+ SCUDO_ALLOCATOR.deallocate(ptr, scudo::Chunk::Origin::Malloc);
+}
+
+INTERFACE WEAK struct SCUDO_MALLINFO SCUDO_PREFIX(mallinfo)(void) {
+ struct SCUDO_MALLINFO Info = {};
+ scudo::StatCounters Stats;
+ SCUDO_ALLOCATOR.getStats(Stats);
+ // Space allocated in mmapped regions (bytes)
+ Info.hblkhd = static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatMapped]);
+ // Maximum total allocated space (bytes)
+ Info.usmblks = Info.hblkhd;
+ // Space in freed fastbin blocks (bytes)
+ Info.fsmblks = static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatFree]);
+ // Total allocated space (bytes)
+ Info.uordblks =
+ static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatAllocated]);
+ // Total free space (bytes)
+ Info.fordblks = Info.fsmblks;
+ return Info;
+}
+
+INTERFACE WEAK void *SCUDO_PREFIX(malloc)(size_t size) {
+ return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
+ size, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT));
+}
+
+#if SCUDO_ANDROID
+INTERFACE WEAK size_t SCUDO_PREFIX(malloc_usable_size)(const void *ptr) {
+#else
+INTERFACE WEAK size_t SCUDO_PREFIX(malloc_usable_size)(void *ptr) {
+#endif
+ return SCUDO_ALLOCATOR.getUsableSize(ptr);
+}
+
+INTERFACE WEAK void *SCUDO_PREFIX(memalign)(size_t alignment, size_t size) {
+ // Android rounds up the alignment to a power of two if it isn't one.
+ if (SCUDO_ANDROID) {
+ if (UNLIKELY(!alignment)) {
+ alignment = 1U;
+ } else {
+ if (UNLIKELY(!scudo::isPowerOfTwo(alignment)))
+ alignment = scudo::roundUpToPowerOfTwo(alignment);
+ }
+ } else {
+ if (UNLIKELY(!scudo::isPowerOfTwo(alignment))) {
+ if (SCUDO_ALLOCATOR.canReturnNull()) {
+ errno = EINVAL;
+ return nullptr;
+ }
+ scudo::reportAlignmentNotPowerOfTwo(alignment);
+ }
+ }
+ return SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign,
+ alignment);
+}
+
+INTERFACE WEAK int SCUDO_PREFIX(posix_memalign)(void **memptr, size_t alignment,
+ size_t size) {
+ if (UNLIKELY(scudo::checkPosixMemalignAlignment(alignment))) {
+ if (!SCUDO_ALLOCATOR.canReturnNull())
+ scudo::reportInvalidPosixMemalignAlignment(alignment);
+ return EINVAL;
+ }
+ void *Ptr =
+ SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign, alignment);
+ if (UNLIKELY(!Ptr))
+ return ENOMEM;
+ *memptr = Ptr;
+ return 0;
+}
+
+INTERFACE WEAK void *SCUDO_PREFIX(pvalloc)(size_t size) {
+ const scudo::uptr PageSize = scudo::getPageSizeCached();
+ if (UNLIKELY(scudo::checkForPvallocOverflow(size, PageSize))) {
+ if (SCUDO_ALLOCATOR.canReturnNull()) {
+ errno = ENOMEM;
+ return nullptr;
+ }
+ scudo::reportPvallocOverflow(size);
+ }
+ // pvalloc(0) should allocate one page.
+ return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
+ size ? scudo::roundUpTo(size, PageSize) : PageSize,
+ scudo::Chunk::Origin::Memalign, PageSize));
+}
+
+INTERFACE WEAK void *SCUDO_PREFIX(realloc)(void *ptr, size_t size) {
+ if (!ptr)
+ return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
+ size, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT));
+ if (size == 0) {
+ SCUDO_ALLOCATOR.deallocate(ptr, scudo::Chunk::Origin::Malloc);
+ return nullptr;
+ }
+ return scudo::setErrnoOnNull(
+ SCUDO_ALLOCATOR.reallocate(ptr, size, SCUDO_MALLOC_ALIGNMENT));
+}
+
+INTERFACE WEAK void *SCUDO_PREFIX(valloc)(size_t size) {
+ return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
+ size, scudo::Chunk::Origin::Memalign, scudo::getPageSizeCached()));
+}
+
+// Bionic wants a function named PREFIX_iterate rather than
+// PREFIX_malloc_iterate, which is somewhat inconsistent with the rest; work
+// around that here.
+#if SCUDO_ANDROID && _BIONIC
+#define SCUDO_ITERATE iterate
+#else
+#define SCUDO_ITERATE malloc_iterate
+#endif
+
+INTERFACE WEAK int SCUDO_PREFIX(SCUDO_ITERATE)(
+ uintptr_t base, size_t size,
+ void (*callback)(uintptr_t base, size_t size, void *arg), void *arg) {
+ SCUDO_ALLOCATOR.iterateOverChunks(base, size, callback, arg);
+ return 0;
+}
+
+INTERFACE WEAK void SCUDO_PREFIX(malloc_disable)() {
+ SCUDO_ALLOCATOR.disable();
+}
+
+INTERFACE WEAK void SCUDO_PREFIX(malloc_enable)() { SCUDO_ALLOCATOR.enable(); }
+
+INTERFACE WEAK int SCUDO_PREFIX(mallopt)(int param, UNUSED int value) {
+ if (param == M_DECAY_TIME) {
+ // TODO(kostyak): set release_to_os_interval_ms accordingly.
+ return 1;
+ } else if (param == M_PURGE) {
+ SCUDO_ALLOCATOR.releaseToOS();
+ return 1;
+ }
+ return 0;
+}
+
+INTERFACE WEAK void *SCUDO_PREFIX(aligned_alloc)(size_t alignment,
+ size_t size) {
+ if (UNLIKELY(scudo::checkAlignedAllocAlignmentAndSize(alignment, size))) {
+ if (SCUDO_ALLOCATOR.canReturnNull()) {
+ errno = EINVAL;
+ return nullptr;
+ }
+ scudo::reportInvalidAlignedAllocAlignment(alignment, size);
+ }
+ return scudo::setErrnoOnNull(
+ SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Malloc, alignment));
+}
+
+INTERFACE WEAK int SCUDO_PREFIX(malloc_info)(UNUSED int options, FILE *stream) {
+ fputs("<malloc version=\"scudo-1\">", stream);
+ fputs("</malloc>", stream);
+ return 0;
+}
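
A caller-side sketch of the Scudo-specific mallopt() parameters defined above,
assuming the non-Bionic build where the wrappers carry no prefix; M_PURGE
triggers an immediate release of free memory to the OS, while M_DECAY_TIME is
currently accepted but not yet wired up (see the TODO):

  #include <malloc.h>

  // Mirror the fallback values from wrappers_c.h in case malloc.h does not
  // provide these constants.
  #ifndef M_DECAY_TIME
  #define M_DECAY_TIME -100
  #endif
  #ifndef M_PURGE
  #define M_PURGE -101
  #endif

  void trimHeap(void) {
    mallopt(M_PURGE, 0);      // Dispatches to releaseToOS() and returns 1.
    mallopt(M_DECAY_TIME, 1); // Accepted (returns 1) but currently a no-op.
  }
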
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp b/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
new file mode 100644
index 000000000000..fa4145c066b6
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
@@ -0,0 +1,49 @@
+//===-- wrappers_c_bionic.cpp -----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+// This is only used when compiled as part of Bionic.
+#if SCUDO_ANDROID && _BIONIC
+
+#include "allocator_config.h"
+#include "wrappers_c.h"
+#include "wrappers_c_checks.h"
+
+#include <stdint.h>
+#include <stdio.h>
+
+static scudo::Allocator<scudo::AndroidConfig> Allocator;
+static scudo::Allocator<scudo::AndroidSvelteConfig> SvelteAllocator;
+
+extern "C" {
+
+// Regular MallocDispatch definitions.
+#define SCUDO_PREFIX(name) CONCATENATE(scudo_, name)
+#define SCUDO_ALLOCATOR Allocator
+#include "wrappers_c.inc"
+#undef SCUDO_ALLOCATOR
+#undef SCUDO_PREFIX
+
+// Svelte MallocDispatch definitions.
+#define SCUDO_PREFIX(name) CONCATENATE(scudo_svelte_, name)
+#define SCUDO_ALLOCATOR SvelteAllocator
+#include "wrappers_c.inc"
+#undef SCUDO_ALLOCATOR
+#undef SCUDO_PREFIX
+
+// The following is the only function that will end up initializing both
+// allocators, which will result in a slight increase in memory footprint.
+INTERFACE void __scudo_print_stats(void) {
+ Allocator.printStats();
+ SvelteAllocator.printStats();
+}
+
+} // extern "C"
+
+#endif // SCUDO_ANDROID && _BIONIC
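
With SCUDO_PREFIX(name) defined as CONCATENATE(scudo_, name), the first
inclusion of wrappers_c.inc emits scudo_malloc, scudo_free, scudo_mallinfo and
so on, and the second inclusion their scudo_svelte_ counterparts bound to
SvelteAllocator; the two malloc wrappers, for instance, expand roughly to:

  INTERFACE WEAK void *scudo_malloc(size_t size) {
    return scudo::setErrnoOnNull(Allocator.allocate(
        size, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT));
  }

  INTERFACE WEAK void *scudo_svelte_malloc(size_t size) {
    return scudo::setErrnoOnNull(SvelteAllocator.allocate(
        size, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT));
  }
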
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c_checks.h b/compiler-rt/lib/scudo/standalone/wrappers_c_checks.h
new file mode 100644
index 000000000000..d4370d506e5e
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c_checks.h
@@ -0,0 +1,67 @@
+//===-- wrappers_c_checks.h -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_CHECKS_H_
+#define SCUDO_CHECKS_H_
+
+#include "common.h"
+
+#include <errno.h>
+
+#ifndef __has_builtin
+#define __has_builtin(X) 0
+#endif
+
+namespace scudo {
+
+// Common errno-setting logic shared by almost all Scudo C wrappers.
+INLINE void *setErrnoOnNull(void *Ptr) {
+ if (UNLIKELY(!Ptr))
+ errno = ENOMEM;
+ return Ptr;
+}
+
+// Checks return true on failure.
+
+// Checks aligned_alloc() parameters: verifies that the alignment is a power of
+// two and that the size is a multiple of the alignment.
+INLINE bool checkAlignedAllocAlignmentAndSize(uptr Alignment, uptr Size) {
+ return Alignment == 0 || !isPowerOfTwo(Alignment) ||
+ !isAligned(Size, Alignment);
+}
+
+// Checks the posix_memalign() alignment parameter: verifies that it is a power
+// of two and a multiple of sizeof(void *).
+INLINE bool checkPosixMemalignAlignment(uptr Alignment) {
+ return Alignment == 0 || !isPowerOfTwo(Alignment) ||
+ !isAligned(Alignment, sizeof(void *));
+}
+
+// Returns true if calloc(Size, N) overflows on the Size*N calculation. Uses a
+// builtin supported by recent clang & GCC if it exists, otherwise falls back
+// to a costly division.
+INLINE bool checkForCallocOverflow(uptr Size, uptr N, uptr *Product) {
+#if __has_builtin(__builtin_umull_overflow)
+ return __builtin_umull_overflow(Size, N, Product);
+#else
+ *Product = Size * N;
+ if (!Size)
+ return false;
+ return (*Product / Size) != N;
+#endif
+}
+
+// Returns true if the size passed to pvalloc overflows when rounded to the next
+// multiple of PageSize.
+INLINE bool checkForPvallocOverflow(uptr Size, uptr PageSize) {
+ return roundUpTo(Size, PageSize) < Size;
+}
+
+} // namespace scudo
+
+#endif // SCUDO_CHECKS_H_
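
A few illustrative results for the checks above, assuming a 64-bit target
(sizeof(void *) == 8) and that the CHECK() macro from scudo's internal
definitions is available (a plain assert would do just as well):

  void checksExamples() {
    scudo::uptr Product;
    CHECK(!scudo::checkAlignedAllocAlignmentAndSize(16, 64)); // Valid pair.
    CHECK(scudo::checkAlignedAllocAlignmentAndSize(24, 64));  // Not a power of 2.
    CHECK(scudo::checkPosixMemalignAlignment(4));   // Less than sizeof(void *).
    CHECK(!scudo::checkPosixMemalignAlignment(32));
    CHECK(!scudo::checkForCallocOverflow(8, 16, &Product));   // Product == 128.
    CHECK(scudo::checkForCallocOverflow(1ULL << 33, 1ULL << 32, &Product));
    CHECK(scudo::checkForPvallocOverflow(~static_cast<scudo::uptr>(0), 4096));
    CHECK(!scudo::checkForPvallocOverflow(4000, 4096)); // Rounds up to 4096.
  }
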
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp b/compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp
new file mode 100644
index 000000000000..72235e9c9820
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp
@@ -0,0 +1,107 @@
+//===-- wrappers_cpp.cpp ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+// Skip this compilation unit if compiled as part of Bionic.
+#if !SCUDO_ANDROID || !_BIONIC
+
+#include "allocator_config.h"
+
+#include <stdint.h>
+
+extern scudo::Allocator<scudo::Config> *AllocatorPtr;
+
+namespace std {
+struct nothrow_t {};
+enum class align_val_t : size_t {};
+} // namespace std
+
+INTERFACE WEAK void *operator new(size_t size) {
+ return AllocatorPtr->allocate(size, scudo::Chunk::Origin::New);
+}
+INTERFACE WEAK void *operator new[](size_t size) {
+ return AllocatorPtr->allocate(size, scudo::Chunk::Origin::NewArray);
+}
+INTERFACE WEAK void *operator new(size_t size,
+ std::nothrow_t const &) NOEXCEPT {
+ return AllocatorPtr->allocate(size, scudo::Chunk::Origin::New);
+}
+INTERFACE WEAK void *operator new[](size_t size,
+ std::nothrow_t const &) NOEXCEPT {
+ return AllocatorPtr->allocate(size, scudo::Chunk::Origin::NewArray);
+}
+INTERFACE WEAK void *operator new(size_t size, std::align_val_t align) {
+ return AllocatorPtr->allocate(size, scudo::Chunk::Origin::New,
+ static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void *operator new[](size_t size, std::align_val_t align) {
+ return AllocatorPtr->allocate(size, scudo::Chunk::Origin::NewArray,
+ static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void *operator new(size_t size, std::align_val_t align,
+ std::nothrow_t const &) NOEXCEPT {
+ return AllocatorPtr->allocate(size, scudo::Chunk::Origin::New,
+ static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void *operator new[](size_t size, std::align_val_t align,
+ std::nothrow_t const &) NOEXCEPT {
+ return AllocatorPtr->allocate(size, scudo::Chunk::Origin::NewArray,
+ static_cast<scudo::uptr>(align));
+}
+
+INTERFACE WEAK void operator delete(void *ptr) NOEXCEPT {
+ AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New);
+}
+INTERFACE WEAK void operator delete[](void *ptr) NOEXCEPT {
+ AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray);
+}
+INTERFACE WEAK void operator delete(void *ptr,
+                                    std::nothrow_t const &) NOEXCEPT {
+ AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New);
+}
+INTERFACE WEAK void operator delete[](void *ptr,
+ std::nothrow_t const &) NOEXCEPT {
+ AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray);
+}
+INTERFACE WEAK void operator delete(void *ptr, size_t size) NOEXCEPT {
+ AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New, size);
+}
+INTERFACE WEAK void operator delete[](void *ptr, size_t size) NOEXCEPT {
+ AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray, size);
+}
+INTERFACE WEAK void operator delete(void *ptr,
+                                    std::align_val_t align) NOEXCEPT {
+ AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New, 0,
+ static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void operator delete[](void *ptr,
+ std::align_val_t align) NOEXCEPT {
+ AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray, 0,
+ static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void operator delete(void *ptr, std::align_val_t align,
+                                    std::nothrow_t const &) NOEXCEPT {
+ AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New, 0,
+ static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void operator delete[](void *ptr, std::align_val_t align,
+ std::nothrow_t const &) NOEXCEPT {
+ AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray, 0,
+ static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void operator delete(void *ptr, size_t size,
+                                    std::align_val_t align) NOEXCEPT {
+ AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New, size,
+ static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void operator delete[](void *ptr, size_t size,
+ std::align_val_t align) NOEXCEPT {
+ AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray, size,
+ static_cast<scudo::uptr>(align));
+}
+
+#endif // !SCUDO_ANDROID || !_BIONIC
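
A minimal caller-side sketch of the operators above, assuming a C++17 (or
-faligned-new) build so the over-aligned forms are selected for types whose
alignment exceeds the default new alignment; whether the sized deletes are used
additionally depends on -fsized-deallocation:

  #include <new>

  struct alignas(64) CacheLine {
    char Data[64];
  };

  void cppExample() {
    int *I = new int(42);           // operator new(size) -> Origin::New.
    delete I;
    int *A = new int[8];            // operator new[](size) -> Origin::NewArray.
    delete[] A;
    CacheLine *C = new CacheLine(); // Aligned form: new(size, align_val_t{64}).
    delete C;                       // Aligned (and possibly sized) delete.
    int *N = new (std::nothrow) int(0); // Nothrow form returns nullptr on failure.
    delete N;
  }
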