Diffstat (limited to 'contrib/compiler-rt/lib/asan')
65 files changed, 13450 insertions, 0 deletions
diff --git a/contrib/compiler-rt/lib/asan/README.txt b/contrib/compiler-rt/lib/asan/README.txt new file mode 100644 index 000000000000..bb6ff42c5cde --- /dev/null +++ b/contrib/compiler-rt/lib/asan/README.txt @@ -0,0 +1,26 @@ +AddressSanitizer RT +================================ +This directory contains sources of the AddressSanitizer (ASan) runtime library. + +Directory structure: +README.txt : This file. +Makefile.mk : File for make-based build. +CMakeLists.txt : File for cmake-based build. +asan_*.{cc,h} : Sources of the asan runtime library. +scripts/* : Helper scripts. +tests/* : ASan unit tests. + +Also ASan runtime needs the following libraries: +lib/interception/ : Machinery used to intercept function calls. +lib/sanitizer_common/ : Code shared between various sanitizers. + +ASan runtime currently also embeds part of LeakSanitizer runtime for +leak detection (lib/lsan/lsan_common.{cc,h}). + +ASan runtime can only be built by CMake. You can run ASan tests +from the root of your CMake build tree: + +make check-asan + +For more instructions see: +https://github.com/google/sanitizers/wiki/AddressSanitizerHowToBuild diff --git a/contrib/compiler-rt/lib/asan/asan.syms.extra b/contrib/compiler-rt/lib/asan/asan.syms.extra new file mode 100644 index 000000000000..f8e9b3aedcc3 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan.syms.extra @@ -0,0 +1,4 @@ +__asan_* +__lsan_* +__ubsan_* +__sancov_* diff --git a/contrib/compiler-rt/lib/asan/asan_activation.cc b/contrib/compiler-rt/lib/asan/asan_activation.cc new file mode 100644 index 000000000000..d642be93488d --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_activation.cc @@ -0,0 +1,144 @@ +//===-- asan_activation.cc --------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// ASan activation/deactivation logic. +//===----------------------------------------------------------------------===// + +#include "asan_activation.h" +#include "asan_allocator.h" +#include "asan_flags.h" +#include "asan_internal.h" +#include "asan_mapping.h" +#include "asan_poisoning.h" +#include "asan_stack.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_flags.h" + +namespace __asan { + +static struct AsanDeactivatedFlags { + AllocatorOptions allocator_options; + int malloc_context_size; + bool poison_heap; + bool coverage; + const char *coverage_dir; + + void RegisterActivationFlags(FlagParser *parser, Flags *f, CommonFlags *cf) { +#define ASAN_ACTIVATION_FLAG(Type, Name) \ + RegisterFlag(parser, #Name, "", &f->Name); +#define COMMON_ACTIVATION_FLAG(Type, Name) \ + RegisterFlag(parser, #Name, "", &cf->Name); +#include "asan_activation_flags.inc" +#undef ASAN_ACTIVATION_FLAG +#undef COMMON_ACTIVATION_FLAG + + RegisterIncludeFlags(parser, cf); + } + + void OverrideFromActivationFlags() { + Flags f; + CommonFlags cf; + FlagParser parser; + RegisterActivationFlags(&parser, &f, &cf); + + cf.SetDefaults(); + // Copy the current activation flags. 
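// A minimal sketch of the include-twice idiom that asan_activation_flags.inc
// relies on, shown with hypothetical names (my_flags.inc, MY_FLAG, MyFlags):
// the same .inc file expands to field declarations at one include site and to
// RegisterFlag calls at another.
//
//   // my_flags.inc
//   MY_FLAG(int, redzone)
//   MY_FLAG(bool, poison_heap)
//
//   // declaration site
//   struct MyFlags {
//   #define MY_FLAG(Type, Name) Type Name;
//   #include "my_flags.inc"
//   #undef MY_FLAG
//   };
//
//   // registration site, mirroring RegisterActivationFlags above
//   #define MY_FLAG(Type, Name) RegisterFlag(parser, #Name, "", &f->Name);
//   #include "my_flags.inc"
//   #undef MY_FLAG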
+ allocator_options.CopyTo(&f, &cf); + cf.malloc_context_size = malloc_context_size; + f.poison_heap = poison_heap; + cf.coverage = coverage; + cf.coverage_dir = coverage_dir; + cf.verbosity = Verbosity(); + cf.help = false; // this is activation-specific help + + // Check if activation flags need to be overriden. + if (const char *env = GetEnv("ASAN_ACTIVATION_OPTIONS")) { + parser.ParseString(env); + } + + InitializeCommonFlags(&cf); + + if (Verbosity()) ReportUnrecognizedFlags(); + + if (cf.help) parser.PrintFlagDescriptions(); + + allocator_options.SetFrom(&f, &cf); + malloc_context_size = cf.malloc_context_size; + poison_heap = f.poison_heap; + coverage = cf.coverage; + coverage_dir = cf.coverage_dir; + } + + void Print() { + Report( + "quarantine_size_mb %d, thread_local_quarantine_size_kb %d, " + "max_redzone %d, poison_heap %d, malloc_context_size %d, " + "alloc_dealloc_mismatch %d, allocator_may_return_null %d, coverage %d, " + "coverage_dir %s, allocator_release_to_os_interval_ms %d\n", + allocator_options.quarantine_size_mb, + allocator_options.thread_local_quarantine_size_kb, + allocator_options.max_redzone, poison_heap, malloc_context_size, + allocator_options.alloc_dealloc_mismatch, + allocator_options.may_return_null, coverage, coverage_dir, + allocator_options.release_to_os_interval_ms); + } +} asan_deactivated_flags; + +static bool asan_is_deactivated; + +void AsanDeactivate() { + CHECK(!asan_is_deactivated); + VReport(1, "Deactivating ASan\n"); + + // Stash runtime state. + GetAllocatorOptions(&asan_deactivated_flags.allocator_options); + asan_deactivated_flags.malloc_context_size = GetMallocContextSize(); + asan_deactivated_flags.poison_heap = CanPoisonMemory(); + asan_deactivated_flags.coverage = common_flags()->coverage; + asan_deactivated_flags.coverage_dir = common_flags()->coverage_dir; + + // Deactivate the runtime. + SetCanPoisonMemory(false); + SetMallocContextSize(1); + + AllocatorOptions disabled = asan_deactivated_flags.allocator_options; + disabled.quarantine_size_mb = 0; + disabled.thread_local_quarantine_size_kb = 0; + // Redzone must be at least Max(16, granularity) bytes long. + disabled.min_redzone = Max(16, (int)SHADOW_GRANULARITY); + disabled.max_redzone = disabled.min_redzone; + disabled.alloc_dealloc_mismatch = false; + disabled.may_return_null = true; + ReInitializeAllocator(disabled); + + asan_is_deactivated = true; +} + +void AsanActivate() { + if (!asan_is_deactivated) return; + VReport(1, "Activating ASan\n"); + + UpdateProcessName(); + + asan_deactivated_flags.OverrideFromActivationFlags(); + + SetCanPoisonMemory(asan_deactivated_flags.poison_heap); + SetMallocContextSize(asan_deactivated_flags.malloc_context_size); + ReInitializeAllocator(asan_deactivated_flags.allocator_options); + + asan_is_deactivated = false; + if (Verbosity()) { + Report("Activated with flags:\n"); + asan_deactivated_flags.Print(); + } +} + +} // namespace __asan diff --git a/contrib/compiler-rt/lib/asan/asan_activation.h b/contrib/compiler-rt/lib/asan/asan_activation.h new file mode 100644 index 000000000000..d5e1ce433001 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_activation.h @@ -0,0 +1,23 @@ +//===-- asan_activation.h ---------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// ASan activation/deactivation logic. +//===----------------------------------------------------------------------===// + +#ifndef ASAN_ACTIVATION_H +#define ASAN_ACTIVATION_H + +namespace __asan { +void AsanDeactivate(); +void AsanActivate(); +} // namespace __asan + +#endif // ASAN_ACTIVATION_H diff --git a/contrib/compiler-rt/lib/asan/asan_activation_flags.inc b/contrib/compiler-rt/lib/asan/asan_activation_flags.inc new file mode 100644 index 000000000000..1c66e5bb53a5 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_activation_flags.inc @@ -0,0 +1,37 @@ +//===-- asan_activation_flags.inc -------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// A subset of ASan (and common) runtime flags supported at activation time. +// +//===----------------------------------------------------------------------===// +#ifndef ASAN_ACTIVATION_FLAG +# error "Define ASAN_ACTIVATION_FLAG prior to including this file!" +#endif + +#ifndef COMMON_ACTIVATION_FLAG +# error "Define COMMON_ACTIVATION_FLAG prior to including this file!" +#endif + +// ASAN_ACTIVATION_FLAG(Type, Name) +// See COMMON_FLAG in sanitizer_flags.inc for more details. + +ASAN_ACTIVATION_FLAG(int, redzone) +ASAN_ACTIVATION_FLAG(int, max_redzone) +ASAN_ACTIVATION_FLAG(int, quarantine_size_mb) +ASAN_ACTIVATION_FLAG(int, thread_local_quarantine_size_kb) +ASAN_ACTIVATION_FLAG(bool, alloc_dealloc_mismatch) +ASAN_ACTIVATION_FLAG(bool, poison_heap) + +COMMON_ACTIVATION_FLAG(bool, allocator_may_return_null) +COMMON_ACTIVATION_FLAG(int, malloc_context_size) +COMMON_ACTIVATION_FLAG(bool, coverage) +COMMON_ACTIVATION_FLAG(const char *, coverage_dir) +COMMON_ACTIVATION_FLAG(int, verbosity) +COMMON_ACTIVATION_FLAG(bool, help) +COMMON_ACTIVATION_FLAG(s32, allocator_release_to_os_interval_ms) diff --git a/contrib/compiler-rt/lib/asan/asan_allocator.cc b/contrib/compiler-rt/lib/asan/asan_allocator.cc new file mode 100644 index 000000000000..c0fad4fa042b --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_allocator.cc @@ -0,0 +1,1109 @@ +//===-- asan_allocator.cc -------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Implementation of ASan's memory allocator, 2-nd version. +// This variant uses the allocator from sanitizer_common, i.e. the one shared +// with ThreadSanitizer and MemorySanitizer. 
+// +//===----------------------------------------------------------------------===// + +#include "asan_allocator.h" +#include "asan_mapping.h" +#include "asan_poisoning.h" +#include "asan_report.h" +#include "asan_stack.h" +#include "asan_thread.h" +#include "sanitizer_common/sanitizer_allocator_checks.h" +#include "sanitizer_common/sanitizer_allocator_interface.h" +#include "sanitizer_common/sanitizer_errno.h" +#include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_internal_defs.h" +#include "sanitizer_common/sanitizer_list.h" +#include "sanitizer_common/sanitizer_stackdepot.h" +#include "sanitizer_common/sanitizer_quarantine.h" +#include "lsan/lsan_common.h" + +namespace __asan { + +// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits. +// We use adaptive redzones: for larger allocation larger redzones are used. +static u32 RZLog2Size(u32 rz_log) { + CHECK_LT(rz_log, 8); + return 16 << rz_log; +} + +static u32 RZSize2Log(u32 rz_size) { + CHECK_GE(rz_size, 16); + CHECK_LE(rz_size, 2048); + CHECK(IsPowerOfTwo(rz_size)); + u32 res = Log2(rz_size) - 4; + CHECK_EQ(rz_size, RZLog2Size(res)); + return res; +} + +static AsanAllocator &get_allocator(); + +// The memory chunk allocated from the underlying allocator looks like this: +// L L L L L L H H U U U U U U R R +// L -- left redzone words (0 or more bytes) +// H -- ChunkHeader (16 bytes), which is also a part of the left redzone. +// U -- user memory. +// R -- right redzone (0 or more bytes) +// ChunkBase consists of ChunkHeader and other bytes that overlap with user +// memory. + +// If the left redzone is greater than the ChunkHeader size we store a magic +// value in the first uptr word of the memory block and store the address of +// ChunkBase in the next uptr. +// M B L L L L L L L L L H H U U U U U U +// | ^ +// ---------------------| +// M -- magic value kAllocBegMagic +// B -- address of ChunkHeader pointing to the first 'H' +static const uptr kAllocBegMagic = 0xCC6E96B9; + +struct ChunkHeader { + // 1-st 8 bytes. + u32 chunk_state : 8; // Must be first. + u32 alloc_tid : 24; + + u32 free_tid : 24; + u32 from_memalign : 1; + u32 alloc_type : 2; + u32 rz_log : 3; + u32 lsan_tag : 2; + // 2-nd 8 bytes + // This field is used for small sizes. For large sizes it is equal to + // SizeClassMap::kMaxSize and the actual size is stored in the + // SecondaryAllocator's metadata. + u32 user_requested_size : 29; + // align < 8 -> 0 + // else -> log2(min(align, 512)) - 2 + u32 user_requested_alignment_log : 3; + u32 alloc_context_id; +}; + +struct ChunkBase : ChunkHeader { + // Header2, intersects with user memory. + u32 free_context_id; +}; + +static const uptr kChunkHeaderSize = sizeof(ChunkHeader); +static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize; +COMPILER_CHECK(kChunkHeaderSize == 16); +COMPILER_CHECK(kChunkHeader2Size <= 16); + +// Every chunk of memory allocated by this allocator can be in one of 3 states: +// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated. +// CHUNK_ALLOCATED: the chunk is allocated and not yet freed. +// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone. +enum { + CHUNK_AVAILABLE = 0, // 0 is the default value even if we didn't set it. 
+ CHUNK_ALLOCATED = 2, + CHUNK_QUARANTINE = 3 +}; + +struct AsanChunk: ChunkBase { + uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; } + uptr UsedSize(bool locked_version = false) { + if (user_requested_size != SizeClassMap::kMaxSize) + return user_requested_size; + return *reinterpret_cast<uptr *>( + get_allocator().GetMetaData(AllocBeg(locked_version))); + } + void *AllocBeg(bool locked_version = false) { + if (from_memalign) { + if (locked_version) + return get_allocator().GetBlockBeginFastLocked( + reinterpret_cast<void *>(this)); + return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this)); + } + return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log)); + } + bool AddrIsInside(uptr addr, bool locked_version = false) { + return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version)); + } +}; + +struct QuarantineCallback { + QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack) + : cache_(cache), + stack_(stack) { + } + + void Recycle(AsanChunk *m) { + CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE); + atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed); + CHECK_NE(m->alloc_tid, kInvalidTid); + CHECK_NE(m->free_tid, kInvalidTid); + PoisonShadow(m->Beg(), + RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY), + kAsanHeapLeftRedzoneMagic); + void *p = reinterpret_cast<void *>(m->AllocBeg()); + if (p != m) { + uptr *alloc_magic = reinterpret_cast<uptr *>(p); + CHECK_EQ(alloc_magic[0], kAllocBegMagic); + // Clear the magic value, as allocator internals may overwrite the + // contents of deallocated chunk, confusing GetAsanChunk lookup. + alloc_magic[0] = 0; + CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m)); + } + + // Statistics. + AsanStats &thread_stats = GetCurrentThreadStats(); + thread_stats.real_frees++; + thread_stats.really_freed += m->UsedSize(); + + get_allocator().Deallocate(cache_, p); + } + + void *Allocate(uptr size) { + void *res = get_allocator().Allocate(cache_, size, 1); + // TODO(alekseys): Consider making quarantine OOM-friendly. + if (UNLIKELY(!res)) + ReportOutOfMemory(size, stack_); + return res; + } + + void Deallocate(void *p) { + get_allocator().Deallocate(cache_, p); + } + + private: + AllocatorCache* const cache_; + BufferedStackTrace* const stack_; +}; + +typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine; +typedef AsanQuarantine::Cache QuarantineCache; + +void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const { + PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic); + // Statistics. + AsanStats &thread_stats = GetCurrentThreadStats(); + thread_stats.mmaps++; + thread_stats.mmaped += size; +} +void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const { + PoisonShadow(p, size, 0); + // We are about to unmap a chunk of user memory. + // Mark the corresponding shadow memory as not needed. + FlushUnneededASanShadowMemory(p, size); + // Statistics. + AsanStats &thread_stats = GetCurrentThreadStats(); + thread_stats.munmaps++; + thread_stats.munmaped += size; +} + +// We can not use THREADLOCAL because it is not supported on some of the +// platforms we care about (OSX 10.6, Android). 
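// A minimal sketch of the kAllocBegMagic scheme used above, assuming a chunk
// whose left redzone is larger than the 16-byte ChunkHeader: the first two
// uptr words of the underlying block hold a (magic, header address) pair, so
// code that only knows the block start can still recover the header. This
// mirrors AsanChunk::AllocBeg() and the alloc_magic check in Recycle().
//
//   AsanChunk *HeaderFromBlockStart(uptr *words /* == alloc_beg */) {
//     if (words[0] == kAllocBegMagic)
//       return reinterpret_cast<AsanChunk *>(words[1]);  // stashed pointer
//     return reinterpret_cast<AsanChunk *>(words);       // header at start
//   }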
+// static THREADLOCAL AllocatorCache cache; +AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) { + CHECK(ms); + return &ms->allocator_cache; +} + +QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) { + CHECK(ms); + CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache)); + return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache); +} + +void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) { + quarantine_size_mb = f->quarantine_size_mb; + thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb; + min_redzone = f->redzone; + max_redzone = f->max_redzone; + may_return_null = cf->allocator_may_return_null; + alloc_dealloc_mismatch = f->alloc_dealloc_mismatch; + release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms; +} + +void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) { + f->quarantine_size_mb = quarantine_size_mb; + f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb; + f->redzone = min_redzone; + f->max_redzone = max_redzone; + cf->allocator_may_return_null = may_return_null; + f->alloc_dealloc_mismatch = alloc_dealloc_mismatch; + cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms; +} + +struct Allocator { + static const uptr kMaxAllowedMallocSize = + FIRST_32_SECOND_64(3UL << 30, 1ULL << 40); + + AsanAllocator allocator; + AsanQuarantine quarantine; + StaticSpinMutex fallback_mutex; + AllocatorCache fallback_allocator_cache; + QuarantineCache fallback_quarantine_cache; + + atomic_uint8_t rss_limit_exceeded; + + // ------------------- Options -------------------------- + atomic_uint16_t min_redzone; + atomic_uint16_t max_redzone; + atomic_uint8_t alloc_dealloc_mismatch; + + // ------------------- Initialization ------------------------ + explicit Allocator(LinkerInitialized) + : quarantine(LINKER_INITIALIZED), + fallback_quarantine_cache(LINKER_INITIALIZED) {} + + void CheckOptions(const AllocatorOptions &options) const { + CHECK_GE(options.min_redzone, 16); + CHECK_GE(options.max_redzone, options.min_redzone); + CHECK_LE(options.max_redzone, 2048); + CHECK(IsPowerOfTwo(options.min_redzone)); + CHECK(IsPowerOfTwo(options.max_redzone)); + } + + void SharedInitCode(const AllocatorOptions &options) { + CheckOptions(options); + quarantine.Init((uptr)options.quarantine_size_mb << 20, + (uptr)options.thread_local_quarantine_size_kb << 10); + atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch, + memory_order_release); + atomic_store(&min_redzone, options.min_redzone, memory_order_release); + atomic_store(&max_redzone, options.max_redzone, memory_order_release); + } + + void InitLinkerInitialized(const AllocatorOptions &options) { + SetAllocatorMayReturnNull(options.may_return_null); + allocator.InitLinkerInitialized(options.release_to_os_interval_ms); + SharedInitCode(options); + } + + bool RssLimitExceeded() { + return atomic_load(&rss_limit_exceeded, memory_order_relaxed); + } + + void SetRssLimitExceeded(bool limit_exceeded) { + atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed); + } + + void RePoisonChunk(uptr chunk) { + // This could be a user-facing chunk (with redzones), or some internal + // housekeeping chunk, like TransferBatch. Start by assuming the former. 
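// Worked example of the unit conversions in SharedInitCode above, assuming
// default-style flag values quarantine_size_mb == 256 and
// thread_local_quarantine_size_kb == 1024:
//   (uptr)256 << 20  == 268435456 bytes (256 MB global quarantine)
//   (uptr)1024 << 10 == 1048576 bytes   (1 MB per-thread quarantine cache)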
+ AsanChunk *ac = GetAsanChunk((void *)chunk); + uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)ac); + uptr beg = ac->Beg(); + uptr end = ac->Beg() + ac->UsedSize(true); + uptr chunk_end = chunk + allocated_size; + if (chunk < beg && beg < end && end <= chunk_end && + ac->chunk_state == CHUNK_ALLOCATED) { + // Looks like a valid AsanChunk in use, poison redzones only. + PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic); + uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY); + FastPoisonShadowPartialRightRedzone( + end_aligned_down, end - end_aligned_down, + chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic); + } else { + // This is either not an AsanChunk or freed or quarantined AsanChunk. + // In either case, poison everything. + PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic); + } + } + + void ReInitialize(const AllocatorOptions &options) { + SetAllocatorMayReturnNull(options.may_return_null); + allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms); + SharedInitCode(options); + + // Poison all existing allocation's redzones. + if (CanPoisonMemory()) { + allocator.ForceLock(); + allocator.ForEachChunk( + [](uptr chunk, void *alloc) { + ((Allocator *)alloc)->RePoisonChunk(chunk); + }, + this); + allocator.ForceUnlock(); + } + } + + void GetOptions(AllocatorOptions *options) const { + options->quarantine_size_mb = quarantine.GetSize() >> 20; + options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10; + options->min_redzone = atomic_load(&min_redzone, memory_order_acquire); + options->max_redzone = atomic_load(&max_redzone, memory_order_acquire); + options->may_return_null = AllocatorMayReturnNull(); + options->alloc_dealloc_mismatch = + atomic_load(&alloc_dealloc_mismatch, memory_order_acquire); + options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs(); + } + + // -------------------- Helper methods. ------------------------- + uptr ComputeRZLog(uptr user_requested_size) { + u32 rz_log = + user_requested_size <= 64 - 16 ? 0 : + user_requested_size <= 128 - 32 ? 1 : + user_requested_size <= 512 - 64 ? 2 : + user_requested_size <= 4096 - 128 ? 3 : + user_requested_size <= (1 << 14) - 256 ? 4 : + user_requested_size <= (1 << 15) - 512 ? 5 : + user_requested_size <= (1 << 16) - 1024 ? 6 : 7; + u32 min_rz = atomic_load(&min_redzone, memory_order_acquire); + u32 max_rz = atomic_load(&max_redzone, memory_order_acquire); + return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz)); + } + + static uptr ComputeUserRequestedAlignmentLog(uptr user_requested_alignment) { + if (user_requested_alignment < 8) + return 0; + if (user_requested_alignment > 512) + user_requested_alignment = 512; + return Log2(user_requested_alignment) - 2; + } + + static uptr ComputeUserAlignment(uptr user_requested_alignment_log) { + if (user_requested_alignment_log == 0) + return 0; + return 1LL << (user_requested_alignment_log + 2); + } + + // We have an address between two chunks, and we want to report just one. + AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk, + AsanChunk *right_chunk) { + // Prefer an allocated chunk over freed chunk and freed chunk + // over available chunk. 
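// Worked round-trips for the 3-bit alignment encoding defined above
// (ComputeUserRequestedAlignmentLog / ComputeUserAlignment):
//   align 4    -> log 0 (below 8 is treated as default) -> decodes to 0
//   align 8    -> log Log2(8) - 2 == 1 -> decodes to 1 << 3 == 8
//   align 512  -> log Log2(512) - 2 == 7 -> decodes to 1 << 9 == 512
//   align 4096 -> clamped to 512 -> log 7 -> decodes to 512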
+ if (left_chunk->chunk_state != right_chunk->chunk_state) { + if (left_chunk->chunk_state == CHUNK_ALLOCATED) + return left_chunk; + if (right_chunk->chunk_state == CHUNK_ALLOCATED) + return right_chunk; + if (left_chunk->chunk_state == CHUNK_QUARANTINE) + return left_chunk; + if (right_chunk->chunk_state == CHUNK_QUARANTINE) + return right_chunk; + } + // Same chunk_state: choose based on offset. + sptr l_offset = 0, r_offset = 0; + CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset)); + CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset)); + if (l_offset < r_offset) + return left_chunk; + return right_chunk; + } + + // -------------------- Allocation/Deallocation routines --------------- + void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack, + AllocType alloc_type, bool can_fill) { + if (UNLIKELY(!asan_inited)) + AsanInitFromRtl(); + if (RssLimitExceeded()) { + if (AllocatorMayReturnNull()) + return nullptr; + ReportRssLimitExceeded(stack); + } + Flags &fl = *flags(); + CHECK(stack); + const uptr min_alignment = SHADOW_GRANULARITY; + const uptr user_requested_alignment_log = + ComputeUserRequestedAlignmentLog(alignment); + if (alignment < min_alignment) + alignment = min_alignment; + if (size == 0) { + // We'd be happy to avoid allocating memory for zero-size requests, but + // some programs/tests depend on this behavior and assume that malloc + // would not return NULL even for zero-size allocations. Moreover, it + // looks like operator new should never return NULL, and results of + // consecutive "new" calls must be different even if the allocated size + // is zero. + size = 1; + } + CHECK(IsPowerOfTwo(alignment)); + uptr rz_log = ComputeRZLog(size); + uptr rz_size = RZLog2Size(rz_log); + uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment); + uptr needed_size = rounded_size + rz_size; + if (alignment > min_alignment) + needed_size += alignment; + bool using_primary_allocator = true; + // If we are allocating from the secondary allocator, there will be no + // automatic right redzone, so add the right redzone manually. + if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) { + needed_size += rz_size; + using_primary_allocator = false; + } + CHECK(IsAligned(needed_size, min_alignment)); + if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) { + if (AllocatorMayReturnNull()) { + Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n", + (void*)size); + return nullptr; + } + ReportAllocationSizeTooBig(size, needed_size, kMaxAllowedMallocSize, + stack); + } + + AsanThread *t = GetCurrentThread(); + void *allocated; + if (t) { + AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage()); + allocated = allocator.Allocate(cache, needed_size, 8); + } else { + SpinMutexLock l(&fallback_mutex); + AllocatorCache *cache = &fallback_allocator_cache; + allocated = allocator.Allocate(cache, needed_size, 8); + } + if (UNLIKELY(!allocated)) { + SetAllocatorOutOfMemory(); + if (AllocatorMayReturnNull()) + return nullptr; + ReportOutOfMemory(size, stack); + } + + if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) { + // Heap poisoning is enabled, but the allocator provides an unpoisoned + // chunk. This is possible if CanPoisonMemory() was false for some + // time, for example, due to flags()->start_disabled. + // Anyway, poison the block before using it for anything else. 
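// Worked example of the size computation in Allocate above, assuming
// size == 100, alignment == 8, SHADOW_GRANULARITY == 8 and the default
// 16/2048 redzone bounds:
//   ComputeRZLog(100) == 2, so rz_size == RZLog2Size(2) == 64
//   rounded_size == RoundUpTo(Max(100, kChunkHeader2Size), 8) == 104
//   needed_size == 104 + 64 == 168 (primary allocator, so the size-class
//   round-up supplies the right redzone automatically)
//   user_beg == alloc_beg + 64, with the ChunkHeader at user_beg - 16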
+ uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated); + PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic); + } + + uptr alloc_beg = reinterpret_cast<uptr>(allocated); + uptr alloc_end = alloc_beg + needed_size; + uptr beg_plus_redzone = alloc_beg + rz_size; + uptr user_beg = beg_plus_redzone; + if (!IsAligned(user_beg, alignment)) + user_beg = RoundUpTo(user_beg, alignment); + uptr user_end = user_beg + size; + CHECK_LE(user_end, alloc_end); + uptr chunk_beg = user_beg - kChunkHeaderSize; + AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg); + m->alloc_type = alloc_type; + m->rz_log = rz_log; + u32 alloc_tid = t ? t->tid() : 0; + m->alloc_tid = alloc_tid; + CHECK_EQ(alloc_tid, m->alloc_tid); // Does alloc_tid fit into the bitfield? + m->free_tid = kInvalidTid; + m->from_memalign = user_beg != beg_plus_redzone; + if (alloc_beg != chunk_beg) { + CHECK_LE(alloc_beg+ 2 * sizeof(uptr), chunk_beg); + reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic; + reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg; + } + if (using_primary_allocator) { + CHECK(size); + m->user_requested_size = size; + CHECK(allocator.FromPrimary(allocated)); + } else { + CHECK(!allocator.FromPrimary(allocated)); + m->user_requested_size = SizeClassMap::kMaxSize; + uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated)); + meta[0] = size; + meta[1] = chunk_beg; + } + m->user_requested_alignment_log = user_requested_alignment_log; + + m->alloc_context_id = StackDepotPut(*stack); + + uptr size_rounded_down_to_granularity = + RoundDownTo(size, SHADOW_GRANULARITY); + // Unpoison the bulk of the memory region. + if (size_rounded_down_to_granularity) + PoisonShadow(user_beg, size_rounded_down_to_granularity, 0); + // Deal with the end of the region if size is not aligned to granularity. + if (size != size_rounded_down_to_granularity && CanPoisonMemory()) { + u8 *shadow = + (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity); + *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0; + } + + AsanStats &thread_stats = GetCurrentThreadStats(); + thread_stats.mallocs++; + thread_stats.malloced += size; + thread_stats.malloced_redzones += needed_size - size; + if (needed_size > SizeClassMap::kMaxSize) + thread_stats.malloc_large++; + else + thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++; + + void *res = reinterpret_cast<void *>(user_beg); + if (can_fill && fl.max_malloc_fill_size) { + uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size); + REAL(memset)(res, fl.malloc_fill_byte, fill_size); + } +#if CAN_SANITIZE_LEAKS + m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored + : __lsan::kDirectlyLeaked; +#endif + // Must be the last mutation of metadata in this function. + atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release); + ASAN_MALLOC_HOOK(res, size); + return res; + } + + // Set quarantine flag if chunk is allocated, issue ASan error report on + // available and quarantined chunks. Return true on success, false otherwise. + bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr, + BufferedStackTrace *stack) { + u8 old_chunk_state = CHUNK_ALLOCATED; + // Flip the chunk_state atomically to avoid race on double-free. + if (!atomic_compare_exchange_strong((atomic_uint8_t *)m, &old_chunk_state, + CHUNK_QUARANTINE, + memory_order_acquire)) { + ReportInvalidFree(ptr, old_chunk_state, stack); + // It's not safe to push a chunk in quarantine on invalid free. 
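// The chunk_state transitions enforced by the code above and below, with the
// only racy edge guarded by the compare-exchange in
// AtomicallySetQuarantineFlagIfAllocated:
//
//   CHUNK_AVAILABLE  --Allocate()-->   CHUNK_ALLOCATED
//   CHUNK_ALLOCATED  --Deallocate()--> CHUNK_QUARANTINE  (atomic CAS; a
//                                      losing racer reports invalid free)
//   CHUNK_QUARANTINE --Recycle()-->    CHUNK_AVAILABLE   (quarantine drained)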
+ return false; + } + CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state); + return true; + } + + // Expects the chunk to already be marked as quarantined by using + // AtomicallySetQuarantineFlagIfAllocated. + void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) { + CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE); + CHECK_GE(m->alloc_tid, 0); + if (SANITIZER_WORDSIZE == 64) // On 32-bits this resides in user area. + CHECK_EQ(m->free_tid, kInvalidTid); + AsanThread *t = GetCurrentThread(); + m->free_tid = t ? t->tid() : 0; + m->free_context_id = StackDepotPut(*stack); + + Flags &fl = *flags(); + if (fl.max_free_fill_size > 0) { + // We have to skip the chunk header, it contains free_context_id. + uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size; + if (m->UsedSize() >= kChunkHeader2Size) { // Skip Header2 in user area. + uptr size_to_fill = m->UsedSize() - kChunkHeader2Size; + size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size); + REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill); + } + } + + // Poison the region. + PoisonShadow(m->Beg(), + RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY), + kAsanHeapFreeMagic); + + AsanStats &thread_stats = GetCurrentThreadStats(); + thread_stats.frees++; + thread_stats.freed += m->UsedSize(); + + // Push into quarantine. + if (t) { + AsanThreadLocalMallocStorage *ms = &t->malloc_storage(); + AllocatorCache *ac = GetAllocatorCache(ms); + quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac, stack), m, + m->UsedSize()); + } else { + SpinMutexLock l(&fallback_mutex); + AllocatorCache *ac = &fallback_allocator_cache; + quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac, stack), + m, m->UsedSize()); + } + } + + void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment, + BufferedStackTrace *stack, AllocType alloc_type) { + uptr p = reinterpret_cast<uptr>(ptr); + if (p == 0) return; + + uptr chunk_beg = p - kChunkHeaderSize; + AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg); + + // On Windows, uninstrumented DLLs may allocate memory before ASan hooks + // malloc. Don't report an invalid free in this case. + if (SANITIZER_WINDOWS && + !get_allocator().PointerIsMine(ptr)) { + if (!IsSystemHeapAddress(p)) + ReportFreeNotMalloced(p, stack); + return; + } + + ASAN_FREE_HOOK(ptr); + + // Must mark the chunk as quarantined before any changes to its metadata. + // Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag. 
+ if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return; + + if (m->alloc_type != alloc_type) { + if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) { + ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type, + (AllocType)alloc_type); + } + } else { + if (flags()->new_delete_type_mismatch && + (alloc_type == FROM_NEW || alloc_type == FROM_NEW_BR) && + ((delete_size && delete_size != m->UsedSize()) || + ComputeUserRequestedAlignmentLog(delete_alignment) != + m->user_requested_alignment_log)) { + ReportNewDeleteTypeMismatch(p, delete_size, delete_alignment, stack); + } + } + + QuarantineChunk(m, ptr, stack); + } + + void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) { + CHECK(old_ptr && new_size); + uptr p = reinterpret_cast<uptr>(old_ptr); + uptr chunk_beg = p - kChunkHeaderSize; + AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg); + + AsanStats &thread_stats = GetCurrentThreadStats(); + thread_stats.reallocs++; + thread_stats.realloced += new_size; + + void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true); + if (new_ptr) { + u8 chunk_state = m->chunk_state; + if (chunk_state != CHUNK_ALLOCATED) + ReportInvalidFree(old_ptr, chunk_state, stack); + CHECK_NE(REAL(memcpy), nullptr); + uptr memcpy_size = Min(new_size, m->UsedSize()); + // If realloc() races with free(), we may start copying freed memory. + // However, we will report racy double-free later anyway. + REAL(memcpy)(new_ptr, old_ptr, memcpy_size); + Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC); + } + return new_ptr; + } + + void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) { + if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) { + if (AllocatorMayReturnNull()) + return nullptr; + ReportCallocOverflow(nmemb, size, stack); + } + void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false); + // If the memory comes from the secondary allocator no need to clear it + // as it comes directly from mmap. + if (ptr && allocator.FromPrimary(ptr)) + REAL(memset)(ptr, 0, nmemb * size); + return ptr; + } + + void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) { + if (chunk_state == CHUNK_QUARANTINE) + ReportDoubleFree((uptr)ptr, stack); + else + ReportFreeNotMalloced((uptr)ptr, stack); + } + + void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) { + AllocatorCache *ac = GetAllocatorCache(ms); + quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack)); + allocator.SwallowCache(ac); + } + + // -------------------------- Chunk lookup ---------------------- + + // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg). + AsanChunk *GetAsanChunk(void *alloc_beg) { + if (!alloc_beg) return nullptr; + if (!allocator.FromPrimary(alloc_beg)) { + uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg)); + AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]); + return m; + } + uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg); + if (alloc_magic[0] == kAllocBegMagic) + return reinterpret_cast<AsanChunk *>(alloc_magic[1]); + return reinterpret_cast<AsanChunk *>(alloc_beg); + } + + AsanChunk *GetAsanChunkByAddr(uptr p) { + void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p)); + return GetAsanChunk(alloc_beg); + } + + // Allocator must be locked when this function is called. 
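// A sketch of the condition CheckForCallocOverflow (from
// sanitizer_common/sanitizer_allocator_checks.h) guards against in Calloc
// above; hypothetical standalone form:
//
//   bool WouldCallocOverflow(uptr nmemb, uptr size) {
//     return size != 0 && nmemb > (uptr)-1 / size;  // nmemb * size wraps
//   }
//
// On overflow ASan reports (or returns null) instead of allocating a block
// smaller than the caller computed.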
+ AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) { + void *alloc_beg = + allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p)); + return GetAsanChunk(alloc_beg); + } + + uptr AllocationSize(uptr p) { + AsanChunk *m = GetAsanChunkByAddr(p); + if (!m) return 0; + if (m->chunk_state != CHUNK_ALLOCATED) return 0; + if (m->Beg() != p) return 0; + return m->UsedSize(); + } + + AsanChunkView FindHeapChunkByAddress(uptr addr) { + AsanChunk *m1 = GetAsanChunkByAddr(addr); + if (!m1) return AsanChunkView(m1); + sptr offset = 0; + if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) { + // The address is in the chunk's left redzone, so maybe it is actually + // a right buffer overflow from the other chunk to the left. + // Search a bit to the left to see if there is another chunk. + AsanChunk *m2 = nullptr; + for (uptr l = 1; l < GetPageSizeCached(); l++) { + m2 = GetAsanChunkByAddr(addr - l); + if (m2 == m1) continue; // Still the same chunk. + break; + } + if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset)) + m1 = ChooseChunk(addr, m2, m1); + } + return AsanChunkView(m1); + } + + void Purge(BufferedStackTrace *stack) { + AsanThread *t = GetCurrentThread(); + if (t) { + AsanThreadLocalMallocStorage *ms = &t->malloc_storage(); + quarantine.DrainAndRecycle(GetQuarantineCache(ms), + QuarantineCallback(GetAllocatorCache(ms), + stack)); + } + { + SpinMutexLock l(&fallback_mutex); + quarantine.DrainAndRecycle(&fallback_quarantine_cache, + QuarantineCallback(&fallback_allocator_cache, + stack)); + } + + allocator.ForceReleaseToOS(); + } + + void PrintStats() { + allocator.PrintStats(); + quarantine.PrintStats(); + } + + void ForceLock() { + allocator.ForceLock(); + fallback_mutex.Lock(); + } + + void ForceUnlock() { + fallback_mutex.Unlock(); + allocator.ForceUnlock(); + } +}; + +static Allocator instance(LINKER_INITIALIZED); + +static AsanAllocator &get_allocator() { + return instance.allocator; +} + +bool AsanChunkView::IsValid() const { + return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE; +} +bool AsanChunkView::IsAllocated() const { + return chunk_ && chunk_->chunk_state == CHUNK_ALLOCATED; +} +bool AsanChunkView::IsQuarantined() const { + return chunk_ && chunk_->chunk_state == CHUNK_QUARANTINE; +} +uptr AsanChunkView::Beg() const { return chunk_->Beg(); } +uptr AsanChunkView::End() const { return Beg() + UsedSize(); } +uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); } +u32 AsanChunkView::UserRequestedAlignment() const { + return Allocator::ComputeUserAlignment(chunk_->user_requested_alignment_log); +} +uptr AsanChunkView::AllocTid() const { return chunk_->alloc_tid; } +uptr AsanChunkView::FreeTid() const { return chunk_->free_tid; } +AllocType AsanChunkView::GetAllocType() const { + return (AllocType)chunk_->alloc_type; +} + +static StackTrace GetStackTraceFromId(u32 id) { + CHECK(id); + StackTrace res = StackDepotGet(id); + CHECK(res.trace); + return res; +} + +u32 AsanChunkView::GetAllocStackId() const { return chunk_->alloc_context_id; } +u32 AsanChunkView::GetFreeStackId() const { return chunk_->free_context_id; } + +StackTrace AsanChunkView::GetAllocStack() const { + return GetStackTraceFromId(GetAllocStackId()); +} + +StackTrace AsanChunkView::GetFreeStack() const { + return GetStackTraceFromId(GetFreeStackId()); +} + +void InitializeAllocator(const AllocatorOptions &options) { + instance.InitLinkerInitialized(options); +} + +void ReInitializeAllocator(const AllocatorOptions &options) { + instance.ReInitialize(options); +} + +void 
GetAllocatorOptions(AllocatorOptions *options) { + instance.GetOptions(options); +} + +AsanChunkView FindHeapChunkByAddress(uptr addr) { + return instance.FindHeapChunkByAddress(addr); +} +AsanChunkView FindHeapChunkByAllocBeg(uptr addr) { + return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void*>(addr))); +} + +void AsanThreadLocalMallocStorage::CommitBack() { + GET_STACK_TRACE_MALLOC; + instance.CommitBack(this, &stack); +} + +void PrintInternalAllocatorStats() { + instance.PrintStats(); +} + +void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) { + instance.Deallocate(ptr, 0, 0, stack, alloc_type); +} + +void asan_delete(void *ptr, uptr size, uptr alignment, + BufferedStackTrace *stack, AllocType alloc_type) { + instance.Deallocate(ptr, size, alignment, stack, alloc_type); +} + +void *asan_malloc(uptr size, BufferedStackTrace *stack) { + return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true)); +} + +void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) { + return SetErrnoOnNull(instance.Calloc(nmemb, size, stack)); +} + +void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) { + if (!p) + return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true)); + if (size == 0) { + if (flags()->allocator_frees_and_returns_null_on_realloc_zero) { + instance.Deallocate(p, 0, 0, stack, FROM_MALLOC); + return nullptr; + } + // Allocate a size of 1 if we shouldn't free() on Realloc to 0 + size = 1; + } + return SetErrnoOnNull(instance.Reallocate(p, size, stack)); +} + +void *asan_valloc(uptr size, BufferedStackTrace *stack) { + return SetErrnoOnNull( + instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true)); +} + +void *asan_pvalloc(uptr size, BufferedStackTrace *stack) { + uptr PageSize = GetPageSizeCached(); + if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) { + errno = errno_ENOMEM; + if (AllocatorMayReturnNull()) + return nullptr; + ReportPvallocOverflow(size, stack); + } + // pvalloc(0) should allocate one page. + size = size ? RoundUpTo(size, PageSize) : PageSize; + return SetErrnoOnNull( + instance.Allocate(size, PageSize, stack, FROM_MALLOC, true)); +} + +void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack, + AllocType alloc_type) { + if (UNLIKELY(!IsPowerOfTwo(alignment))) { + errno = errno_EINVAL; + if (AllocatorMayReturnNull()) + return nullptr; + ReportInvalidAllocationAlignment(alignment, stack); + } + return SetErrnoOnNull( + instance.Allocate(size, alignment, stack, alloc_type, true)); +} + +void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) { + if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) { + errno = errno_EINVAL; + if (AllocatorMayReturnNull()) + return nullptr; + ReportInvalidAlignedAllocAlignment(size, alignment, stack); + } + return SetErrnoOnNull( + instance.Allocate(size, alignment, stack, FROM_MALLOC, true)); +} + +int asan_posix_memalign(void **memptr, uptr alignment, uptr size, + BufferedStackTrace *stack) { + if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) { + if (AllocatorMayReturnNull()) + return errno_EINVAL; + ReportInvalidPosixMemalignAlignment(alignment, stack); + } + void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true); + if (UNLIKELY(!ptr)) + // OOM error is already taken care of by Allocate. 
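// Worked example of the pvalloc sizing in asan_pvalloc above, assuming a
// 4096-byte page:
//   pvalloc(0)    -> size == 4096 (one page, per the pvalloc(0) comment)
//   pvalloc(1)    -> size == RoundUpTo(1, 4096) == 4096
//   pvalloc(4097) -> size == RoundUpTo(4097, 4096) == 8192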
+ return errno_ENOMEM; + CHECK(IsAligned((uptr)ptr, alignment)); + *memptr = ptr; + return 0; +} + +uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) { + if (!ptr) return 0; + uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr)); + if (flags()->check_malloc_usable_size && (usable_size == 0)) { + GET_STACK_TRACE_FATAL(pc, bp); + ReportMallocUsableSizeNotOwned((uptr)ptr, &stack); + } + return usable_size; +} + +uptr asan_mz_size(const void *ptr) { + return instance.AllocationSize(reinterpret_cast<uptr>(ptr)); +} + +void asan_mz_force_lock() { + instance.ForceLock(); +} + +void asan_mz_force_unlock() { + instance.ForceUnlock(); +} + +void AsanSoftRssLimitExceededCallback(bool limit_exceeded) { + instance.SetRssLimitExceeded(limit_exceeded); +} + +} // namespace __asan + +// --- Implementation of LSan-specific functions --- {{{1 +namespace __lsan { +void LockAllocator() { + __asan::get_allocator().ForceLock(); +} + +void UnlockAllocator() { + __asan::get_allocator().ForceUnlock(); +} + +void GetAllocatorGlobalRange(uptr *begin, uptr *end) { + *begin = (uptr)&__asan::get_allocator(); + *end = *begin + sizeof(__asan::get_allocator()); +} + +uptr PointsIntoChunk(void* p) { + uptr addr = reinterpret_cast<uptr>(p); + __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr); + if (!m) return 0; + uptr chunk = m->Beg(); + if (m->chunk_state != __asan::CHUNK_ALLOCATED) + return 0; + if (m->AddrIsInside(addr, /*locked_version=*/true)) + return chunk; + if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true), + addr)) + return chunk; + return 0; +} + +uptr GetUserBegin(uptr chunk) { + __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk); + CHECK(m); + return m->Beg(); +} + +LsanMetadata::LsanMetadata(uptr chunk) { + metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize); +} + +bool LsanMetadata::allocated() const { + __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); + return m->chunk_state == __asan::CHUNK_ALLOCATED; +} + +ChunkTag LsanMetadata::tag() const { + __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); + return static_cast<ChunkTag>(m->lsan_tag); +} + +void LsanMetadata::set_tag(ChunkTag value) { + __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); + m->lsan_tag = value; +} + +uptr LsanMetadata::requested_size() const { + __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); + return m->UsedSize(/*locked_version=*/true); +} + +u32 LsanMetadata::stack_trace_id() const { + __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); + return m->alloc_context_id; +} + +void ForEachChunk(ForEachChunkCallback callback, void *arg) { + __asan::get_allocator().ForEachChunk(callback, arg); +} + +IgnoreObjectResult IgnoreObjectLocked(const void *p) { + uptr addr = reinterpret_cast<uptr>(p); + __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr); + if (!m) return kIgnoreObjectInvalid; + if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) { + if (m->lsan_tag == kIgnored) + return kIgnoreObjectAlreadyIgnored; + m->lsan_tag = __lsan::kIgnored; + return kIgnoreObjectSuccess; + } else { + return kIgnoreObjectInvalid; + } +} +} // namespace __lsan + +// ---------------------- Interface ---------------- {{{1 +using namespace __asan; // NOLINT + +// ASan allocator doesn't reserve extra bytes, so normally we would +// just return "size". 
We don't want to expose our redzone sizes, etc here. +uptr __sanitizer_get_estimated_allocated_size(uptr size) { + return size; +} + +int __sanitizer_get_ownership(const void *p) { + uptr ptr = reinterpret_cast<uptr>(p); + return instance.AllocationSize(ptr) > 0; +} + +uptr __sanitizer_get_allocated_size(const void *p) { + if (!p) return 0; + uptr ptr = reinterpret_cast<uptr>(p); + uptr allocated_size = instance.AllocationSize(ptr); + // Die if p is not malloced or if it is already freed. + if (allocated_size == 0) { + GET_STACK_TRACE_FATAL_HERE; + ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack); + } + return allocated_size; +} + +void __sanitizer_purge_allocator() { + GET_STACK_TRACE_MALLOC; + instance.Purge(&stack); +} + +#if !SANITIZER_SUPPORTS_WEAK_HOOKS +// Provide default (no-op) implementation of malloc hooks. +SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook, + void *ptr, uptr size) { + (void)ptr; + (void)size; +} + +SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr) { + (void)ptr; +} +#endif diff --git a/contrib/compiler-rt/lib/asan/asan_allocator.h b/contrib/compiler-rt/lib/asan/asan_allocator.h new file mode 100644 index 000000000000..c9b37dc7a6d4 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_allocator.h @@ -0,0 +1,245 @@ +//===-- asan_allocator.h ----------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// ASan-private header for asan_allocator.cc. +//===----------------------------------------------------------------------===// + +#ifndef ASAN_ALLOCATOR_H +#define ASAN_ALLOCATOR_H + +#include "asan_flags.h" +#include "asan_internal.h" +#include "asan_interceptors.h" +#include "sanitizer_common/sanitizer_allocator.h" +#include "sanitizer_common/sanitizer_list.h" + +namespace __asan { + +enum AllocType { + FROM_MALLOC = 1, // Memory block came from malloc, calloc, realloc, etc. + FROM_NEW = 2, // Memory block came from operator new. + FROM_NEW_BR = 3 // Memory block came from operator new [ ] +}; + +struct AsanChunk; + +struct AllocatorOptions { + u32 quarantine_size_mb; + u32 thread_local_quarantine_size_kb; + u16 min_redzone; + u16 max_redzone; + u8 may_return_null; + u8 alloc_dealloc_mismatch; + s32 release_to_os_interval_ms; + + void SetFrom(const Flags *f, const CommonFlags *cf); + void CopyTo(Flags *f, CommonFlags *cf); +}; + +void InitializeAllocator(const AllocatorOptions &options); +void ReInitializeAllocator(const AllocatorOptions &options); +void GetAllocatorOptions(AllocatorOptions *options); + +class AsanChunkView { + public: + explicit AsanChunkView(AsanChunk *chunk) : chunk_(chunk) {} + bool IsValid() const; // Checks if AsanChunkView points to a valid + // allocated or quarantined chunk. + bool IsAllocated() const; // Checks if the memory is currently allocated. + bool IsQuarantined() const; // Checks if the memory is currently quarantined. + uptr Beg() const; // First byte of user memory. + uptr End() const; // Last byte of user memory. + uptr UsedSize() const; // Size requested by the user. + u32 UserRequestedAlignment() const; // Originally requested alignment. 
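// A hedged usage sketch for this view API (hypothetical caller, error
// handling omitted): inspect the heap chunk that owns an address.
//
//   AsanChunkView v = FindHeapChunkByAddress(addr);
//   if (v.IsAllocated()) {
//     uptr user_beg  = v.Beg();       // first byte of user memory
//     uptr user_size = v.UsedSize();  // size requested by the user
//     StackTrace alloc_stack = v.GetAllocStack();
//   }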
+ uptr AllocTid() const; + uptr FreeTid() const; + bool Eq(const AsanChunkView &c) const { return chunk_ == c.chunk_; } + u32 GetAllocStackId() const; + u32 GetFreeStackId() const; + StackTrace GetAllocStack() const; + StackTrace GetFreeStack() const; + AllocType GetAllocType() const; + bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) const { + if (addr >= Beg() && (addr + access_size) <= End()) { + *offset = addr - Beg(); + return true; + } + return false; + } + bool AddrIsAtLeft(uptr addr, uptr access_size, sptr *offset) const { + (void)access_size; + if (addr < Beg()) { + *offset = Beg() - addr; + return true; + } + return false; + } + bool AddrIsAtRight(uptr addr, uptr access_size, sptr *offset) const { + if (addr + access_size > End()) { + *offset = addr - End(); + return true; + } + return false; + } + + private: + AsanChunk *const chunk_; +}; + +AsanChunkView FindHeapChunkByAddress(uptr address); +AsanChunkView FindHeapChunkByAllocBeg(uptr address); + +// List of AsanChunks with total size. +class AsanChunkFifoList: public IntrusiveList<AsanChunk> { + public: + explicit AsanChunkFifoList(LinkerInitialized) { } + AsanChunkFifoList() { clear(); } + void Push(AsanChunk *n); + void PushList(AsanChunkFifoList *q); + AsanChunk *Pop(); + uptr size() { return size_; } + void clear() { + IntrusiveList<AsanChunk>::clear(); + size_ = 0; + } + private: + uptr size_; +}; + +struct AsanMapUnmapCallback { + void OnMap(uptr p, uptr size) const; + void OnUnmap(uptr p, uptr size) const; +}; + +#if SANITIZER_CAN_USE_ALLOCATOR64 +# if SANITIZER_FUCHSIA +const uptr kAllocatorSpace = ~(uptr)0; +const uptr kAllocatorSize = 0x40000000000ULL; // 4T. +typedef DefaultSizeClassMap SizeClassMap; +# elif defined(__powerpc64__) +const uptr kAllocatorSpace = ~(uptr)0; +const uptr kAllocatorSize = 0x20000000000ULL; // 2T. +typedef DefaultSizeClassMap SizeClassMap; +# elif defined(__aarch64__) && SANITIZER_ANDROID +// Android needs to support 39, 42 and 48 bit VMA. +const uptr kAllocatorSpace = ~(uptr)0; +const uptr kAllocatorSize = 0x2000000000ULL; // 128G. +typedef VeryCompactSizeClassMap SizeClassMap; +# elif defined(__aarch64__) +// AArch64/SANITIZER_CAN_USER_ALLOCATOR64 is only for 42-bit VMA +// so no need to different values for different VMA. +const uptr kAllocatorSpace = 0x10000000000ULL; +const uptr kAllocatorSize = 0x10000000000ULL; // 3T. +typedef DefaultSizeClassMap SizeClassMap; +# elif SANITIZER_WINDOWS +const uptr kAllocatorSpace = ~(uptr)0; +const uptr kAllocatorSize = 0x8000000000ULL; // 500G +typedef DefaultSizeClassMap SizeClassMap; +# else +const uptr kAllocatorSpace = 0x600000000000ULL; +const uptr kAllocatorSize = 0x40000000000ULL; // 4T. +typedef DefaultSizeClassMap SizeClassMap; +# endif +template <typename AddressSpaceViewTy> +struct AP64 { // Allocator64 parameters. Deliberately using a short name. + static const uptr kSpaceBeg = kAllocatorSpace; + static const uptr kSpaceSize = kAllocatorSize; + static const uptr kMetadataSize = 0; + typedef __asan::SizeClassMap SizeClassMap; + typedef AsanMapUnmapCallback MapUnmapCallback; + static const uptr kFlags = 0; + using AddressSpaceView = AddressSpaceViewTy; +}; + +template <typename AddressSpaceView> +using PrimaryAllocatorASVT = SizeClassAllocator64<AP64<AddressSpaceView>>; +using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>; +#else // Fallback to SizeClassAllocator32. 
+static const uptr kRegionSizeLog = 20; +static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog; +# if SANITIZER_WORDSIZE == 32 +template <typename AddressSpaceView> +using ByteMapASVT = FlatByteMap<kNumRegions, AddressSpaceView>; +# elif SANITIZER_WORDSIZE == 64 +template <typename AddressSpaceView> +using ByteMapASVT = + TwoLevelByteMap<(kNumRegions >> 12), 1 << 12, AddressSpaceView>; +# endif +typedef CompactSizeClassMap SizeClassMap; +template <typename AddressSpaceViewTy> +struct AP32 { + static const uptr kSpaceBeg = 0; + static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE; + static const uptr kMetadataSize = 16; + typedef __asan::SizeClassMap SizeClassMap; + static const uptr kRegionSizeLog = __asan::kRegionSizeLog; + using AddressSpaceView = AddressSpaceViewTy; + using ByteMap = __asan::ByteMapASVT<AddressSpaceView>; + typedef AsanMapUnmapCallback MapUnmapCallback; + static const uptr kFlags = 0; +}; +template <typename AddressSpaceView> +using PrimaryAllocatorASVT = SizeClassAllocator32<AP32<AddressSpaceView> >; +using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>; +#endif // SANITIZER_CAN_USE_ALLOCATOR64 + +static const uptr kNumberOfSizeClasses = SizeClassMap::kNumClasses; +template <typename AddressSpaceView> +using AllocatorCacheASVT = + SizeClassAllocatorLocalCache<PrimaryAllocatorASVT<AddressSpaceView>>; +using AllocatorCache = AllocatorCacheASVT<LocalAddressSpaceView>; + +template <typename AddressSpaceView> +using SecondaryAllocatorASVT = + LargeMmapAllocator<AsanMapUnmapCallback, DefaultLargeMmapAllocatorPtrArray, + AddressSpaceView>; +template <typename AddressSpaceView> +using AsanAllocatorASVT = + CombinedAllocator<PrimaryAllocatorASVT<AddressSpaceView>, + AllocatorCacheASVT<AddressSpaceView>, + SecondaryAllocatorASVT<AddressSpaceView>>; +using AsanAllocator = AsanAllocatorASVT<LocalAddressSpaceView>; + +struct AsanThreadLocalMallocStorage { + uptr quarantine_cache[16]; + AllocatorCache allocator_cache; + void CommitBack(); + private: + // These objects are allocated via mmap() and are zero-initialized. + AsanThreadLocalMallocStorage() {} +}; + +void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack, + AllocType alloc_type); +void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type); +void asan_delete(void *ptr, uptr size, uptr alignment, + BufferedStackTrace *stack, AllocType alloc_type); + +void *asan_malloc(uptr size, BufferedStackTrace *stack); +void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack); +void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack); +void *asan_valloc(uptr size, BufferedStackTrace *stack); +void *asan_pvalloc(uptr size, BufferedStackTrace *stack); + +void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack); +int asan_posix_memalign(void **memptr, uptr alignment, uptr size, + BufferedStackTrace *stack); +uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp); + +uptr asan_mz_size(const void *ptr); +void asan_mz_force_lock(); +void asan_mz_force_unlock(); + +void PrintInternalAllocatorStats(); +void AsanSoftRssLimitExceededCallback(bool exceeded); + +} // namespace __asan +#endif // ASAN_ALLOCATOR_H diff --git a/contrib/compiler-rt/lib/asan/asan_blacklist.txt b/contrib/compiler-rt/lib/asan/asan_blacklist.txt new file mode 100644 index 000000000000..c25921fd5fe7 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_blacklist.txt @@ -0,0 +1,13 @@ +# Blacklist for AddressSanitizer. 
Turns off instrumentation of particular +# functions or sources. Use with care. You may set location of blacklist +# at compile-time using -fsanitize-blacklist=<path> flag. + +# Example usage: +# fun:*bad_function_name* +# src:file_with_tricky_code.cc +# global:*global_with_bad_access_or_initialization* +# global:*global_with_initialization_issues*=init +# type:*Namespace::ClassName*=init + +# Stack buffer overflow in VC/INCLUDE/xlocnum, see http://goo.gl/L4qqUG +fun:*_Find_elem@*@std* diff --git a/contrib/compiler-rt/lib/asan/asan_debugging.cc b/contrib/compiler-rt/lib/asan/asan_debugging.cc new file mode 100644 index 000000000000..7c877ded30cf --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_debugging.cc @@ -0,0 +1,147 @@ +//===-- asan_debugging.cc -------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// This file contains various functions that are generally useful to call when +// using a debugger (LLDB, GDB). +//===----------------------------------------------------------------------===// + +#include "asan_allocator.h" +#include "asan_descriptions.h" +#include "asan_flags.h" +#include "asan_internal.h" +#include "asan_mapping.h" +#include "asan_report.h" +#include "asan_thread.h" + +namespace { +using namespace __asan; + +static void FindInfoForStackVar(uptr addr, const char *frame_descr, uptr offset, + char *name, uptr name_size, + uptr ®ion_address, uptr ®ion_size) { + InternalMmapVector<StackVarDescr> vars; + vars.reserve(16); + if (!ParseFrameDescription(frame_descr, &vars)) { + return; + } + + for (uptr i = 0; i < vars.size(); i++) { + if (offset <= vars[i].beg + vars[i].size) { + // We use name_len + 1 because strlcpy will guarantee a \0 at the end, so + // if we're limiting the copy due to name_len, we add 1 to ensure we copy + // the whole name and then terminate with '\0'. 
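// Worked example of the Min() bound in the internal_strlcpy call that
// follows, assuming name_size == 8 and a variable named "counter"
// (name_len == 7): Min(8, 7 + 1) == 8, so all seven characters are copied
// and the eighth byte gets the '\0'. With name_size == 4 only "cou" plus
// the terminator fit.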
+ internal_strlcpy(name, vars[i].name_pos, + Min(name_size, vars[i].name_len + 1)); + region_address = addr - (offset - vars[i].beg); + region_size = vars[i].size; + return; + } + } +} + +uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id, + bool alloc_stack) { + AsanChunkView chunk = FindHeapChunkByAddress(addr); + if (!chunk.IsValid()) return 0; + + StackTrace stack(nullptr, 0); + if (alloc_stack) { + if (chunk.AllocTid() == kInvalidTid) return 0; + stack = chunk.GetAllocStack(); + if (thread_id) *thread_id = chunk.AllocTid(); + } else { + if (chunk.FreeTid() == kInvalidTid) return 0; + stack = chunk.GetFreeStack(); + if (thread_id) *thread_id = chunk.FreeTid(); + } + + if (trace && size) { + size = Min(size, Min(stack.size, kStackTraceMax)); + for (uptr i = 0; i < size; i++) + trace[i] = StackTrace::GetPreviousInstructionPc(stack.trace[i]); + + return size; + } + + return 0; +} + +} // namespace + +SANITIZER_INTERFACE_ATTRIBUTE +const char *__asan_locate_address(uptr addr, char *name, uptr name_size, + uptr *region_address_ptr, + uptr *region_size_ptr) { + AddressDescription descr(addr); + uptr region_address = 0; + uptr region_size = 0; + const char *region_kind = nullptr; + if (name && name_size > 0) name[0] = 0; + + if (auto shadow = descr.AsShadow()) { + // region_{address,size} are already 0 + switch (shadow->kind) { + case kShadowKindLow: + region_kind = "low shadow"; + break; + case kShadowKindGap: + region_kind = "shadow gap"; + break; + case kShadowKindHigh: + region_kind = "high shadow"; + break; + } + } else if (auto heap = descr.AsHeap()) { + region_kind = "heap"; + region_address = heap->chunk_access.chunk_begin; + region_size = heap->chunk_access.chunk_size; + } else if (auto stack = descr.AsStack()) { + region_kind = "stack"; + if (!stack->frame_descr) { + // region_{address,size} are already 0 + } else { + FindInfoForStackVar(addr, stack->frame_descr, stack->offset, name, + name_size, region_address, region_size); + } + } else if (auto global = descr.AsGlobal()) { + region_kind = "global"; + auto &g = global->globals[0]; + internal_strlcpy(name, g.name, name_size); + region_address = g.beg; + region_size = g.size; + } else { + // region_{address,size} are already 0 + region_kind = "heap-invalid"; + } + + CHECK(region_kind); + if (region_address_ptr) *region_address_ptr = region_address; + if (region_size_ptr) *region_size_ptr = region_size; + return region_kind; +} + +SANITIZER_INTERFACE_ATTRIBUTE +uptr __asan_get_alloc_stack(uptr addr, uptr *trace, uptr size, u32 *thread_id) { + return AsanGetStack(addr, trace, size, thread_id, /* alloc_stack */ true); +} + +SANITIZER_INTERFACE_ATTRIBUTE +uptr __asan_get_free_stack(uptr addr, uptr *trace, uptr size, u32 *thread_id) { + return AsanGetStack(addr, trace, size, thread_id, /* alloc_stack */ false); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void __asan_get_shadow_mapping(uptr *shadow_scale, uptr *shadow_offset) { + if (shadow_scale) + *shadow_scale = SHADOW_SCALE; + if (shadow_offset) + *shadow_offset = SHADOW_OFFSET; +} diff --git a/contrib/compiler-rt/lib/asan/asan_descriptions.cc b/contrib/compiler-rt/lib/asan/asan_descriptions.cc new file mode 100644 index 000000000000..cdb562d975ff --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_descriptions.cc @@ -0,0 +1,502 @@ +//===-- asan_descriptions.cc ------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// ASan functions for getting information about an address and/or printing it. +//===----------------------------------------------------------------------===// + +#include "asan_descriptions.h" +#include "asan_mapping.h" +#include "asan_report.h" +#include "asan_stack.h" +#include "sanitizer_common/sanitizer_stackdepot.h" + +namespace __asan { + +AsanThreadIdAndName::AsanThreadIdAndName(AsanThreadContext *t) { + Init(t->tid, t->name); +} + +AsanThreadIdAndName::AsanThreadIdAndName(u32 tid) { + if (tid == kInvalidTid) { + Init(tid, ""); + } else { + asanThreadRegistry().CheckLocked(); + AsanThreadContext *t = GetThreadContextByTidLocked(tid); + Init(tid, t->name); + } +} + +void AsanThreadIdAndName::Init(u32 tid, const char *tname) { + int len = internal_snprintf(name, sizeof(name), "T%d", tid); + CHECK(((unsigned int)len) < sizeof(name)); + if (tname[0] != '\0') + internal_snprintf(&name[len], sizeof(name) - len, " (%s)", tname); +} + +void DescribeThread(AsanThreadContext *context) { + CHECK(context); + asanThreadRegistry().CheckLocked(); + // No need to announce the main thread. + if (context->tid == 0 || context->announced) { + return; + } + context->announced = true; + InternalScopedString str(1024); + str.append("Thread %s", AsanThreadIdAndName(context).c_str()); + if (context->parent_tid == kInvalidTid) { + str.append(" created by unknown thread\n"); + Printf("%s", str.data()); + return; + } + str.append(" created by %s here:\n", + AsanThreadIdAndName(context->parent_tid).c_str()); + Printf("%s", str.data()); + StackDepotGet(context->stack_id).Print(); + // Recursively describe the parent thread if needed.
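+ // (The 'announced' check above guarantees that each thread context is + // described at most once, so this recursion terminates even for long + // parent chains.)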
+ if (flags()->print_full_thread_history) { + AsanThreadContext *parent_context = + GetThreadContextByTidLocked(context->parent_tid); + DescribeThread(parent_context); + } +} + +// Shadow descriptions +static bool GetShadowKind(uptr addr, ShadowKind *shadow_kind) { + CHECK(!AddrIsInMem(addr)); + if (AddrIsInShadowGap(addr)) { + *shadow_kind = kShadowKindGap; + } else if (AddrIsInHighShadow(addr)) { + *shadow_kind = kShadowKindHigh; + } else if (AddrIsInLowShadow(addr)) { + *shadow_kind = kShadowKindLow; + } else { + CHECK(0 && "Address is not in memory and not in shadow?"); + return false; + } + return true; +} + +bool DescribeAddressIfShadow(uptr addr) { + ShadowAddressDescription descr; + if (!GetShadowAddressInformation(addr, &descr)) return false; + descr.Print(); + return true; +} + +bool GetShadowAddressInformation(uptr addr, ShadowAddressDescription *descr) { + if (AddrIsInMem(addr)) return false; + ShadowKind shadow_kind; + if (!GetShadowKind(addr, &shadow_kind)) return false; + if (shadow_kind != kShadowKindGap) descr->shadow_byte = *(u8 *)addr; + descr->addr = addr; + descr->kind = shadow_kind; + return true; +} + +// Heap descriptions +static void GetAccessToHeapChunkInformation(ChunkAccess *descr, + AsanChunkView chunk, uptr addr, + uptr access_size) { + descr->bad_addr = addr; + if (chunk.AddrIsAtLeft(addr, access_size, &descr->offset)) { + descr->access_type = kAccessTypeLeft; + } else if (chunk.AddrIsAtRight(addr, access_size, &descr->offset)) { + descr->access_type = kAccessTypeRight; + if (descr->offset < 0) { + descr->bad_addr -= descr->offset; + descr->offset = 0; + } + } else if (chunk.AddrIsInside(addr, access_size, &descr->offset)) { + descr->access_type = kAccessTypeInside; + } else { + descr->access_type = kAccessTypeUnknown; + } + descr->chunk_begin = chunk.Beg(); + descr->chunk_size = chunk.UsedSize(); + descr->user_requested_alignment = chunk.UserRequestedAlignment(); + descr->alloc_type = chunk.GetAllocType(); +} + +static void PrintHeapChunkAccess(uptr addr, const ChunkAccess &descr) { + Decorator d; + InternalScopedString str(4096); + str.append("%s", d.Location()); + switch (descr.access_type) { + case kAccessTypeLeft: + str.append("%p is located %zd bytes to the left of", + (void *)descr.bad_addr, descr.offset); + break; + case kAccessTypeRight: + str.append("%p is located %zd bytes to the right of", + (void *)descr.bad_addr, descr.offset); + break; + case kAccessTypeInside: + str.append("%p is located %zd bytes inside of", (void *)descr.bad_addr, + descr.offset); + break; + case kAccessTypeUnknown: + str.append( + "%p is located somewhere around (this is AddressSanitizer bug!)", + (void *)descr.bad_addr); + } + str.append(" %zu-byte region [%p,%p)\n", descr.chunk_size, + (void *)descr.chunk_begin, + (void *)(descr.chunk_begin + descr.chunk_size)); + str.append("%s", d.Default()); + Printf("%s", str.data()); +} + +bool GetHeapAddressInformation(uptr addr, uptr access_size, + HeapAddressDescription *descr) { + AsanChunkView chunk = FindHeapChunkByAddress(addr); + if (!chunk.IsValid()) { + return false; + } + descr->addr = addr; + GetAccessToHeapChunkInformation(&descr->chunk_access, chunk, addr, + access_size); + CHECK_NE(chunk.AllocTid(), kInvalidTid); + descr->alloc_tid = chunk.AllocTid(); + descr->alloc_stack_id = chunk.GetAllocStackId(); + descr->free_tid = chunk.FreeTid(); + if (descr->free_tid != kInvalidTid) + descr->free_stack_id = chunk.GetFreeStackId(); + return true; +} + +static StackTrace GetStackTraceFromId(u32 id) { + CHECK(id); + StackTrace 
res = StackDepotGet(id); + CHECK(res.trace); + return res; +} + +bool DescribeAddressIfHeap(uptr addr, uptr access_size) { + HeapAddressDescription descr; + if (!GetHeapAddressInformation(addr, access_size, &descr)) { + Printf( + "AddressSanitizer can not describe address in more detail " + "(wild memory access suspected).\n"); + return false; + } + descr.Print(); + return true; +} + +// Stack descriptions +bool GetStackAddressInformation(uptr addr, uptr access_size, + StackAddressDescription *descr) { + AsanThread *t = FindThreadByStackAddress(addr); + if (!t) return false; + + descr->addr = addr; + descr->tid = t->tid(); + // Try to fetch precise stack frame for this access. + AsanThread::StackFrameAccess access; + if (!t->GetStackFrameAccessByAddr(addr, &access)) { + descr->frame_descr = nullptr; + return true; + } + + descr->offset = access.offset; + descr->access_size = access_size; + descr->frame_pc = access.frame_pc; + descr->frame_descr = access.frame_descr; + +#if SANITIZER_PPC64V1 + // On PowerPC64 ELFv1, the address of a function actually points to a + // three-doubleword data structure with the first field containing + // the address of the function's code. + descr->frame_pc = *reinterpret_cast<uptr *>(descr->frame_pc); +#endif + descr->frame_pc += 16; + + return true; +} + +static void PrintAccessAndVarIntersection(const StackVarDescr &var, uptr addr, + uptr access_size, uptr prev_var_end, + uptr next_var_beg) { + uptr var_end = var.beg + var.size; + uptr addr_end = addr + access_size; + const char *pos_descr = nullptr; + // If the variable [var.beg, var_end) is the nearest variable to the + // current memory access, indicate it in the log. + if (addr >= var.beg) { + if (addr_end <= var_end) + pos_descr = "is inside"; // May happen if this is a use-after-return. + else if (addr < var_end) + pos_descr = "partially overflows"; + else if (addr_end <= next_var_beg && + next_var_beg - addr_end >= addr - var_end) + pos_descr = "overflows"; + } else { + if (addr_end > var.beg) + pos_descr = "partially underflows"; + else if (addr >= prev_var_end && addr - prev_var_end >= var.beg - addr_end) + pos_descr = "underflows"; + } + InternalScopedString str(1024); + str.append(" [%zd, %zd)", var.beg, var_end); + // Render variable name. + str.append(" '"); + for (uptr i = 0; i < var.name_len; ++i) { + str.append("%c", var.name_pos[i]); + } + str.append("'"); + if (var.line > 0) { + str.append(" (line %d)", var.line); + } + if (pos_descr) { + Decorator d; + // FIXME: we may want to also print the size of the access here, + // but in case of accesses generated by memset it may be confusing. 
+ str.append("%s <== Memory access at offset %zd %s this variable%s\n", + d.Location(), addr, pos_descr, d.Default()); + } else { + str.append("\n"); + } + Printf("%s", str.data()); +} + +bool DescribeAddressIfStack(uptr addr, uptr access_size) { + StackAddressDescription descr; + if (!GetStackAddressInformation(addr, access_size, &descr)) return false; + descr.Print(); + return true; +} + +// Global descriptions +static void DescribeAddressRelativeToGlobal(uptr addr, uptr access_size, + const __asan_global &g) { + InternalScopedString str(4096); + Decorator d; + str.append("%s", d.Location()); + if (addr < g.beg) { + str.append("%p is located %zd bytes to the left", (void *)addr, + g.beg - addr); + } else if (addr + access_size > g.beg + g.size) { + if (addr < g.beg + g.size) addr = g.beg + g.size; + str.append("%p is located %zd bytes to the right", (void *)addr, + addr - (g.beg + g.size)); + } else { + // Can it happen? + str.append("%p is located %zd bytes inside", (void *)addr, addr - g.beg); + } + str.append(" of global variable '%s' defined in '", + MaybeDemangleGlobalName(g.name)); + PrintGlobalLocation(&str, g); + str.append("' (0x%zx) of size %zu\n", g.beg, g.size); + str.append("%s", d.Default()); + PrintGlobalNameIfASCII(&str, g); + Printf("%s", str.data()); +} + +bool GetGlobalAddressInformation(uptr addr, uptr access_size, + GlobalAddressDescription *descr) { + descr->addr = addr; + int globals_num = GetGlobalsForAddress(addr, descr->globals, descr->reg_sites, + ARRAY_SIZE(descr->globals)); + descr->size = globals_num; + descr->access_size = access_size; + return globals_num != 0; +} + +bool DescribeAddressIfGlobal(uptr addr, uptr access_size, + const char *bug_type) { + GlobalAddressDescription descr; + if (!GetGlobalAddressInformation(addr, access_size, &descr)) return false; + + descr.Print(bug_type); + return true; +} + +void ShadowAddressDescription::Print() const { + Printf("Address %p is located in the %s area.\n", addr, ShadowNames[kind]); +} + +void GlobalAddressDescription::Print(const char *bug_type) const { + for (int i = 0; i < size; i++) { + DescribeAddressRelativeToGlobal(addr, access_size, globals[i]); + if (bug_type && + 0 == internal_strcmp(bug_type, "initialization-order-fiasco") && + reg_sites[i]) { + Printf(" registered at:\n"); + StackDepotGet(reg_sites[i]).Print(); + } + } +} + +bool GlobalAddressDescription::PointsInsideTheSameVariable( + const GlobalAddressDescription &other) const { + if (size == 0 || other.size == 0) return false; + + for (uptr i = 0; i < size; i++) { + const __asan_global &a = globals[i]; + for (uptr j = 0; j < other.size; j++) { + const __asan_global &b = other.globals[j]; + if (a.beg == b.beg && + a.beg <= addr && + b.beg <= other.addr && + (addr + access_size) < (a.beg + a.size) && + (other.addr + other.access_size) < (b.beg + b.size)) + return true; + } + } + + return false; +} + +void StackAddressDescription::Print() const { + Decorator d; + Printf("%s", d.Location()); + Printf("Address %p is located in stack of thread %s", addr, + AsanThreadIdAndName(tid).c_str()); + + if (!frame_descr) { + Printf("%s\n", d.Default()); + return; + } + Printf(" at offset %zu in frame%s\n", offset, d.Default()); + + // Now we print the frame where the alloca has happened. + // We print this frame as a stack trace with one element. + // The symbolizer may print more than one frame if inlining was involved. + // The frame numbers may be different than those in the stack trace printed + // previously. 
That's unfortunate, but I have no better solution, + // especially given that the alloca may be from an entirely different place + // (e.g. use-after-scope, or different thread's stack). + Printf("%s", d.Default()); + StackTrace alloca_stack(&frame_pc, 1); + alloca_stack.Print(); + + InternalMmapVector<StackVarDescr> vars; + vars.reserve(16); + if (!ParseFrameDescription(frame_descr, &vars)) { + Printf( + "AddressSanitizer can't parse the stack frame " + "descriptor: |%s|\n", + frame_descr); + // 'addr' is a stack address, so return true even if we can't parse the + // frame. + return; + } + uptr n_objects = vars.size(); + // Report the number of stack objects. + Printf(" This frame has %zu object(s):\n", n_objects); + + // Report all objects in this frame. + for (uptr i = 0; i < n_objects; i++) { + uptr prev_var_end = i ? vars[i - 1].beg + vars[i - 1].size : 0; + uptr next_var_beg = i + 1 < n_objects ? vars[i + 1].beg : ~(0UL); + PrintAccessAndVarIntersection(vars[i], offset, access_size, prev_var_end, + next_var_beg); + } + Printf( + "HINT: this may be a false positive if your program uses " + "some custom stack unwind mechanism, swapcontext or vfork\n"); + if (SANITIZER_WINDOWS) + Printf(" (longjmp, SEH and C++ exceptions *are* supported)\n"); + else + Printf(" (longjmp and C++ exceptions *are* supported)\n"); + + DescribeThread(GetThreadContextByTidLocked(tid)); +} + +void HeapAddressDescription::Print() const { + PrintHeapChunkAccess(addr, chunk_access); + + asanThreadRegistry().CheckLocked(); + AsanThreadContext *alloc_thread = GetThreadContextByTidLocked(alloc_tid); + StackTrace alloc_stack = GetStackTraceFromId(alloc_stack_id); + + Decorator d; + AsanThreadContext *free_thread = nullptr; + if (free_tid != kInvalidTid) { + free_thread = GetThreadContextByTidLocked(free_tid); + Printf("%sfreed by thread %s here:%s\n", d.Allocation(), + AsanThreadIdAndName(free_thread).c_str(), d.Default()); + StackTrace free_stack = GetStackTraceFromId(free_stack_id); + free_stack.Print(); + Printf("%spreviously allocated by thread %s here:%s\n", d.Allocation(), + AsanThreadIdAndName(alloc_thread).c_str(), d.Default()); + } else { + Printf("%sallocated by thread %s here:%s\n", d.Allocation(), + AsanThreadIdAndName(alloc_thread).c_str(), d.Default()); + } + alloc_stack.Print(); + DescribeThread(GetCurrentThread()); + if (free_thread) DescribeThread(free_thread); + DescribeThread(alloc_thread); +} + +AddressDescription::AddressDescription(uptr addr, uptr access_size, + bool shouldLockThreadRegistry) { + if (GetShadowAddressInformation(addr, &data.shadow)) { + data.kind = kAddressKindShadow; + return; + } + if (GetHeapAddressInformation(addr, access_size, &data.heap)) { + data.kind = kAddressKindHeap; + return; + } + + bool isStackMemory = false; + if (shouldLockThreadRegistry) { + ThreadRegistryLock l(&asanThreadRegistry()); + isStackMemory = GetStackAddressInformation(addr, access_size, &data.stack); + } else { + isStackMemory = GetStackAddressInformation(addr, access_size, &data.stack); + } + if (isStackMemory) { + data.kind = kAddressKindStack; + return; + } + + if (GetGlobalAddressInformation(addr, access_size, &data.global)) { + data.kind = kAddressKindGlobal; + return; + } + data.kind = kAddressKindWild; + addr = 0; +} + +void PrintAddressDescription(uptr addr, uptr access_size, + const char *bug_type) { + ShadowAddressDescription shadow_descr; + if (GetShadowAddressInformation(addr, &shadow_descr)) { + shadow_descr.Print(); + return; + } + + GlobalAddressDescription global_descr; + if
(GetGlobalAddressInformation(addr, access_size, &global_descr)) { + global_descr.Print(bug_type); + return; + } + + StackAddressDescription stack_descr; + if (GetStackAddressInformation(addr, access_size, &stack_descr)) { + stack_descr.Print(); + return; + } + + HeapAddressDescription heap_descr; + if (GetHeapAddressInformation(addr, access_size, &heap_descr)) { + heap_descr.Print(); + return; + } + + // We exhausted our possibilities. Bail out. + Printf( + "AddressSanitizer can not describe address in more detail " + "(wild memory access suspected).\n"); +} +} // namespace __asan diff --git a/contrib/compiler-rt/lib/asan/asan_descriptions.h b/contrib/compiler-rt/lib/asan/asan_descriptions.h new file mode 100644 index 000000000000..5c2d7662a35a --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_descriptions.h @@ -0,0 +1,263 @@ +//===-- asan_descriptions.h -------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// ASan-private header for asan_descriptions.cc. +// TODO(filcab): Most struct definitions should move to the interface headers. +//===----------------------------------------------------------------------===// +#ifndef ASAN_DESCRIPTIONS_H +#define ASAN_DESCRIPTIONS_H + +#include "asan_allocator.h" +#include "asan_thread.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_report_decorator.h" + +namespace __asan { + +void DescribeThread(AsanThreadContext *context); +static inline void DescribeThread(AsanThread *t) { + if (t) DescribeThread(t->context()); +} + +class AsanThreadIdAndName { + public: + explicit AsanThreadIdAndName(AsanThreadContext *t); + explicit AsanThreadIdAndName(u32 tid); + + // Contains "T%tid (%name)" or "T%tid" if the name is empty. 
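+ // For example: "T3 (worker)", or just "T3" if the thread has no name.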
+ const char *c_str() const { return &name[0]; } + + private: + void Init(u32 tid, const char *tname); + + char name[128]; +}; + +class Decorator : public __sanitizer::SanitizerCommonDecorator { + public: + Decorator() : SanitizerCommonDecorator() {} + const char *Access() { return Blue(); } + const char *Location() { return Green(); } + const char *Allocation() { return Magenta(); } + + const char *ShadowByte(u8 byte) { + switch (byte) { + case kAsanHeapLeftRedzoneMagic: + case kAsanArrayCookieMagic: + return Red(); + case kAsanHeapFreeMagic: + return Magenta(); + case kAsanStackLeftRedzoneMagic: + case kAsanStackMidRedzoneMagic: + case kAsanStackRightRedzoneMagic: + return Red(); + case kAsanStackAfterReturnMagic: + return Magenta(); + case kAsanInitializationOrderMagic: + return Cyan(); + case kAsanUserPoisonedMemoryMagic: + case kAsanContiguousContainerOOBMagic: + case kAsanAllocaLeftMagic: + case kAsanAllocaRightMagic: + return Blue(); + case kAsanStackUseAfterScopeMagic: + return Magenta(); + case kAsanGlobalRedzoneMagic: + return Red(); + case kAsanInternalHeapMagic: + return Yellow(); + case kAsanIntraObjectRedzone: + return Yellow(); + default: + return Default(); + } + } +}; + +enum ShadowKind : u8 { + kShadowKindLow, + kShadowKindGap, + kShadowKindHigh, +}; +static const char *const ShadowNames[] = {"low shadow", "shadow gap", + "high shadow"}; + +struct ShadowAddressDescription { + uptr addr; + ShadowKind kind; + u8 shadow_byte; + + void Print() const; +}; + +bool GetShadowAddressInformation(uptr addr, ShadowAddressDescription *descr); +bool DescribeAddressIfShadow(uptr addr); + +enum AccessType { + kAccessTypeLeft, + kAccessTypeRight, + kAccessTypeInside, + kAccessTypeUnknown, // This means we have an AddressSanitizer bug! +}; + +struct ChunkAccess { + uptr bad_addr; + sptr offset; + uptr chunk_begin; + uptr chunk_size; + u32 user_requested_alignment : 12; + u32 access_type : 2; + u32 alloc_type : 2; +}; + +struct HeapAddressDescription { + uptr addr; + uptr alloc_tid; + uptr free_tid; + u32 alloc_stack_id; + u32 free_stack_id; + ChunkAccess chunk_access; + + void Print() const; +}; + +bool GetHeapAddressInformation(uptr addr, uptr access_size, + HeapAddressDescription *descr); +bool DescribeAddressIfHeap(uptr addr, uptr access_size = 1); + +struct StackAddressDescription { + uptr addr; + uptr tid; + uptr offset; + uptr frame_pc; + uptr access_size; + const char *frame_descr; + + void Print() const; +}; + +bool GetStackAddressInformation(uptr addr, uptr access_size, + StackAddressDescription *descr); + +struct GlobalAddressDescription { + uptr addr; + // Assume address is close to at most four globals. + static const int kMaxGlobals = 4; + __asan_global globals[kMaxGlobals]; + u32 reg_sites[kMaxGlobals]; + uptr access_size; + u8 size; + + void Print(const char *bug_type = "") const; + + // Returns true when this description points inside the same global variable + // as the other one. The two descriptions may have different addresses within + // the variable. + bool PointsInsideTheSameVariable(const GlobalAddressDescription &other) const; +}; + +bool GetGlobalAddressInformation(uptr addr, uptr access_size, + GlobalAddressDescription *descr); +bool DescribeAddressIfGlobal(uptr addr, uptr access_size, const char *bug_type); + +// General function to describe an address. Will try to describe the address as +// a shadow, global (variable), stack, or heap address.
+// bug_type is optional and is used for checking if we're reporting an +// initialization-order-fiasco +// The proper access_size should be passed for stack, global, and heap +// addresses. Defaults to 1. +// Each of the *AddressDescription functions has its own Print() member, which +// may take access_size and bug_type parameters if needed. +void PrintAddressDescription(uptr addr, uptr access_size = 1, + const char *bug_type = ""); + +enum AddressKind { + kAddressKindWild, + kAddressKindShadow, + kAddressKindHeap, + kAddressKindStack, + kAddressKindGlobal, +}; + +class AddressDescription { + struct AddressDescriptionData { + AddressKind kind; + union { + ShadowAddressDescription shadow; + HeapAddressDescription heap; + StackAddressDescription stack; + GlobalAddressDescription global; + uptr addr; + }; + }; + + AddressDescriptionData data; + + public: + AddressDescription() = default; + // shouldLockThreadRegistry allows us to skip locking if we're sure we already + // have done it. + AddressDescription(uptr addr, bool shouldLockThreadRegistry = true) + : AddressDescription(addr, 1, shouldLockThreadRegistry) {} + AddressDescription(uptr addr, uptr access_size, + bool shouldLockThreadRegistry = true); + + uptr Address() const { + switch (data.kind) { + case kAddressKindWild: + return data.addr; + case kAddressKindShadow: + return data.shadow.addr; + case kAddressKindHeap: + return data.heap.addr; + case kAddressKindStack: + return data.stack.addr; + case kAddressKindGlobal: + return data.global.addr; + } + UNREACHABLE("AddressInformation kind is invalid"); + } + void Print(const char *bug_descr = nullptr) const { + switch (data.kind) { + case kAddressKindWild: + Printf("Address %p is a wild pointer.\n", data.addr); + return; + case kAddressKindShadow: + return data.shadow.Print(); + case kAddressKindHeap: + return data.heap.Print(); + case kAddressKindStack: + return data.stack.Print(); + case kAddressKindGlobal: + // initialization-order-fiasco has a special Print() + return data.global.Print(bug_descr); + } + UNREACHABLE("AddressInformation kind is invalid"); + } + + void StoreTo(AddressDescriptionData *dst) const { *dst = data; } + + const ShadowAddressDescription *AsShadow() const { + return data.kind == kAddressKindShadow ? &data.shadow : nullptr; + } + const HeapAddressDescription *AsHeap() const { + return data.kind == kAddressKindHeap ? &data.heap : nullptr; + } + const StackAddressDescription *AsStack() const { + return data.kind == kAddressKindStack ? &data.stack : nullptr; + } + const GlobalAddressDescription *AsGlobal() const { + return data.kind == kAddressKindGlobal ? &data.global : nullptr; + } +}; + +} // namespace __asan + +#endif // ASAN_DESCRIPTIONS_H diff --git a/contrib/compiler-rt/lib/asan/asan_errors.cc b/contrib/compiler-rt/lib/asan/asan_errors.cc new file mode 100644 index 000000000000..0ecd30dcacfd --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_errors.cc @@ -0,0 +1,585 @@ +//===-- asan_errors.cc ------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// ASan implementation for error structures. 
+//===----------------------------------------------------------------------===// + +#include "asan_errors.h" +#include "asan_descriptions.h" +#include "asan_mapping.h" +#include "asan_report.h" +#include "asan_stack.h" +#include "sanitizer_common/sanitizer_stackdepot.h" + +namespace __asan { + +static void OnStackUnwind(const SignalContext &sig, + const void *callback_context, + BufferedStackTrace *stack) { + bool fast = common_flags()->fast_unwind_on_fatal; +#if SANITIZER_FREEBSD || SANITIZER_NETBSD + // On FreeBSD the slow unwinding that leverages _Unwind_Backtrace() + // yields the call stack of the signal's handler and not of the code + // that raised the signal (as it does on Linux). + fast = true; +#endif + // Tests and maybe some users expect the scariness score to be printed + // just before the stack. Since only ASan has a scariness score, there is + // no corresponding code in sanitizer_common, so we use this callback to + // print it. + static_cast<const ScarinessScoreBase *>(callback_context)->Print(); + GetStackTrace(stack, kStackTraceMax, sig.pc, sig.bp, sig.context, fast); +} + +void ErrorDeadlySignal::Print() { + ReportDeadlySignal(signal, tid, &OnStackUnwind, &scariness); +} + +void ErrorDoubleFree::Print() { + Decorator d; + Printf("%s", d.Error()); + Report( + "ERROR: AddressSanitizer: attempting %s on %p in thread %s:\n", + scariness.GetDescription(), addr_description.addr, + AsanThreadIdAndName(tid).c_str()); + Printf("%s", d.Default()); + scariness.Print(); + GET_STACK_TRACE_FATAL(second_free_stack->trace[0], + second_free_stack->top_frame_bp); + stack.Print(); + addr_description.Print(); + ReportErrorSummary(scariness.GetDescription(), &stack); +} + +void ErrorNewDeleteTypeMismatch::Print() { + Decorator d; + Printf("%s", d.Error()); + Report( + "ERROR: AddressSanitizer: %s on %p in thread %s:\n", + scariness.GetDescription(), addr_description.addr, + AsanThreadIdAndName(tid).c_str()); + Printf("%s object passed to delete has wrong type:\n", d.Default()); + if (delete_size != 0) { + Printf( + " size of the allocated type: %zd bytes;\n" + " size of the deallocated type: %zd bytes.\n", + addr_description.chunk_access.chunk_size, delete_size); + } + const uptr user_alignment = + addr_description.chunk_access.user_requested_alignment; + if (delete_alignment != user_alignment) { + char user_alignment_str[32]; + char delete_alignment_str[32]; + internal_snprintf(user_alignment_str, sizeof(user_alignment_str), + "%zd bytes", user_alignment); + internal_snprintf(delete_alignment_str, sizeof(delete_alignment_str), + "%zd bytes", delete_alignment); + static const char *kDefaultAlignment = "default-aligned"; + Printf( + " alignment of the allocated type: %s;\n" + " alignment of the deallocated type: %s.\n", + user_alignment > 0 ? user_alignment_str : kDefaultAlignment, + delete_alignment > 0 ?
delete_alignment_str : kDefaultAlignment); + } + CHECK_GT(free_stack->size, 0); + scariness.Print(); + GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp); + stack.Print(); + addr_description.Print(); + ReportErrorSummary(scariness.GetDescription(), &stack); + Report( + "HINT: if you don't care about these errors you may set " + "ASAN_OPTIONS=new_delete_type_mismatch=0\n"); +} + +void ErrorFreeNotMalloced::Print() { + Decorator d; + Printf("%s", d.Error()); + Report( + "ERROR: AddressSanitizer: attempting free on address " + "which was not malloc()-ed: %p in thread %s\n", + addr_description.Address(), AsanThreadIdAndName(tid).c_str()); + Printf("%s", d.Default()); + CHECK_GT(free_stack->size, 0); + scariness.Print(); + GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp); + stack.Print(); + addr_description.Print(); + ReportErrorSummary(scariness.GetDescription(), &stack); +} + +void ErrorAllocTypeMismatch::Print() { + static const char *alloc_names[] = {"INVALID", "malloc", "operator new", + "operator new []"}; + static const char *dealloc_names[] = {"INVALID", "free", "operator delete", + "operator delete []"}; + CHECK_NE(alloc_type, dealloc_type); + Decorator d; + Printf("%s", d.Error()); + Report("ERROR: AddressSanitizer: %s (%s vs %s) on %p\n", + scariness.GetDescription(), alloc_names[alloc_type], + dealloc_names[dealloc_type], addr_description.Address()); + Printf("%s", d.Default()); + CHECK_GT(dealloc_stack->size, 0); + scariness.Print(); + GET_STACK_TRACE_FATAL(dealloc_stack->trace[0], dealloc_stack->top_frame_bp); + stack.Print(); + addr_description.Print(); + ReportErrorSummary(scariness.GetDescription(), &stack); + Report( + "HINT: if you don't care about these errors you may set " + "ASAN_OPTIONS=alloc_dealloc_mismatch=0\n"); +} + +void ErrorMallocUsableSizeNotOwned::Print() { + Decorator d; + Printf("%s", d.Error()); + Report( + "ERROR: AddressSanitizer: attempting to call malloc_usable_size() for " + "pointer which is not owned: %p\n", + addr_description.Address()); + Printf("%s", d.Default()); + stack->Print(); + addr_description.Print(); + ReportErrorSummary(scariness.GetDescription(), stack); +} + +void ErrorSanitizerGetAllocatedSizeNotOwned::Print() { + Decorator d; + Printf("%s", d.Error()); + Report( + "ERROR: AddressSanitizer: attempting to call " + "__sanitizer_get_allocated_size() for pointer which is not owned: %p\n", + addr_description.Address()); + Printf("%s", d.Default()); + stack->Print(); + addr_description.Print(); + ReportErrorSummary(scariness.GetDescription(), stack); +} + +void ErrorCallocOverflow::Print() { + Decorator d; + Printf("%s", d.Error()); + Report( + "ERROR: AddressSanitizer: calloc parameters overflow: count * size " + "(%zd * %zd) cannot be represented in type size_t (thread %s)\n", + count, size, AsanThreadIdAndName(tid).c_str()); + Printf("%s", d.Default()); + stack->Print(); + PrintHintAllocatorCannotReturnNull(); + ReportErrorSummary(scariness.GetDescription(), stack); +} + +void ErrorPvallocOverflow::Print() { + Decorator d; + Printf("%s", d.Error()); + Report( + "ERROR: AddressSanitizer: pvalloc parameters overflow: size 0x%zx " + "rounded up to system page size 0x%zx cannot be represented in type " + "size_t (thread %s)\n", + size, GetPageSizeCached(), AsanThreadIdAndName(tid).c_str()); + Printf("%s", d.Default()); + stack->Print(); + PrintHintAllocatorCannotReturnNull(); + ReportErrorSummary(scariness.GetDescription(), stack); +} + +void ErrorInvalidAllocationAlignment::Print() { + Decorator d; + 
Printf("%s", d.Error()); + Report( + "ERROR: AddressSanitizer: invalid allocation alignment: %zd, " + "alignment must be a power of two (thread %s)\n", + alignment, AsanThreadIdAndName(tid).c_str()); + Printf("%s", d.Default()); + stack->Print(); + PrintHintAllocatorCannotReturnNull(); + ReportErrorSummary(scariness.GetDescription(), stack); +} + +void ErrorInvalidAlignedAllocAlignment::Print() { + Decorator d; + Printf("%s", d.Error()); +#if SANITIZER_POSIX + Report("ERROR: AddressSanitizer: invalid alignment requested in " + "aligned_alloc: %zd, alignment must be a power of two and the " + "requested size 0x%zx must be a multiple of alignment " + "(thread %s)\n", alignment, size, AsanThreadIdAndName(tid).c_str()); +#else + Report("ERROR: AddressSanitizer: invalid alignment requested in " + "aligned_alloc: %zd, the requested size 0x%zx must be a multiple of " + "alignment (thread %s)\n", alignment, size, + AsanThreadIdAndName(tid).c_str()); +#endif + Printf("%s", d.Default()); + stack->Print(); + PrintHintAllocatorCannotReturnNull(); + ReportErrorSummary(scariness.GetDescription(), stack); +} + +void ErrorInvalidPosixMemalignAlignment::Print() { + Decorator d; + Printf("%s", d.Error()); + Report( + "ERROR: AddressSanitizer: invalid alignment requested in posix_memalign: " + "%zd, alignment must be a power of two and a multiple of sizeof(void*) " + "== %zd (thread %s)\n", + alignment, sizeof(void*), AsanThreadIdAndName(tid).c_str()); // NOLINT + Printf("%s", d.Default()); + stack->Print(); + PrintHintAllocatorCannotReturnNull(); + ReportErrorSummary(scariness.GetDescription(), stack); +} + +void ErrorAllocationSizeTooBig::Print() { + Decorator d; + Printf("%s", d.Error()); + Report( + "ERROR: AddressSanitizer: requested allocation size 0x%zx (0x%zx after " + "adjustments for alignment, red zones etc.) 
exceeds maximum supported " + "size of 0x%zx (thread %s)\n", + user_size, total_size, max_size, AsanThreadIdAndName(tid).c_str()); + Printf("%s", d.Default()); + stack->Print(); + PrintHintAllocatorCannotReturnNull(); + ReportErrorSummary(scariness.GetDescription(), stack); +} + +void ErrorRssLimitExceeded::Print() { + Decorator d; + Printf("%s", d.Error()); + Report( + "ERROR: AddressSanitizer: specified RSS limit exceeded, currently set to " + "soft_rss_limit_mb=%zd\n", common_flags()->soft_rss_limit_mb); + Printf("%s", d.Default()); + stack->Print(); + PrintHintAllocatorCannotReturnNull(); + ReportErrorSummary(scariness.GetDescription(), stack); +} + +void ErrorOutOfMemory::Print() { + Decorator d; + Printf("%s", d.Error()); + Report( + "ERROR: AddressSanitizer: allocator is out of memory trying to allocate " + "0x%zx bytes\n", requested_size); + Printf("%s", d.Default()); + stack->Print(); + PrintHintAllocatorCannotReturnNull(); + ReportErrorSummary(scariness.GetDescription(), stack); +} + +void ErrorStringFunctionMemoryRangesOverlap::Print() { + Decorator d; + char bug_type[100]; + internal_snprintf(bug_type, sizeof(bug_type), "%s-param-overlap", function); + Printf("%s", d.Error()); + Report( + "ERROR: AddressSanitizer: %s: memory ranges [%p,%p) and [%p, %p) " + "overlap\n", + bug_type, addr1_description.Address(), + addr1_description.Address() + length1, addr2_description.Address(), + addr2_description.Address() + length2); + Printf("%s", d.Default()); + scariness.Print(); + stack->Print(); + addr1_description.Print(); + addr2_description.Print(); + ReportErrorSummary(bug_type, stack); +} + +void ErrorStringFunctionSizeOverflow::Print() { + Decorator d; + Printf("%s", d.Error()); + Report("ERROR: AddressSanitizer: %s: (size=%zd)\n", + scariness.GetDescription(), size); + Printf("%s", d.Default()); + scariness.Print(); + stack->Print(); + addr_description.Print(); + ReportErrorSummary(scariness.GetDescription(), stack); +} + +void ErrorBadParamsToAnnotateContiguousContainer::Print() { + Report( + "ERROR: AddressSanitizer: bad parameters to " + "__sanitizer_annotate_contiguous_container:\n" + " beg : %p\n" + " end : %p\n" + " old_mid : %p\n" + " new_mid : %p\n", + beg, end, old_mid, new_mid); + uptr granularity = SHADOW_GRANULARITY; + if (!IsAligned(beg, granularity)) + Report("ERROR: beg is not aligned by %d\n", granularity); + stack->Print(); + ReportErrorSummary(scariness.GetDescription(), stack); +} + +void ErrorODRViolation::Print() { + Decorator d; + Printf("%s", d.Error()); + Report("ERROR: AddressSanitizer: %s (%p):\n", scariness.GetDescription(), + global1.beg); + Printf("%s", d.Default()); + InternalScopedString g1_loc(256), g2_loc(256); + PrintGlobalLocation(&g1_loc, global1); + PrintGlobalLocation(&g2_loc, global2); + Printf(" [1] size=%zd '%s' %s\n", global1.size, + MaybeDemangleGlobalName(global1.name), g1_loc.data()); + Printf(" [2] size=%zd '%s' %s\n", global2.size, + MaybeDemangleGlobalName(global2.name), g2_loc.data()); + if (stack_id1 && stack_id2) { + Printf("These globals were registered at these points:\n"); + Printf(" [1]:\n"); + StackDepotGet(stack_id1).Print(); + Printf(" [2]:\n"); + StackDepotGet(stack_id2).Print(); + } + Report( + "HINT: if you don't care about these errors you may set " + "ASAN_OPTIONS=detect_odr_violation=0\n"); + InternalScopedString error_msg(256); + error_msg.append("%s: global '%s' at %s", scariness.GetDescription(), + MaybeDemangleGlobalName(global1.name), g1_loc.data()); + ReportErrorSummary(error_msg.data()); +} + +void 
ErrorInvalidPointerPair::Print() { + Decorator d; + Printf("%s", d.Error()); + Report("ERROR: AddressSanitizer: %s: %p %p\n", scariness.GetDescription(), + addr1_description.Address(), addr2_description.Address()); + Printf("%s", d.Default()); + GET_STACK_TRACE_FATAL(pc, bp); + stack.Print(); + addr1_description.Print(); + addr2_description.Print(); + ReportErrorSummary(scariness.GetDescription(), &stack); +} + +static bool AdjacentShadowValuesAreFullyPoisoned(u8 *s) { + return s[-1] > 127 && s[1] > 127; +} + +ErrorGeneric::ErrorGeneric(u32 tid, uptr pc_, uptr bp_, uptr sp_, uptr addr, + bool is_write_, uptr access_size_) + : ErrorBase(tid), + addr_description(addr, access_size_, /*shouldLockThreadRegistry=*/false), + pc(pc_), + bp(bp_), + sp(sp_), + access_size(access_size_), + is_write(is_write_), + shadow_val(0) { + scariness.Clear(); + if (access_size) { + if (access_size <= 9) { + char desr[] = "?-byte"; + desr[0] = '0' + access_size; + scariness.Scare(access_size + access_size / 2, desr); + } else if (access_size >= 10) { + scariness.Scare(15, "multi-byte"); + } + is_write ? scariness.Scare(20, "write") : scariness.Scare(1, "read"); + + // Determine the error type. + bug_descr = "unknown-crash"; + if (AddrIsInMem(addr)) { + u8 *shadow_addr = (u8 *)MemToShadow(addr); + // If we are accessing 16 bytes, look at the second shadow byte. + if (*shadow_addr == 0 && access_size > SHADOW_GRANULARITY) shadow_addr++; + // If we are in the partial right redzone, look at the next shadow byte. + if (*shadow_addr > 0 && *shadow_addr < 128) shadow_addr++; + bool far_from_bounds = false; + shadow_val = *shadow_addr; + int bug_type_score = 0; + // For use-after-frees reads are almost as bad as writes. + int read_after_free_bonus = 0; + switch (shadow_val) { + case kAsanHeapLeftRedzoneMagic: + case kAsanArrayCookieMagic: + bug_descr = "heap-buffer-overflow"; + bug_type_score = 10; + far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr); + break; + case kAsanHeapFreeMagic: + bug_descr = "heap-use-after-free"; + bug_type_score = 20; + if (!is_write) read_after_free_bonus = 18; + break; + case kAsanStackLeftRedzoneMagic: + bug_descr = "stack-buffer-underflow"; + bug_type_score = 25; + far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr); + break; + case kAsanInitializationOrderMagic: + bug_descr = "initialization-order-fiasco"; + bug_type_score = 1; + break; + case kAsanStackMidRedzoneMagic: + case kAsanStackRightRedzoneMagic: + bug_descr = "stack-buffer-overflow"; + bug_type_score = 25; + far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr); + break; + case kAsanStackAfterReturnMagic: + bug_descr = "stack-use-after-return"; + bug_type_score = 30; + if (!is_write) read_after_free_bonus = 18; + break; + case kAsanUserPoisonedMemoryMagic: + bug_descr = "use-after-poison"; + bug_type_score = 20; + break; + case kAsanContiguousContainerOOBMagic: + bug_descr = "container-overflow"; + bug_type_score = 10; + break; + case kAsanStackUseAfterScopeMagic: + bug_descr = "stack-use-after-scope"; + bug_type_score = 10; + break; + case kAsanGlobalRedzoneMagic: + bug_descr = "global-buffer-overflow"; + bug_type_score = 10; + far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr); + break; + case kAsanIntraObjectRedzone: + bug_descr = "intra-object-overflow"; + bug_type_score = 10; + break; + case kAsanAllocaLeftMagic: + case kAsanAllocaRightMagic: + bug_descr = "dynamic-stack-buffer-overflow"; + bug_type_score = 25; + far_from_bounds = 
AdjacentShadowValuesAreFullyPoisoned(shadow_addr); + break; + } + scariness.Scare(bug_type_score + read_after_free_bonus, bug_descr); + if (far_from_bounds) scariness.Scare(10, "far-from-bounds"); + } + } +} + +static void PrintContainerOverflowHint() { + Printf("HINT: if you don't care about these errors you may set " + "ASAN_OPTIONS=detect_container_overflow=0.\n" + "If you suspect a false positive see also: " + "https://github.com/google/sanitizers/wiki/" + "AddressSanitizerContainerOverflow.\n"); +} + +static void PrintShadowByte(InternalScopedString *str, const char *before, + u8 byte, const char *after = "\n") { + PrintMemoryByte(str, before, byte, /*in_shadow*/true, after); +} + +static void PrintLegend(InternalScopedString *str) { + str->append( + "Shadow byte legend (one shadow byte represents %d " + "application bytes):\n", + (int)SHADOW_GRANULARITY); + PrintShadowByte(str, " Addressable: ", 0); + str->append(" Partially addressable: "); + for (u8 i = 1; i < SHADOW_GRANULARITY; i++) PrintShadowByte(str, "", i, " "); + str->append("\n"); + PrintShadowByte(str, " Heap left redzone: ", + kAsanHeapLeftRedzoneMagic); + PrintShadowByte(str, " Freed heap region: ", kAsanHeapFreeMagic); + PrintShadowByte(str, " Stack left redzone: ", + kAsanStackLeftRedzoneMagic); + PrintShadowByte(str, " Stack mid redzone: ", + kAsanStackMidRedzoneMagic); + PrintShadowByte(str, " Stack right redzone: ", + kAsanStackRightRedzoneMagic); + PrintShadowByte(str, " Stack after return: ", + kAsanStackAfterReturnMagic); + PrintShadowByte(str, " Stack use after scope: ", + kAsanStackUseAfterScopeMagic); + PrintShadowByte(str, " Global redzone: ", kAsanGlobalRedzoneMagic); + PrintShadowByte(str, " Global init order: ", + kAsanInitializationOrderMagic); + PrintShadowByte(str, " Poisoned by user: ", + kAsanUserPoisonedMemoryMagic); + PrintShadowByte(str, " Container overflow: ", + kAsanContiguousContainerOOBMagic); + PrintShadowByte(str, " Array cookie: ", + kAsanArrayCookieMagic); + PrintShadowByte(str, " Intra object redzone: ", + kAsanIntraObjectRedzone); + PrintShadowByte(str, " ASan internal: ", kAsanInternalHeapMagic); + PrintShadowByte(str, " Left alloca redzone: ", kAsanAllocaLeftMagic); + PrintShadowByte(str, " Right alloca redzone: ", kAsanAllocaRightMagic); + PrintShadowByte(str, " Shadow gap: ", kAsanShadowGap); +} + +static void PrintShadowBytes(InternalScopedString *str, const char *before, + u8 *bytes, u8 *guilty, uptr n) { + Decorator d; + if (before) str->append("%s%p:", before, bytes); + for (uptr i = 0; i < n; i++) { + u8 *p = bytes + i; + const char *before = + p == guilty ? "[" : (p - 1 == guilty && i != 0) ? "" : " "; + const char *after = p == guilty ? "]" : ""; + PrintShadowByte(str, before, *p, after); + } + str->append("\n"); +} + +static void PrintShadowMemoryForAddress(uptr addr) { + if (!AddrIsInMem(addr)) return; + uptr shadow_addr = MemToShadow(addr); + const uptr n_bytes_per_row = 16; + uptr aligned_shadow = shadow_addr & ~(n_bytes_per_row - 1); + InternalScopedString str(4096 * 8); + str.append("Shadow bytes around the buggy address:\n"); + for (int i = -5; i <= 5; i++) { + uptr row_shadow_addr = aligned_shadow + i * n_bytes_per_row; + // Skip rows that would be outside the shadow range. This can happen when + // the user address is near the bottom, top, or shadow gap of the address + // space. + if (!AddrIsInShadow(row_shadow_addr)) continue; + const char *prefix = (i == 0) ? 
"=>" : " "; + PrintShadowBytes(&str, prefix, (u8 *)row_shadow_addr, (u8 *)shadow_addr, + n_bytes_per_row); + } + if (flags()->print_legend) PrintLegend(&str); + Printf("%s", str.data()); +} + +void ErrorGeneric::Print() { + Decorator d; + Printf("%s", d.Error()); + uptr addr = addr_description.Address(); + Report("ERROR: AddressSanitizer: %s on address %p at pc %p bp %p sp %p\n", + bug_descr, (void *)addr, pc, bp, sp); + Printf("%s", d.Default()); + + Printf("%s%s of size %zu at %p thread %s%s\n", d.Access(), + access_size ? (is_write ? "WRITE" : "READ") : "ACCESS", access_size, + (void *)addr, AsanThreadIdAndName(tid).c_str(), d.Default()); + + scariness.Print(); + GET_STACK_TRACE_FATAL(pc, bp); + stack.Print(); + + // Pass bug_descr because we have a special case for + // initialization-order-fiasco + addr_description.Print(bug_descr); + if (shadow_val == kAsanContiguousContainerOOBMagic) + PrintContainerOverflowHint(); + ReportErrorSummary(bug_descr, &stack); + PrintShadowMemoryForAddress(addr); +} + +} // namespace __asan diff --git a/contrib/compiler-rt/lib/asan/asan_errors.h b/contrib/compiler-rt/lib/asan/asan_errors.h new file mode 100644 index 000000000000..7ddd7e94e0fc --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_errors.h @@ -0,0 +1,438 @@ +//===-- asan_errors.h -------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// ASan-private header for error structures. +//===----------------------------------------------------------------------===// +#ifndef ASAN_ERRORS_H +#define ASAN_ERRORS_H + +#include "asan_descriptions.h" +#include "asan_scariness_score.h" +#include "sanitizer_common/sanitizer_common.h" + +namespace __asan { + +// (*) VS2013 does not implement unrestricted unions, so we need a trivial +// default constructor explicitly defined for each particular error. + +// None of the error classes own the stack traces mentioned in them. 
+ +struct ErrorBase { + ScarinessScoreBase scariness; + u32 tid; + + ErrorBase() = default; // (*) + explicit ErrorBase(u32 tid_) : tid(tid_) {} + ErrorBase(u32 tid_, int initial_score, const char *reason) : tid(tid_) { + scariness.Clear(); + scariness.Scare(initial_score, reason); + } +}; + +struct ErrorDeadlySignal : ErrorBase { + SignalContext signal; + + ErrorDeadlySignal() = default; // (*) + ErrorDeadlySignal(u32 tid, const SignalContext &sig) + : ErrorBase(tid), + signal(sig) { + scariness.Clear(); + if (signal.IsStackOverflow()) { + scariness.Scare(10, "stack-overflow"); + } else if (!signal.is_memory_access) { + scariness.Scare(10, "signal"); + } else if (signal.addr < GetPageSizeCached()) { + scariness.Scare(10, "null-deref"); + } else if (signal.addr == signal.pc) { + scariness.Scare(60, "wild-jump"); + } else if (signal.write_flag == SignalContext::WRITE) { + scariness.Scare(30, "wild-addr-write"); + } else if (signal.write_flag == SignalContext::READ) { + scariness.Scare(20, "wild-addr-read"); + } else { + scariness.Scare(25, "wild-addr"); + } + } + void Print(); +}; + +struct ErrorDoubleFree : ErrorBase { + const BufferedStackTrace *second_free_stack; + HeapAddressDescription addr_description; + + ErrorDoubleFree() = default; // (*) + ErrorDoubleFree(u32 tid, BufferedStackTrace *stack, uptr addr) + : ErrorBase(tid, 42, "double-free"), + second_free_stack(stack) { + CHECK_GT(second_free_stack->size, 0); + GetHeapAddressInformation(addr, 1, &addr_description); + } + void Print(); +}; + +struct ErrorNewDeleteTypeMismatch : ErrorBase { + const BufferedStackTrace *free_stack; + HeapAddressDescription addr_description; + uptr delete_size; + uptr delete_alignment; + + ErrorNewDeleteTypeMismatch() = default; // (*) + ErrorNewDeleteTypeMismatch(u32 tid, BufferedStackTrace *stack, uptr addr, + uptr delete_size_, uptr delete_alignment_) + : ErrorBase(tid, 10, "new-delete-type-mismatch"), + free_stack(stack), + delete_size(delete_size_), + delete_alignment(delete_alignment_) { + GetHeapAddressInformation(addr, 1, &addr_description); + } + void Print(); +}; + +struct ErrorFreeNotMalloced : ErrorBase { + const BufferedStackTrace *free_stack; + AddressDescription addr_description; + + ErrorFreeNotMalloced() = default; // (*) + ErrorFreeNotMalloced(u32 tid, BufferedStackTrace *stack, uptr addr) + : ErrorBase(tid, 40, "bad-free"), + free_stack(stack), + addr_description(addr, /*shouldLockThreadRegistry=*/false) {} + void Print(); +}; + +struct ErrorAllocTypeMismatch : ErrorBase { + const BufferedStackTrace *dealloc_stack; + AllocType alloc_type, dealloc_type; + AddressDescription addr_description; + + ErrorAllocTypeMismatch() = default; // (*) + ErrorAllocTypeMismatch(u32 tid, BufferedStackTrace *stack, uptr addr, + AllocType alloc_type_, AllocType dealloc_type_) + : ErrorBase(tid, 10, "alloc-dealloc-mismatch"), + dealloc_stack(stack), + alloc_type(alloc_type_), + dealloc_type(dealloc_type_), + addr_description(addr, 1, false) {} + void Print(); +}; + +struct ErrorMallocUsableSizeNotOwned : ErrorBase { + const BufferedStackTrace *stack; + AddressDescription addr_description; + + ErrorMallocUsableSizeNotOwned() = default; // (*) + ErrorMallocUsableSizeNotOwned(u32 tid, BufferedStackTrace *stack_, uptr addr) + : ErrorBase(tid, 10, "bad-malloc_usable_size"), + stack(stack_), + addr_description(addr, /*shouldLockThreadRegistry=*/false) {} + void Print(); +}; + +struct ErrorSanitizerGetAllocatedSizeNotOwned : ErrorBase { + const BufferedStackTrace *stack; + AddressDescription addr_description; + + 
ErrorSanitizerGetAllocatedSizeNotOwned() = default; // (*) + ErrorSanitizerGetAllocatedSizeNotOwned(u32 tid, BufferedStackTrace *stack_, + uptr addr) + : ErrorBase(tid, 10, "bad-__sanitizer_get_allocated_size"), + stack(stack_), + addr_description(addr, /*shouldLockThreadRegistry=*/false) {} + void Print(); +}; + +struct ErrorCallocOverflow : ErrorBase { + const BufferedStackTrace *stack; + uptr count; + uptr size; + + ErrorCallocOverflow() = default; // (*) + ErrorCallocOverflow(u32 tid, BufferedStackTrace *stack_, uptr count_, + uptr size_) + : ErrorBase(tid, 10, "calloc-overflow"), + stack(stack_), + count(count_), + size(size_) {} + void Print(); +}; + +struct ErrorPvallocOverflow : ErrorBase { + const BufferedStackTrace *stack; + uptr size; + + ErrorPvallocOverflow() = default; // (*) + ErrorPvallocOverflow(u32 tid, BufferedStackTrace *stack_, uptr size_) + : ErrorBase(tid, 10, "pvalloc-overflow"), + stack(stack_), + size(size_) {} + void Print(); +}; + +struct ErrorInvalidAllocationAlignment : ErrorBase { + const BufferedStackTrace *stack; + uptr alignment; + + ErrorInvalidAllocationAlignment() = default; // (*) + ErrorInvalidAllocationAlignment(u32 tid, BufferedStackTrace *stack_, + uptr alignment_) + : ErrorBase(tid, 10, "invalid-allocation-alignment"), + stack(stack_), + alignment(alignment_) {} + void Print(); +}; + +struct ErrorInvalidAlignedAllocAlignment : ErrorBase { + const BufferedStackTrace *stack; + uptr size; + uptr alignment; + + ErrorInvalidAlignedAllocAlignment() = default; // (*) + ErrorInvalidAlignedAllocAlignment(u32 tid, BufferedStackTrace *stack_, + uptr size_, uptr alignment_) + : ErrorBase(tid, 10, "invalid-aligned-alloc-alignment"), + stack(stack_), + size(size_), + alignment(alignment_) {} + void Print(); +}; + +struct ErrorInvalidPosixMemalignAlignment : ErrorBase { + const BufferedStackTrace *stack; + uptr alignment; + + ErrorInvalidPosixMemalignAlignment() = default; // (*) + ErrorInvalidPosixMemalignAlignment(u32 tid, BufferedStackTrace *stack_, + uptr alignment_) + : ErrorBase(tid, 10, "invalid-posix-memalign-alignment"), + stack(stack_), + alignment(alignment_) {} + void Print(); +}; + +struct ErrorAllocationSizeTooBig : ErrorBase { + const BufferedStackTrace *stack; + uptr user_size; + uptr total_size; + uptr max_size; + + ErrorAllocationSizeTooBig() = default; // (*) + ErrorAllocationSizeTooBig(u32 tid, BufferedStackTrace *stack_, + uptr user_size_, uptr total_size_, uptr max_size_) + : ErrorBase(tid, 10, "allocation-size-too-big"), + stack(stack_), + user_size(user_size_), + total_size(total_size_), + max_size(max_size_) {} + void Print(); +}; + +struct ErrorRssLimitExceeded : ErrorBase { + const BufferedStackTrace *stack; + + ErrorRssLimitExceeded() = default; // (*) + ErrorRssLimitExceeded(u32 tid, BufferedStackTrace *stack_) + : ErrorBase(tid, 10, "rss-limit-exceeded"), + stack(stack_) {} + void Print(); +}; + +struct ErrorOutOfMemory : ErrorBase { + const BufferedStackTrace *stack; + uptr requested_size; + + ErrorOutOfMemory() = default; // (*) + ErrorOutOfMemory(u32 tid, BufferedStackTrace *stack_, uptr requested_size_) + : ErrorBase(tid, 10, "out-of-memory"), + stack(stack_), + requested_size(requested_size_) {} + void Print(); +}; + +struct ErrorStringFunctionMemoryRangesOverlap : ErrorBase { + const BufferedStackTrace *stack; + uptr length1, length2; + AddressDescription addr1_description; + AddressDescription addr2_description; + const char *function; + + ErrorStringFunctionMemoryRangesOverlap() = default; // (*) + 
ErrorStringFunctionMemoryRangesOverlap(u32 tid, BufferedStackTrace *stack_, + uptr addr1, uptr length1_, uptr addr2, + uptr length2_, const char *function_) + : ErrorBase(tid), + stack(stack_), + length1(length1_), + length2(length2_), + addr1_description(addr1, length1, /*shouldLockThreadRegistry=*/false), + addr2_description(addr2, length2, /*shouldLockThreadRegistry=*/false), + function(function_) { + char bug_type[100]; + internal_snprintf(bug_type, sizeof(bug_type), "%s-param-overlap", function); + scariness.Clear(); + scariness.Scare(10, bug_type); + } + void Print(); +}; + +struct ErrorStringFunctionSizeOverflow : ErrorBase { + const BufferedStackTrace *stack; + AddressDescription addr_description; + uptr size; + + ErrorStringFunctionSizeOverflow() = default; // (*) + ErrorStringFunctionSizeOverflow(u32 tid, BufferedStackTrace *stack_, + uptr addr, uptr size_) + : ErrorBase(tid, 10, "negative-size-param"), + stack(stack_), + addr_description(addr, /*shouldLockThreadRegistry=*/false), + size(size_) {} + void Print(); +}; + +struct ErrorBadParamsToAnnotateContiguousContainer : ErrorBase { + const BufferedStackTrace *stack; + uptr beg, end, old_mid, new_mid; + + ErrorBadParamsToAnnotateContiguousContainer() = default; // (*) + // PS4: Do we want an AddressDescription for beg? + ErrorBadParamsToAnnotateContiguousContainer(u32 tid, + BufferedStackTrace *stack_, + uptr beg_, uptr end_, + uptr old_mid_, uptr new_mid_) + : ErrorBase(tid, 10, "bad-__sanitizer_annotate_contiguous_container"), + stack(stack_), + beg(beg_), + end(end_), + old_mid(old_mid_), + new_mid(new_mid_) {} + void Print(); +}; + +struct ErrorODRViolation : ErrorBase { + __asan_global global1, global2; + u32 stack_id1, stack_id2; + + ErrorODRViolation() = default; // (*) + ErrorODRViolation(u32 tid, const __asan_global *g1, u32 stack_id1_, + const __asan_global *g2, u32 stack_id2_) + : ErrorBase(tid, 10, "odr-violation"), + global1(*g1), + global2(*g2), + stack_id1(stack_id1_), + stack_id2(stack_id2_) {} + void Print(); +}; + +struct ErrorInvalidPointerPair : ErrorBase { + uptr pc, bp, sp; + AddressDescription addr1_description; + AddressDescription addr2_description; + + ErrorInvalidPointerPair() = default; // (*) + ErrorInvalidPointerPair(u32 tid, uptr pc_, uptr bp_, uptr sp_, uptr p1, + uptr p2) + : ErrorBase(tid, 10, "invalid-pointer-pair"), + pc(pc_), + bp(bp_), + sp(sp_), + addr1_description(p1, 1, /*shouldLockThreadRegistry=*/false), + addr2_description(p2, 1, /*shouldLockThreadRegistry=*/false) {} + void Print(); +}; + +struct ErrorGeneric : ErrorBase { + AddressDescription addr_description; + uptr pc, bp, sp; + uptr access_size; + const char *bug_descr; + bool is_write; + u8 shadow_val; + + ErrorGeneric() = default; // (*) + ErrorGeneric(u32 tid, uptr addr, uptr pc_, uptr bp_, uptr sp_, bool is_write_, + uptr access_size_); + void Print(); +}; + +// clang-format off +#define ASAN_FOR_EACH_ERROR_KIND(macro) \ + macro(DeadlySignal) \ + macro(DoubleFree) \ + macro(NewDeleteTypeMismatch) \ + macro(FreeNotMalloced) \ + macro(AllocTypeMismatch) \ + macro(MallocUsableSizeNotOwned) \ + macro(SanitizerGetAllocatedSizeNotOwned) \ + macro(CallocOverflow) \ + macro(PvallocOverflow) \ + macro(InvalidAllocationAlignment) \ + macro(InvalidAlignedAllocAlignment) \ + macro(InvalidPosixMemalignAlignment) \ + macro(AllocationSizeTooBig) \ + macro(RssLimitExceeded) \ + macro(OutOfMemory) \ + macro(StringFunctionMemoryRangesOverlap) \ + macro(StringFunctionSizeOverflow) \ + macro(BadParamsToAnnotateContiguousContainer) \ + 
macro(ODRViolation) \ + macro(InvalidPointerPair) \ + macro(Generic) +// clang-format on + +#define ASAN_DEFINE_ERROR_KIND(name) kErrorKind##name, +#define ASAN_ERROR_DESCRIPTION_MEMBER(name) Error##name name; +#define ASAN_ERROR_DESCRIPTION_CONSTRUCTOR(name) \ + ErrorDescription(Error##name const &e) : kind(kErrorKind##name), name(e) {} +#define ASAN_ERROR_DESCRIPTION_PRINT(name) \ + case kErrorKind##name: \ + return name.Print(); + +enum ErrorKind { + kErrorKindInvalid = 0, + ASAN_FOR_EACH_ERROR_KIND(ASAN_DEFINE_ERROR_KIND) +}; + +struct ErrorDescription { + ErrorKind kind; + // We're using a tagged union because it allows us to have a trivially + // copiable type and use the same structures as the public interface. + // + // We can add a wrapper around it to make it "more c++-like", but that would + // add a lot of code and the benefit wouldn't be that big. + union { + ErrorBase Base; + ASAN_FOR_EACH_ERROR_KIND(ASAN_ERROR_DESCRIPTION_MEMBER) + }; + + ErrorDescription() { internal_memset(this, 0, sizeof(*this)); } + explicit ErrorDescription(LinkerInitialized) {} + ASAN_FOR_EACH_ERROR_KIND(ASAN_ERROR_DESCRIPTION_CONSTRUCTOR) + + bool IsValid() { return kind != kErrorKindInvalid; } + void Print() { + switch (kind) { + ASAN_FOR_EACH_ERROR_KIND(ASAN_ERROR_DESCRIPTION_PRINT) + case kErrorKindInvalid: + CHECK(0); + } + CHECK(0); + } +}; + +#undef ASAN_FOR_EACH_ERROR_KIND +#undef ASAN_DEFINE_ERROR_KIND +#undef ASAN_ERROR_DESCRIPTION_MEMBER +#undef ASAN_ERROR_DESCRIPTION_CONSTRUCTOR +#undef ASAN_ERROR_DESCRIPTION_PRINT + +} // namespace __asan + +#endif // ASAN_ERRORS_H diff --git a/contrib/compiler-rt/lib/asan/asan_fake_stack.cc b/contrib/compiler-rt/lib/asan/asan_fake_stack.cc new file mode 100644 index 000000000000..1c6184e3c7fc --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_fake_stack.cc @@ -0,0 +1,283 @@ +//===-- asan_fake_stack.cc ------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// FakeStack is used to detect use-after-return bugs. +//===----------------------------------------------------------------------===// + +#include "asan_allocator.h" +#include "asan_poisoning.h" +#include "asan_thread.h" + +namespace __asan { + +static const u64 kMagic1 = kAsanStackAfterReturnMagic; +static const u64 kMagic2 = (kMagic1 << 8) | kMagic1; +static const u64 kMagic4 = (kMagic2 << 16) | kMagic2; +static const u64 kMagic8 = (kMagic4 << 32) | kMagic4; + +static const u64 kAllocaRedzoneSize = 32UL; +static const u64 kAllocaRedzoneMask = 31UL; + +// For small size classes inline PoisonShadow for better performance. +ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) { + u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr)); + if (SHADOW_SCALE == 3 && class_id <= 6) { + // This code expects SHADOW_SCALE=3. + for (uptr i = 0; i < (((uptr)1) << class_id); i++) { + shadow[i] = magic; + // Make sure this does not become memset. + SanitizerBreakOptimization(nullptr); + } + } else { + // The size class is too big, it's cheaper to poison only size bytes. 
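// [Editorial note, illustrative] kMagic1 above equals
// kAsanStackAfterReturnMagic (0xf5), so kMagic8 is 0xf5f5f5f5f5f5f5f5: in the
// unrolled path each u64 store paints 8 shadow bytes (64 application bytes),
// and 1 << class_id stores cover the whole frame, since a class-N frame is
// 64 << N bytes long.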
+ PoisonShadow(ptr, size, static_cast<u8>(magic)); + } +} + +FakeStack *FakeStack::Create(uptr stack_size_log) { + static uptr kMinStackSizeLog = 16; + static uptr kMaxStackSizeLog = FIRST_32_SECOND_64(24, 28); + if (stack_size_log < kMinStackSizeLog) + stack_size_log = kMinStackSizeLog; + if (stack_size_log > kMaxStackSizeLog) + stack_size_log = kMaxStackSizeLog; + uptr size = RequiredSize(stack_size_log); + FakeStack *res = reinterpret_cast<FakeStack *>( + flags()->uar_noreserve ? MmapNoReserveOrDie(size, "FakeStack") + : MmapOrDie(size, "FakeStack")); + res->stack_size_log_ = stack_size_log; + u8 *p = reinterpret_cast<u8 *>(res); + VReport(1, "T%d: FakeStack created: %p -- %p stack_size_log: %zd; " + "mmapped %zdK, noreserve=%d \n", + GetCurrentTidOrInvalid(), p, + p + FakeStack::RequiredSize(stack_size_log), stack_size_log, + size >> 10, flags()->uar_noreserve); + return res; +} + +void FakeStack::Destroy(int tid) { + PoisonAll(0); + if (Verbosity() >= 2) { + InternalScopedString str(kNumberOfSizeClasses * 50); + for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) + str.append("%zd: %zd/%zd; ", class_id, hint_position_[class_id], + NumberOfFrames(stack_size_log(), class_id)); + Report("T%d: FakeStack destroyed: %s\n", tid, str.data()); + } + uptr size = RequiredSize(stack_size_log_); + FlushUnneededASanShadowMemory(reinterpret_cast<uptr>(this), size); + UnmapOrDie(this, size); +} + +void FakeStack::PoisonAll(u8 magic) { + PoisonShadow(reinterpret_cast<uptr>(this), RequiredSize(stack_size_log()), + magic); +} + +#if !defined(_MSC_VER) || defined(__clang__) +ALWAYS_INLINE USED +#endif +FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id, + uptr real_stack) { + CHECK_LT(class_id, kNumberOfSizeClasses); + if (needs_gc_) + GC(real_stack); + uptr &hint_position = hint_position_[class_id]; + const int num_iter = NumberOfFrames(stack_size_log, class_id); + u8 *flags = GetFlags(stack_size_log, class_id); + for (int i = 0; i < num_iter; i++) { + uptr pos = ModuloNumberOfFrames(stack_size_log, class_id, hint_position++); + // This part is tricky. On one hand, checking and setting flags[pos] + // should be atomic to ensure async-signal safety. But on the other hand, + // if the signal arrives between checking and setting flags[pos], the + // signal handler's fake stack will start from a different hint_position + // and so will not touch this particular byte. So, it is safe to do this + // with regular non-atomic load and store (at least I was not able to make + // this code crash). + if (flags[pos]) continue; + flags[pos] = 1; + FakeFrame *res = reinterpret_cast<FakeFrame *>( + GetFrame(stack_size_log, class_id, pos)); + res->real_stack = real_stack; + *SavedFlagPtr(reinterpret_cast<uptr>(res), class_id) = &flags[pos]; + return res; + } + return nullptr; // We are out of fake stack. 
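// [Editorial note] Scanning from hint_position_++ rather than from slot 0 is
// what gives the round-robin behavior promised in asan_fake_stack.h: freed
// slots are reused as late as possible, lengthening the window in which a
// use-after-return can still hit poisoned shadow.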
+}
+
+uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr *frame_beg, uptr *frame_end) {
+  uptr stack_size_log = this->stack_size_log();
+  uptr beg = reinterpret_cast<uptr>(GetFrame(stack_size_log, 0, 0));
+  uptr end = reinterpret_cast<uptr>(this) + RequiredSize(stack_size_log);
+  if (ptr < beg || ptr >= end) return 0;
+  uptr class_id = (ptr - beg) >> stack_size_log;
+  uptr base = beg + (class_id << stack_size_log);
+  CHECK_LE(base, ptr);
+  CHECK_LT(ptr, base + (((uptr)1) << stack_size_log));
+  uptr pos = (ptr - base) >> (kMinStackFrameSizeLog + class_id);
+  uptr res = base + pos * BytesInSizeClass(class_id);
+  *frame_end = res + BytesInSizeClass(class_id);
+  *frame_beg = res + sizeof(FakeFrame);
+  return res;
+}
+
+void FakeStack::HandleNoReturn() {
+  needs_gc_ = true;
+}
+
+// When a throw, longjmp or some such happens, we don't call OnFree() and as a
+// result may leak one or more fake frames; the good news is that we are
+// notified about all such events by HandleNoReturn().
+// If we recently had such a no-return event we need to collect garbage frames.
+// We do it based on their 'real_stack' values -- everything that is lower
+// than the current real_stack is garbage.
+NOINLINE void FakeStack::GC(uptr real_stack) {
+  uptr collected = 0;
+  for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
+    u8 *flags = GetFlags(stack_size_log(), class_id);
+    for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
+         i++) {
+      if (flags[i] == 0) continue;  // not allocated.
+      FakeFrame *ff = reinterpret_cast<FakeFrame *>(
+          GetFrame(stack_size_log(), class_id, i));
+      if (ff->real_stack < real_stack) {
+        flags[i] = 0;
+        collected++;
+      }
+    }
+  }
+  needs_gc_ = false;
+}
+
+void FakeStack::ForEachFakeFrame(RangeIteratorCallback callback, void *arg) {
+  for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
+    u8 *flags = GetFlags(stack_size_log(), class_id);
+    for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
+         i++) {
+      if (flags[i] == 0) continue;  // not allocated.
+      FakeFrame *ff = reinterpret_cast<FakeFrame *>(
+          GetFrame(stack_size_log(), class_id, i));
+      uptr begin = reinterpret_cast<uptr>(ff);
+      callback(begin, begin + FakeStack::BytesInSizeClass(class_id), arg);
+    }
+  }
+}
+
+#if (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA
+static THREADLOCAL FakeStack *fake_stack_tls;
+
+FakeStack *GetTLSFakeStack() {
+  return fake_stack_tls;
+}
+void SetTLSFakeStack(FakeStack *fs) {
+  fake_stack_tls = fs;
+}
+#else
+FakeStack *GetTLSFakeStack() { return 0; }
+void SetTLSFakeStack(FakeStack *fs) { }
+#endif  // (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA
+
+static FakeStack *GetFakeStack() {
+  AsanThread *t = GetCurrentThread();
+  if (!t) return nullptr;
+  return t->fake_stack();
+}
+
+static FakeStack *GetFakeStackFast() {
+  if (FakeStack *fs = GetTLSFakeStack())
+    return fs;
+  if (!__asan_option_detect_stack_use_after_return)
+    return nullptr;
+  return GetFakeStack();
+}
+
+ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) {
+  FakeStack *fs = GetFakeStackFast();
+  if (!fs) return 0;
+  uptr local_stack;
+  uptr real_stack = reinterpret_cast<uptr>(&local_stack);
+  FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack);
+  if (!ff) return 0;  // Out of fake stack.
+ uptr ptr = reinterpret_cast<uptr>(ff); + SetShadow(ptr, size, class_id, 0); + return ptr; +} + +ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size) { + FakeStack::Deallocate(ptr, class_id); + SetShadow(ptr, size, class_id, kMagic8); +} + +} // namespace __asan + +// ---------------------- Interface ---------------- {{{1 +using namespace __asan; +#define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id) \ + extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr \ + __asan_stack_malloc_##class_id(uptr size) { \ + return OnMalloc(class_id, size); \ + } \ + extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \ + uptr ptr, uptr size) { \ + OnFree(ptr, class_id, size); \ + } + +DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(0) +DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(1) +DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(2) +DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(3) +DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(4) +DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(5) +DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(6) +DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(7) +DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(8) +DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(9) +DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(10) +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE +void *__asan_get_current_fake_stack() { return GetFakeStackFast(); } + +SANITIZER_INTERFACE_ATTRIBUTE +void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg, + void **end) { + FakeStack *fs = reinterpret_cast<FakeStack*>(fake_stack); + if (!fs) return nullptr; + uptr frame_beg, frame_end; + FakeFrame *frame = reinterpret_cast<FakeFrame *>(fs->AddrIsInFakeStack( + reinterpret_cast<uptr>(addr), &frame_beg, &frame_end)); + if (!frame) return nullptr; + if (frame->magic != kCurrentStackFrameMagic) + return nullptr; + if (beg) *beg = reinterpret_cast<void*>(frame_beg); + if (end) *end = reinterpret_cast<void*>(frame_end); + return reinterpret_cast<void*>(frame->real_stack); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void __asan_alloca_poison(uptr addr, uptr size) { + uptr LeftRedzoneAddr = addr - kAllocaRedzoneSize; + uptr PartialRzAddr = addr + size; + uptr RightRzAddr = (PartialRzAddr + kAllocaRedzoneMask) & ~kAllocaRedzoneMask; + uptr PartialRzAligned = PartialRzAddr & ~(SHADOW_GRANULARITY - 1); + FastPoisonShadow(LeftRedzoneAddr, kAllocaRedzoneSize, kAsanAllocaLeftMagic); + FastPoisonShadowPartialRightRedzone( + PartialRzAligned, PartialRzAddr % SHADOW_GRANULARITY, + RightRzAddr - PartialRzAligned, kAsanAllocaRightMagic); + FastPoisonShadow(RightRzAddr, kAllocaRedzoneSize, kAsanAllocaRightMagic); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void __asan_allocas_unpoison(uptr top, uptr bottom) { + if ((!top) || (top > bottom)) return; + REAL(memset)(reinterpret_cast<void*>(MemToShadow(top)), 0, + (bottom - top) / SHADOW_GRANULARITY); +} +} // extern "C" diff --git a/contrib/compiler-rt/lib/asan/asan_fake_stack.h b/contrib/compiler-rt/lib/asan/asan_fake_stack.h new file mode 100644 index 000000000000..da9a91c23025 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_fake_stack.h @@ -0,0 +1,176 @@ +//===-- asan_fake_stack.h ---------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// ASan-private header for asan_fake_stack.cc, implements FakeStack. 
+//===----------------------------------------------------------------------===//
+
+#ifndef ASAN_FAKE_STACK_H
+#define ASAN_FAKE_STACK_H
+
+#include "sanitizer_common/sanitizer_common.h"
+
+namespace __asan {
+
+// Fake stack frame contains local variables of one function.
+struct FakeFrame {
+  uptr magic;  // Modified by the instrumented code.
+  uptr descr;  // Modified by the instrumented code.
+  uptr pc;     // Modified by the instrumented code.
+  uptr real_stack;
+};
+
+// For each thread we create a fake stack and place stack objects on this fake
+// stack instead of the real stack. The fake stack is not really a stack but
+// a fast malloc-like allocator, so that when a function exits the fake stack
+// is not popped but remains there for quite some time until it gets used
+// again. So, we poison the objects on the fake stack when the function
+// returns. It helps us find use-after-return bugs.
+//
+// The FakeStack object is allocated by a single mmap call and has no other
+// pointers. The size of the fake stack depends on the actual thread stack size
+// and thus cannot be a constant.
+// stack_size is a power of two greater than or equal to the thread's stack
+// size; we store it as its logarithm (stack_size_log).
+// FakeStack has kNumberOfSizeClasses (11) size classes, each size class
+// is a power of two, starting from 64 bytes. Each size class occupies
+// stack_size bytes and thus can allocate
+// NumberOfFrames=(stack_size/BytesInSizeClass) fake frames (also a power of 2).
+// For each size class we have NumberOfFrames allocation flags,
+// each flag indicates whether the given frame is currently allocated.
+// All flags for size classes 0 .. 10 are stored in a single contiguous region
+// followed by another contiguous region which contains the actual memory for
+// size classes. The addresses are computed by GetFlags and GetFrame without
+// any memory accesses, solely based on 'this' and stack_size_log.
+// Allocate() flips the appropriate allocation flag; see the comment in
+// asan_fake_stack.cc for why this stays async-signal safe even without
+// atomic operations.
+// This allocator does not have quarantine per se, but it tries to allocate the
+// frames in a round-robin fashion to maximize the delay between a deallocation
+// and the next allocation.
+class FakeStack {
+  static const uptr kMinStackFrameSizeLog = 6;   // Min frame is 64B.
+  static const uptr kMaxStackFrameSizeLog = 16;  // Max stack frame is 64K.
+
+ public:
+  static const uptr kNumberOfSizeClasses =
+      kMaxStackFrameSizeLog - kMinStackFrameSizeLog + 1;
+
+  // CTOR: create the FakeStack as a single mmap-ed object.
+  static FakeStack *Create(uptr stack_size_log);
+
+  void Destroy(int tid);
+
+  // stack_size_log is at least 15 (stack_size >= 32K).
+  static uptr SizeRequiredForFlags(uptr stack_size_log) {
+    return ((uptr)1) << (stack_size_log + 1 - kMinStackFrameSizeLog);
+  }
+
+  // Each size class occupies stack_size bytes.
+  static uptr SizeRequiredForFrames(uptr stack_size_log) {
+    return (((uptr)1) << stack_size_log) * kNumberOfSizeClasses;
+  }
+
+  // Number of bytes required for the whole object.
+  static uptr RequiredSize(uptr stack_size_log) {
+    return kFlagsOffset + SizeRequiredForFlags(stack_size_log) +
+           SizeRequiredForFrames(stack_size_log);
+  }
+
+  // Offset of the given flag from the first flag.
+  // The flags for class 0 begin at offset  000000000
+  // The flags for class 1 begin at offset  100000000
+  // ....................2................  110000000
+  // ....................3................  111000000
+  // and so on.
+  static uptr FlagsOffset(uptr stack_size_log, uptr class_id) {
+    uptr t = kNumberOfSizeClasses - 1 - class_id;
+    const uptr all_ones = (((uptr)1) << (kNumberOfSizeClasses - 1)) - 1;
+    return ((all_ones >> t) << t) << (stack_size_log - 15);
+  }
+
+  static uptr NumberOfFrames(uptr stack_size_log, uptr class_id) {
+    return ((uptr)1) << (stack_size_log - kMinStackFrameSizeLog - class_id);
+  }
+
+  // Reduce n modulo the number of frames in the size class.
+  static uptr ModuloNumberOfFrames(uptr stack_size_log, uptr class_id, uptr n) {
+    return n & (NumberOfFrames(stack_size_log, class_id) - 1);
+  }
+
+  // The pointer to the flags of the given class_id.
+  u8 *GetFlags(uptr stack_size_log, uptr class_id) {
+    return reinterpret_cast<u8 *>(this) + kFlagsOffset +
+           FlagsOffset(stack_size_log, class_id);
+  }
+
+  // Get frame by class_id and pos.
+  u8 *GetFrame(uptr stack_size_log, uptr class_id, uptr pos) {
+    return reinterpret_cast<u8 *>(this) + kFlagsOffset +
+           SizeRequiredForFlags(stack_size_log) +
+           (((uptr)1) << stack_size_log) * class_id +
+           BytesInSizeClass(class_id) * pos;
+  }
+
+  // Allocate the fake frame.
+  FakeFrame *Allocate(uptr stack_size_log, uptr class_id, uptr real_stack);
+
+  // Deallocate the fake frame: read the saved flag address and write 0 there.
+  static void Deallocate(uptr x, uptr class_id) {
+    **SavedFlagPtr(x, class_id) = 0;
+  }
+
+  // Poison the entire FakeStack's shadow with the magic value.
+  void PoisonAll(u8 magic);
+
+  // Return the beginning of the FakeFrame or 0 if the address is not ours.
+  uptr AddrIsInFakeStack(uptr addr, uptr *frame_beg, uptr *frame_end);
+  USED uptr AddrIsInFakeStack(uptr addr) {
+    uptr t1, t2;
+    return AddrIsInFakeStack(addr, &t1, &t2);
+  }
+
+  // Number of bytes in a fake frame of this size class.
+  static uptr BytesInSizeClass(uptr class_id) {
+    return ((uptr)1) << (class_id + kMinStackFrameSizeLog);
+  }
+
+  // The fake frame is guaranteed to have a right redzone.
+  // We use the last word of that redzone to store the address of the flag
+  // that corresponds to the current frame, to make deallocation faster.
+  static u8 **SavedFlagPtr(uptr x, uptr class_id) {
+    return reinterpret_cast<u8 **>(x + BytesInSizeClass(class_id) - sizeof(x));
+  }
+
+  uptr stack_size_log() const { return stack_size_log_; }
+
+  void HandleNoReturn();
+  void GC(uptr real_stack);
+
+  void ForEachFakeFrame(RangeIteratorCallback callback, void *arg);
+
+ private:
+  FakeStack() { }
+  static const uptr kFlagsOffset = 4096;  // This is where the flags begin.
+  // Must match the number of uses of DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID
+  COMPILER_CHECK(kNumberOfSizeClasses == 11);
+  static const uptr kMaxStackMallocSize = ((uptr)1) << kMaxStackFrameSizeLog;
+
+  uptr hint_position_[kNumberOfSizeClasses];
+  uptr stack_size_log_;
+  // Set by HandleNoReturn() when fake frames may have leaked; Allocate()
+  // runs GC() before the next allocation while this is set.
+  bool needs_gc_;
+};
+
+FakeStack *GetTLSFakeStack();
+void SetTLSFakeStack(FakeStack *fs);
+
+}  // namespace __asan
+
+#endif  // ASAN_FAKE_STACK_H
diff --git a/contrib/compiler-rt/lib/asan/asan_flags.cc b/contrib/compiler-rt/lib/asan/asan_flags.cc
new file mode 100644
index 000000000000..5682ab4eb476
--- /dev/null
+++ b/contrib/compiler-rt/lib/asan/asan_flags.cc
@@ -0,0 +1,215 @@
+//===-- asan_flags.cc -------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// ASan flag parsing logic. +//===----------------------------------------------------------------------===// + +#include "asan_activation.h" +#include "asan_flags.h" +#include "asan_interface_internal.h" +#include "asan_stack.h" +#include "lsan/lsan_common.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_flag_parser.h" +#include "ubsan/ubsan_flags.h" +#include "ubsan/ubsan_platform.h" + +namespace __asan { + +Flags asan_flags_dont_use_directly; // use via flags(). + +static const char *MaybeCallAsanDefaultOptions() { + return (&__asan_default_options) ? __asan_default_options() : ""; +} + +static const char *MaybeUseAsanDefaultOptionsCompileDefinition() { +#ifdef ASAN_DEFAULT_OPTIONS + return SANITIZER_STRINGIFY(ASAN_DEFAULT_OPTIONS); +#else + return ""; +#endif +} + +void Flags::SetDefaults() { +#define ASAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue; +#include "asan_flags.inc" +#undef ASAN_FLAG +} + +static void RegisterAsanFlags(FlagParser *parser, Flags *f) { +#define ASAN_FLAG(Type, Name, DefaultValue, Description) \ + RegisterFlag(parser, #Name, Description, &f->Name); +#include "asan_flags.inc" +#undef ASAN_FLAG +} + +void InitializeFlags() { + // Set the default values and prepare for parsing ASan and common flags. + SetCommonFlagsDefaults(); + { + CommonFlags cf; + cf.CopyFrom(*common_flags()); + cf.detect_leaks = cf.detect_leaks && CAN_SANITIZE_LEAKS; + cf.external_symbolizer_path = GetEnv("ASAN_SYMBOLIZER_PATH"); + cf.malloc_context_size = kDefaultMallocContextSize; + cf.intercept_tls_get_addr = true; + cf.exitcode = 1; + OverrideCommonFlags(cf); + } + Flags *f = flags(); + f->SetDefaults(); + + FlagParser asan_parser; + RegisterAsanFlags(&asan_parser, f); + RegisterCommonFlags(&asan_parser); + + // Set the default values and prepare for parsing LSan and UBSan flags + // (which can also overwrite common flags). +#if CAN_SANITIZE_LEAKS + __lsan::Flags *lf = __lsan::flags(); + lf->SetDefaults(); + + FlagParser lsan_parser; + __lsan::RegisterLsanFlags(&lsan_parser, lf); + RegisterCommonFlags(&lsan_parser); +#endif + +#if CAN_SANITIZE_UB + __ubsan::Flags *uf = __ubsan::flags(); + uf->SetDefaults(); + + FlagParser ubsan_parser; + __ubsan::RegisterUbsanFlags(&ubsan_parser, uf); + RegisterCommonFlags(&ubsan_parser); +#endif + + if (SANITIZER_MAC) { + // Support macOS MallocScribble and MallocPreScribble: + // <https://developer.apple.com/library/content/documentation/Performance/ + // Conceptual/ManagingMemory/Articles/MallocDebug.html> + if (GetEnv("MallocScribble")) { + f->max_free_fill_size = 0x1000; + } + if (GetEnv("MallocPreScribble")) { + f->malloc_fill_byte = 0xaa; + } + } + + // Override from ASan compile definition. + const char *asan_compile_def = MaybeUseAsanDefaultOptionsCompileDefinition(); + asan_parser.ParseString(asan_compile_def); + + // Override from user-specified string. 
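// [Editorial note] Net precedence after all the ParseString calls in this
// function, lowest to highest: built-in defaults, the ASAN_DEFAULT_OPTIONS
// compile definition, __asan_default_options() (parsed next), and finally
// the ASAN_OPTIONS environment variable, parsed last below.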
+ const char *asan_default_options = MaybeCallAsanDefaultOptions(); + asan_parser.ParseString(asan_default_options); +#if CAN_SANITIZE_UB + const char *ubsan_default_options = __ubsan::MaybeCallUbsanDefaultOptions(); + ubsan_parser.ParseString(ubsan_default_options); +#endif +#if CAN_SANITIZE_LEAKS + const char *lsan_default_options = __lsan::MaybeCallLsanDefaultOptions(); + lsan_parser.ParseString(lsan_default_options); +#endif + + // Override from command line. + asan_parser.ParseString(GetEnv("ASAN_OPTIONS")); +#if CAN_SANITIZE_LEAKS + lsan_parser.ParseString(GetEnv("LSAN_OPTIONS")); +#endif +#if CAN_SANITIZE_UB + ubsan_parser.ParseString(GetEnv("UBSAN_OPTIONS")); +#endif + + InitializeCommonFlags(); + + // TODO(eugenis): dump all flags at verbosity>=2? + if (Verbosity()) ReportUnrecognizedFlags(); + + if (common_flags()->help) { + // TODO(samsonov): print all of the flags (ASan, LSan, common). + asan_parser.PrintFlagDescriptions(); + } + + // Flag validation: + if (!CAN_SANITIZE_LEAKS && common_flags()->detect_leaks) { + Report("%s: detect_leaks is not supported on this platform.\n", + SanitizerToolName); + Die(); + } + // Ensure that redzone is at least SHADOW_GRANULARITY. + if (f->redzone < (int)SHADOW_GRANULARITY) + f->redzone = SHADOW_GRANULARITY; + // Make "strict_init_order" imply "check_initialization_order". + // TODO(samsonov): Use a single runtime flag for an init-order checker. + if (f->strict_init_order) { + f->check_initialization_order = true; + } + CHECK_LE((uptr)common_flags()->malloc_context_size, kStackTraceMax); + CHECK_LE(f->min_uar_stack_size_log, f->max_uar_stack_size_log); + CHECK_GE(f->redzone, 16); + CHECK_GE(f->max_redzone, f->redzone); + CHECK_LE(f->max_redzone, 2048); + CHECK(IsPowerOfTwo(f->redzone)); + CHECK(IsPowerOfTwo(f->max_redzone)); + if (SANITIZER_RTEMS) { + CHECK(!f->unmap_shadow_on_exit); + CHECK(!f->protect_shadow_gap); + } + + // quarantine_size is deprecated but we still honor it. + // quarantine_size can not be used together with quarantine_size_mb. + if (f->quarantine_size >= 0 && f->quarantine_size_mb >= 0) { + Report("%s: please use either 'quarantine_size' (deprecated) or " + "quarantine_size_mb, but not both\n", SanitizerToolName); + Die(); + } + if (f->quarantine_size >= 0) + f->quarantine_size_mb = f->quarantine_size >> 20; + if (f->quarantine_size_mb < 0) { + const int kDefaultQuarantineSizeMb = + (ASAN_LOW_MEMORY) ? 1UL << 4 : 1UL << 8; + f->quarantine_size_mb = kDefaultQuarantineSizeMb; + } + if (f->thread_local_quarantine_size_kb < 0) { + const u32 kDefaultThreadLocalQuarantineSizeKb = + // It is not advised to go lower than 64Kb, otherwise quarantine batches + // pushed from thread local quarantine to global one will create too + // much overhead. One quarantine batch size is 8Kb and it holds up to + // 1021 chunk, which amounts to 1/8 memory overhead per batch when + // thread local quarantine is set to 64Kb. + (ASAN_LOW_MEMORY) ? 1 << 6 : FIRST_32_SECOND_64(1 << 8, 1 << 10); + f->thread_local_quarantine_size_kb = kDefaultThreadLocalQuarantineSizeKb; + } + if (f->thread_local_quarantine_size_kb == 0 && f->quarantine_size_mb > 0) { + Report("%s: thread_local_quarantine_size_kb can be set to 0 only when " + "quarantine_size_mb is set to 0\n", SanitizerToolName); + Die(); + } + if (!f->replace_str && common_flags()->intercept_strlen) { + Report("WARNING: strlen interceptor is enabled even though replace_str=0. 
" + "Use intercept_strlen=0 to disable it."); + } + if (!f->replace_str && common_flags()->intercept_strchr) { + Report("WARNING: strchr* interceptors are enabled even though " + "replace_str=0. Use intercept_strchr=0 to disable them."); + } + if (!f->replace_str && common_flags()->intercept_strndup) { + Report("WARNING: strndup* interceptors are enabled even though " + "replace_str=0. Use intercept_strndup=0 to disable them."); + } +} + +} // namespace __asan + +SANITIZER_INTERFACE_WEAK_DEF(const char*, __asan_default_options, void) { + return ""; +} diff --git a/contrib/compiler-rt/lib/asan/asan_flags.h b/contrib/compiler-rt/lib/asan/asan_flags.h new file mode 100644 index 000000000000..4935161c30f1 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_flags.h @@ -0,0 +1,49 @@ +//===-- asan_flags.h -------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// ASan runtime flags. +//===----------------------------------------------------------------------===// + +#ifndef ASAN_FLAGS_H +#define ASAN_FLAGS_H + +#include "sanitizer_common/sanitizer_internal_defs.h" +#include "sanitizer_common/sanitizer_flag_parser.h" + +// ASan flag values can be defined in four ways: +// 1) initialized with default values at startup. +// 2) overriden during compilation of ASan runtime by providing +// compile definition ASAN_DEFAULT_OPTIONS. +// 3) overriden from string returned by user-specified function +// __asan_default_options(). +// 4) overriden from env variable ASAN_OPTIONS. +// 5) overriden during ASan activation (for now used on Android only). + +namespace __asan { + +struct Flags { +#define ASAN_FLAG(Type, Name, DefaultValue, Description) Type Name; +#include "asan_flags.inc" +#undef ASAN_FLAG + + void SetDefaults(); +}; + +extern Flags asan_flags_dont_use_directly; +inline Flags *flags() { + return &asan_flags_dont_use_directly; +} + +void InitializeFlags(); + +} // namespace __asan + +#endif // ASAN_FLAGS_H diff --git a/contrib/compiler-rt/lib/asan/asan_flags.inc b/contrib/compiler-rt/lib/asan/asan_flags.inc new file mode 100644 index 000000000000..a9c97d53b31f --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_flags.inc @@ -0,0 +1,161 @@ +//===-- asan_flags.inc ------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// ASan runtime flags. +// +//===----------------------------------------------------------------------===// +#ifndef ASAN_FLAG +# error "Define ASAN_FLAG prior to including this file!" +#endif + +// ASAN_FLAG(Type, Name, DefaultValue, Description) +// See COMMON_FLAG in sanitizer_flags.inc for more details. + +ASAN_FLAG(int, quarantine_size, -1, + "Deprecated, please use quarantine_size_mb.") +ASAN_FLAG(int, quarantine_size_mb, -1, + "Size (in Mb) of quarantine used to detect use-after-free " + "errors. 
Lower value may reduce memory usage but increase the " + "chance of false negatives.") +ASAN_FLAG(int, thread_local_quarantine_size_kb, -1, + "Size (in Kb) of thread local quarantine used to detect " + "use-after-free errors. Lower value may reduce memory usage but " + "increase the chance of false negatives. It is not advised to go " + "lower than 64Kb, otherwise frequent transfers to global quarantine " + "might affect performance.") +ASAN_FLAG(int, redzone, 16, + "Minimal size (in bytes) of redzones around heap objects. " + "Requirement: redzone >= 16, is a power of two.") +ASAN_FLAG(int, max_redzone, 2048, + "Maximal size (in bytes) of redzones around heap objects.") +ASAN_FLAG( + bool, debug, false, + "If set, prints some debugging information and does additional checks.") +ASAN_FLAG( + int, report_globals, 1, + "Controls the way to handle globals (0 - don't detect buffer overflow on " + "globals, 1 - detect buffer overflow, 2 - print data about registered " + "globals).") +ASAN_FLAG(bool, check_initialization_order, false, + "If set, attempts to catch initialization order issues.") +ASAN_FLAG( + bool, replace_str, true, + "If set, uses custom wrappers and replacements for libc string functions " + "to find more errors.") +ASAN_FLAG(bool, replace_intrin, true, + "If set, uses custom wrappers for memset/memcpy/memmove intrinsics.") +ASAN_FLAG(bool, detect_stack_use_after_return, false, + "Enables stack-use-after-return checking at run-time.") +ASAN_FLAG(int, min_uar_stack_size_log, 16, // We can't do smaller anyway. + "Minimum fake stack size log.") +ASAN_FLAG(int, max_uar_stack_size_log, + 20, // 1Mb per size class, i.e. ~11Mb per thread + "Maximum fake stack size log.") +ASAN_FLAG(bool, uar_noreserve, false, + "Use mmap with 'noreserve' flag to allocate fake stack.") +ASAN_FLAG( + int, max_malloc_fill_size, 0x1000, // By default, fill only the first 4K. + "ASan allocator flag. max_malloc_fill_size is the maximal amount of " + "bytes that will be filled with malloc_fill_byte on malloc.") +ASAN_FLAG( + int, max_free_fill_size, 0, + "ASan allocator flag. max_free_fill_size is the maximal amount of " + "bytes that will be filled with free_fill_byte during free.") +ASAN_FLAG(int, malloc_fill_byte, 0xbe, + "Value used to fill the newly allocated memory.") +ASAN_FLAG(int, free_fill_byte, 0x55, + "Value used to fill deallocated memory.") +ASAN_FLAG(bool, allow_user_poisoning, true, + "If set, user may manually mark memory regions as poisoned or " + "unpoisoned.") +ASAN_FLAG( + int, sleep_before_dying, 0, + "Number of seconds to sleep between printing an error report and " + "terminating the program. Useful for debugging purposes (e.g. when one " + "needs to attach gdb).") +ASAN_FLAG( + int, sleep_after_init, 0, + "Number of seconds to sleep after AddressSanitizer is initialized. " + "Useful for debugging purposes (e.g. when one needs to attach gdb).") +ASAN_FLAG(bool, check_malloc_usable_size, true, + "Allows the users to work around the bug in Nvidia drivers prior to " + "295.*.") +ASAN_FLAG(bool, unmap_shadow_on_exit, false, + "If set, explicitly unmaps the (huge) shadow at exit.") +ASAN_FLAG(bool, protect_shadow_gap, !SANITIZER_RTEMS, + "If set, mprotect the shadow gap") +ASAN_FLAG(bool, print_stats, false, + "Print various statistics after printing an error message or if " + "atexit=1.") +ASAN_FLAG(bool, print_legend, true, "Print the legend for the shadow bytes.") +ASAN_FLAG(bool, print_scariness, false, + "Print the scariness score. 
Experimental.") +ASAN_FLAG(bool, atexit, false, + "If set, prints ASan exit stats even after program terminates " + "successfully.") +ASAN_FLAG( + bool, print_full_thread_history, true, + "If set, prints thread creation stacks for the threads involved in the " + "report and their ancestors up to the main thread.") +ASAN_FLAG( + bool, poison_heap, true, + "Poison (or not) the heap memory on [de]allocation. Zero value is useful " + "for benchmarking the allocator or instrumentator.") +ASAN_FLAG(bool, poison_partial, true, + "If true, poison partially addressable 8-byte aligned words " + "(default=true). This flag affects heap and global buffers, but not " + "stack buffers.") +ASAN_FLAG(bool, poison_array_cookie, true, + "Poison (or not) the array cookie after operator new[].") + +// Turn off alloc/dealloc mismatch checker on Mac and Windows for now. +// https://github.com/google/sanitizers/issues/131 +// https://github.com/google/sanitizers/issues/309 +// TODO(glider,timurrrr): Fix known issues and enable this back. +ASAN_FLAG(bool, alloc_dealloc_mismatch, + !SANITIZER_MAC && !SANITIZER_WINDOWS && !SANITIZER_ANDROID, + "Report errors on malloc/delete, new/free, new/delete[], etc.") + +ASAN_FLAG(bool, new_delete_type_mismatch, true, + "Report errors on mismatch between size of new and delete.") +ASAN_FLAG( + bool, strict_init_order, false, + "If true, assume that dynamic initializers can never access globals from " + "other modules, even if the latter are already initialized.") +ASAN_FLAG( + bool, start_deactivated, false, + "If true, ASan tweaks a bunch of other flags (quarantine, redzone, heap " + "poisoning) to reduce memory consumption as much as possible, and " + "restores them to original values when the first instrumented module is " + "loaded into the process. This is mainly intended to be used on " + "Android. ") +ASAN_FLAG( + int, detect_invalid_pointer_pairs, 0, + "If >= 2, detect operations like <, <=, >, >= and - on invalid pointer " + "pairs (e.g. when pointers belong to different objects); " + "If == 1, detect invalid operations only when both pointers are non-null.") +ASAN_FLAG( + bool, detect_container_overflow, true, + "If true, honor the container overflow annotations. See " + "https://github.com/google/sanitizers/wiki/AddressSanitizerContainerOverflow") +ASAN_FLAG(int, detect_odr_violation, 2, + "If >=2, detect violation of One-Definition-Rule (ODR); " + "If ==1, detect ODR-violation only if the two variables " + "have different sizes") +ASAN_FLAG(const char *, suppressions, "", "Suppressions file name.") +ASAN_FLAG(bool, halt_on_error, true, + "Crash the program after printing the first error report " + "(WARNING: USE AT YOUR OWN RISK!)") +ASAN_FLAG(bool, allocator_frees_and_returns_null_on_realloc_zero, true, + "realloc(p, 0) is equivalent to free(p) by default (Same as the " + "POSIX standard). 
If set to false, realloc(p, 0) will return a " + "pointer to an allocated space which can not be used.") +ASAN_FLAG(bool, verify_asan_link_order, true, + "Check position of ASan runtime in library list (needs to be disabled" + " when other library has to be preloaded system-wide)") diff --git a/contrib/compiler-rt/lib/asan/asan_fuchsia.cc b/contrib/compiler-rt/lib/asan/asan_fuchsia.cc new file mode 100644 index 000000000000..34399c92310b --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_fuchsia.cc @@ -0,0 +1,225 @@ +//===-- asan_fuchsia.cc --------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===---------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Fuchsia-specific details. +//===---------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_fuchsia.h" +#if SANITIZER_FUCHSIA + +#include "asan_interceptors.h" +#include "asan_internal.h" +#include "asan_stack.h" +#include "asan_thread.h" + +#include <limits.h> +#include <zircon/sanitizer.h> +#include <zircon/syscalls.h> +#include <zircon/threads.h> + +namespace __asan { + +// The system already set up the shadow memory for us. +// __sanitizer::GetMaxUserVirtualAddress has already been called by +// AsanInitInternal->InitializeHighMemEnd (asan_rtl.cc). +// Just do some additional sanity checks here. +void InitializeShadowMemory() { + if (Verbosity()) PrintAddressSpaceLayout(); + + // Make sure SHADOW_OFFSET doesn't use __asan_shadow_memory_dynamic_address. + __asan_shadow_memory_dynamic_address = kDefaultShadowSentinel; + DCHECK(kLowShadowBeg != kDefaultShadowSentinel); + __asan_shadow_memory_dynamic_address = kLowShadowBeg; + + CHECK_EQ(kShadowGapEnd, kHighShadowBeg - 1); + CHECK_EQ(kHighMemEnd, __sanitizer::ShadowBounds.memory_limit - 1); + CHECK_EQ(kHighMemBeg, __sanitizer::ShadowBounds.shadow_limit); + CHECK_EQ(kHighShadowBeg, __sanitizer::ShadowBounds.shadow_base); + CHECK_EQ(kShadowGapEnd, __sanitizer::ShadowBounds.shadow_base - 1); + CHECK_EQ(kLowShadowEnd, 0); + CHECK_EQ(kLowShadowBeg, 0); +} + +void AsanApplyToGlobals(globals_op_fptr op, const void *needle) { + UNIMPLEMENTED(); +} + +void AsanCheckDynamicRTPrereqs() {} +void AsanCheckIncompatibleRT() {} +void InitializeAsanInterceptors() {} + +void *AsanDoesNotSupportStaticLinkage() { return nullptr; } + +void InitializePlatformExceptionHandlers() {} +void AsanOnDeadlySignal(int signo, void *siginfo, void *context) { + UNIMPLEMENTED(); +} + +// We can use a plain thread_local variable for TSD. +static thread_local void *per_thread; + +void *AsanTSDGet() { return per_thread; } + +void AsanTSDSet(void *tsd) { per_thread = tsd; } + +// There's no initialization needed, and the passed-in destructor +// will never be called. Instead, our own thread destruction hook +// (below) will call AsanThread::TSDDtor directly. +void AsanTSDInit(void (*destructor)(void *tsd)) { + DCHECK(destructor == &PlatformTSDDtor); +} + +void PlatformTSDDtor(void *tsd) { UNREACHABLE(__func__); } + +static inline size_t AsanThreadMmapSize() { + return RoundUpTo(sizeof(AsanThread), PAGE_SIZE); +} + +struct AsanThread::InitOptions { + uptr stack_bottom, stack_size; +}; + +// Shared setup between thread creation and startup for the initial thread. 
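Stepping outside the runtime for a moment: a minimal user-side sketch of the __asan_default_options() hook consulted by InitializeFlags above (the runtime's own weak definition returns an empty string). The particular flag values are only examples.

// Link this into the instrumented program itself; no header is needed.
// The string uses the same name=value:name=value syntax as ASAN_OPTIONS,
// and ASAN_OPTIONS set in the environment still overrides it.
extern "C" const char *__asan_default_options() {
  return "detect_stack_use_after_return=1:quarantine_size_mb=64";
}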
+static AsanThread *CreateAsanThread(StackTrace *stack, u32 parent_tid, + uptr user_id, bool detached, + const char *name, uptr stack_bottom, + uptr stack_size) { + // In lieu of AsanThread::Create. + AsanThread *thread = (AsanThread *)MmapOrDie(AsanThreadMmapSize(), __func__); + + AsanThreadContext::CreateThreadContextArgs args = {thread, stack}; + u32 tid = + asanThreadRegistry().CreateThread(user_id, detached, parent_tid, &args); + asanThreadRegistry().SetThreadName(tid, name); + + // On other systems, AsanThread::Init() is called from the new + // thread itself. But on Fuchsia we already know the stack address + // range beforehand, so we can do most of the setup right now. + const AsanThread::InitOptions options = {stack_bottom, stack_size}; + thread->Init(&options); + + return thread; +} + +// This gets the same arguments passed to Init by CreateAsanThread, above. +// We're in the creator thread before the new thread is actually started, +// but its stack address range is already known. We don't bother tracking +// the static TLS address range because the system itself already uses an +// ASan-aware allocator for that. +void AsanThread::SetThreadStackAndTls(const AsanThread::InitOptions *options) { + DCHECK_NE(GetCurrentThread(), this); + DCHECK_NE(GetCurrentThread(), nullptr); + CHECK_NE(options->stack_bottom, 0); + CHECK_NE(options->stack_size, 0); + stack_bottom_ = options->stack_bottom; + stack_top_ = options->stack_bottom + options->stack_size; +} + +// Called by __asan::AsanInitInternal (asan_rtl.c). +AsanThread *CreateMainThread() { + thrd_t self = thrd_current(); + char name[ZX_MAX_NAME_LEN]; + CHECK_NE(__sanitizer::MainThreadStackBase, 0); + CHECK_GT(__sanitizer::MainThreadStackSize, 0); + AsanThread *t = CreateAsanThread( + nullptr, 0, reinterpret_cast<uptr>(self), true, + _zx_object_get_property(thrd_get_zx_handle(self), ZX_PROP_NAME, name, + sizeof(name)) == ZX_OK + ? name + : nullptr, + __sanitizer::MainThreadStackBase, __sanitizer::MainThreadStackSize); + SetCurrentThread(t); + return t; +} + +// This is called before each thread creation is attempted. So, in +// its first call, the calling thread is the initial and sole thread. +static void *BeforeThreadCreateHook(uptr user_id, bool detached, + const char *name, uptr stack_bottom, + uptr stack_size) { + EnsureMainThreadIDIsCorrect(); + // Strict init-order checking is thread-hostile. + if (flags()->strict_init_order) StopInitOrderChecking(); + + GET_STACK_TRACE_THREAD; + u32 parent_tid = GetCurrentTidOrInvalid(); + + return CreateAsanThread(&stack, parent_tid, user_id, detached, name, + stack_bottom, stack_size); +} + +// This is called after creating a new thread (in the creating thread), +// with the pointer returned by BeforeThreadCreateHook (above). +static void ThreadCreateHook(void *hook, bool aborted) { + AsanThread *thread = static_cast<AsanThread *>(hook); + if (!aborted) { + // The thread was created successfully. + // ThreadStartHook is already running in the new thread. + } else { + // The thread wasn't created after all. + // Clean up everything we set up in BeforeThreadCreateHook. + asanThreadRegistry().FinishThread(thread->tid()); + UnmapOrDie(thread, AsanThreadMmapSize()); + } +} + +// This is called in the newly-created thread before it runs anything else, +// with the pointer returned by BeforeThreadCreateHook (above). +// cf. 
asan_interceptors.cc:asan_thread_start +static void ThreadStartHook(void *hook, uptr os_id) { + AsanThread *thread = static_cast<AsanThread *>(hook); + SetCurrentThread(thread); + + // In lieu of AsanThread::ThreadStart. + asanThreadRegistry().StartThread(thread->tid(), os_id, /*workerthread*/ false, + nullptr); +} + +// Each thread runs this just before it exits, +// with the pointer returned by BeforeThreadCreateHook (above). +// All per-thread destructors have already been called. +static void ThreadExitHook(void *hook, uptr os_id) { + AsanThread::TSDDtor(per_thread); +} + +bool HandleDlopenInit() { + // Not supported on this platform. + static_assert(!SANITIZER_SUPPORTS_INIT_FOR_DLOPEN, + "Expected SANITIZER_SUPPORTS_INIT_FOR_DLOPEN to be false"); + return false; +} + +} // namespace __asan + +// These are declared (in extern "C") by <zircon/sanitizer.h>. +// The system runtime will call our definitions directly. + +void *__sanitizer_before_thread_create_hook(thrd_t thread, bool detached, + const char *name, void *stack_base, + size_t stack_size) { + return __asan::BeforeThreadCreateHook( + reinterpret_cast<uptr>(thread), detached, name, + reinterpret_cast<uptr>(stack_base), stack_size); +} + +void __sanitizer_thread_create_hook(void *hook, thrd_t thread, int error) { + __asan::ThreadCreateHook(hook, error != thrd_success); +} + +void __sanitizer_thread_start_hook(void *hook, thrd_t self) { + __asan::ThreadStartHook(hook, reinterpret_cast<uptr>(self)); +} + +void __sanitizer_thread_exit_hook(void *hook, thrd_t self) { + __asan::ThreadExitHook(hook, reinterpret_cast<uptr>(self)); +} + +#endif // SANITIZER_FUCHSIA diff --git a/contrib/compiler-rt/lib/asan/asan_globals.cc b/contrib/compiler-rt/lib/asan/asan_globals.cc new file mode 100644 index 000000000000..146234ac6c64 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_globals.cc @@ -0,0 +1,465 @@ +//===-- asan_globals.cc ---------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Handle globals. +//===----------------------------------------------------------------------===// + +#include "asan_interceptors.h" +#include "asan_internal.h" +#include "asan_mapping.h" +#include "asan_poisoning.h" +#include "asan_report.h" +#include "asan_stack.h" +#include "asan_stats.h" +#include "asan_suppressions.h" +#include "asan_thread.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_mutex.h" +#include "sanitizer_common/sanitizer_placement_new.h" +#include "sanitizer_common/sanitizer_stackdepot.h" +#include "sanitizer_common/sanitizer_symbolizer.h" + +namespace __asan { + +typedef __asan_global Global; + +struct ListOfGlobals { + const Global *g; + ListOfGlobals *next; +}; + +static BlockingMutex mu_for_globals(LINKER_INITIALIZED); +static LowLevelAllocator allocator_for_globals; +static ListOfGlobals *list_of_all_globals; + +static const int kDynamicInitGlobalsInitialCapacity = 512; +struct DynInitGlobal { + Global g; + bool initialized; +}; +typedef InternalMmapVector<DynInitGlobal> VectorOfGlobals; +// Lazy-initialized and never deleted. +static VectorOfGlobals *dynamic_init_globals; + +// We want to remember where a certain range of globals was registered. 
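For context on the ODR machinery that follows: the canonical trigger is two instrumented dynamic objects each defining a global with the same name, as in this hypothetical pair of files.

// libfoo.cc -> libfoo.so, built with -fsanitize=address
int table_size = 128;

// libbar.cc -> libbar.so, built with -fsanitize=address
int table_size = 256;  // loading both objects produces an odr-violation report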
+struct GlobalRegistrationSite {
+  u32 stack_id;
+  Global *g_first, *g_last;
+};
+typedef InternalMmapVector<GlobalRegistrationSite> GlobalRegistrationSiteVector;
+static GlobalRegistrationSiteVector *global_registration_site_vector;
+
+ALWAYS_INLINE void PoisonShadowForGlobal(const Global *g, u8 value) {
+  FastPoisonShadow(g->beg, g->size_with_redzone, value);
+}
+
+ALWAYS_INLINE void PoisonRedZones(const Global &g) {
+  uptr aligned_size = RoundUpTo(g.size, SHADOW_GRANULARITY);
+  FastPoisonShadow(g.beg + aligned_size, g.size_with_redzone - aligned_size,
+                   kAsanGlobalRedzoneMagic);
+  if (g.size != aligned_size) {
+    FastPoisonShadowPartialRightRedzone(
+        g.beg + RoundDownTo(g.size, SHADOW_GRANULARITY),
+        g.size % SHADOW_GRANULARITY,
+        SHADOW_GRANULARITY,
+        kAsanGlobalRedzoneMagic);
+  }
+}
+
+const uptr kMinimalDistanceFromAnotherGlobal = 64;
+
+static bool IsAddressNearGlobal(uptr addr, const __asan_global &g) {
+  if (addr <= g.beg - kMinimalDistanceFromAnotherGlobal) return false;
+  if (addr >= g.beg + g.size_with_redzone) return false;
+  return true;
+}
+
+static void ReportGlobal(const Global &g, const char *prefix) {
+  Report(
+      "%s Global[%p]: beg=%p size=%zu/%zu name=%s module=%s dyn_init=%zu "
+      "odr_indicator=%p\n",
+      prefix, &g, (void *)g.beg, g.size, g.size_with_redzone, g.name,
+      g.module_name, g.has_dynamic_init, (void *)g.odr_indicator);
+  if (g.location) {
+    Report("  location (%p): name=%s[%p], %d %d\n", g.location,
+           g.location->filename, g.location->filename, g.location->line_no,
+           g.location->column_no);
+  }
+}
+
+static u32 FindRegistrationSite(const Global *g) {
+  mu_for_globals.CheckLocked();
+  CHECK(global_registration_site_vector);
+  for (uptr i = 0, n = global_registration_site_vector->size(); i < n; i++) {
+    GlobalRegistrationSite &grs = (*global_registration_site_vector)[i];
+    if (g >= grs.g_first && g <= grs.g_last)
+      return grs.stack_id;
+  }
+  return 0;
+}
+
+int GetGlobalsForAddress(uptr addr, Global *globals, u32 *reg_sites,
+                         int max_globals) {
+  if (!flags()->report_globals) return 0;
+  BlockingMutexLock lock(&mu_for_globals);
+  int res = 0;
+  for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
+    const Global &g = *l->g;
+    if (flags()->report_globals >= 2)
+      ReportGlobal(g, "Search");
+    if (IsAddressNearGlobal(addr, g)) {
+      globals[res] = g;
+      if (reg_sites)
+        reg_sites[res] = FindRegistrationSite(&g);
+      res++;
+      if (res == max_globals) break;
+    }
+  }
+  return res;
+}
+
+enum GlobalSymbolState {
+  UNREGISTERED = 0,
+  REGISTERED = 1
+};
+
+// Check ODR violation for the given global G via a special ODR indicator.
+// We use this method in case the compiler instruments global variables
+// through their local aliases.
+static void CheckODRViolationViaIndicator(const Global *g) {
+  // Instrumentation requests to skip the ODR check.
+  if (g->odr_indicator == UINTPTR_MAX)
+    return;
+  u8 *odr_indicator = reinterpret_cast<u8 *>(g->odr_indicator);
+  if (*odr_indicator == UNREGISTERED) {
+    *odr_indicator = REGISTERED;
+    return;
+  }
+  // If *odr_indicator is already REGISTERED, some module has already
+  // registered an externally visible symbol with the same name. This is an
+  // ODR violation.
+  for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
+    if (g->odr_indicator == l->g->odr_indicator &&
+        (flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
+        !IsODRViolationSuppressed(g->name))
+      ReportODRViolation(g, FindRegistrationSite(g),
+                         l->g, FindRegistrationSite(l->g));
+  }
+}
+
+// Check ODR violation for the given global G by checking if it's already
+// poisoned.
+// We use this method in case the compiler doesn't use private aliases for
+// global variables.
+static void CheckODRViolationViaPoisoning(const Global *g) {
+  if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) {
+    // This check may not be enough: if the first global is much larger, the
+    // entire redzone of the second global may be within the first global.
+    for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
+      if (g->beg == l->g->beg &&
+          (flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
+          !IsODRViolationSuppressed(g->name))
+        ReportODRViolation(g, FindRegistrationSite(g),
+                           l->g, FindRegistrationSite(l->g));
+    }
+  }
+}
+
+// Clang provides two different ways to protect global variables: it can
+// poison the global itself or its private alias. In the former case we may
+// poison the same symbol multiple times, which lets us detect ODR violations
+// cheaply: if we try to poison an already poisoned global, we have an ODR
+// violation. In the latter case, we poison each symbol exactly once, so we
+// use a special indicator symbol to perform a similar check.
+// In either case, the compiler provides a special odr_indicator field in the
+// Global structure, which can contain two kinds of values:
+//   1) A non-zero value. In this case, odr_indicator is the address of the
+//      corresponding indicator variable for the given global.
+//   2) Zero. This means that we don't use private aliases for global variables
+//      and can freely check ODR violation with the first method.
+//
+// This routine chooses between the two methods of ODR violation detection.
+static inline bool UseODRIndicator(const Global *g) {
+  return g->odr_indicator > 0;
+}
+
+// Register a global variable.
+// This function may be called more than once for every global,
+// so we store the registered globals in a list.
+static void RegisterGlobal(const Global *g) {
+  CHECK(asan_inited);
+  if (flags()->report_globals >= 2)
+    ReportGlobal(*g, "Added");
+  CHECK(flags()->report_globals);
+  CHECK(AddrIsInMem(g->beg));
+  if (!AddrIsAlignedByGranularity(g->beg)) {
+    Report("The following global variable is not properly aligned.\n");
+    Report("This may happen if another global with the same name\n");
+    Report("resides in another non-instrumented module.\n");
+    Report("Or the global comes from a C file built w/o -fno-common.\n");
+    Report("In either case this is likely an ODR violation bug,\n");
+    Report("but AddressSanitizer cannot provide more details.\n");
+    ReportODRViolation(g, FindRegistrationSite(g), g, FindRegistrationSite(g));
+    CHECK(AddrIsAlignedByGranularity(g->beg));
+  }
+  CHECK(AddrIsAlignedByGranularity(g->size_with_redzone));
+  if (flags()->detect_odr_violation) {
+    // Try detecting ODR (One Definition Rule) violation, i.e. the situation
+    // where two globals with the same name are defined in different modules.
+    if (UseODRIndicator(g))
+      CheckODRViolationViaIndicator(g);
+    else
+      CheckODRViolationViaPoisoning(g);
+  }
+  if (CanPoisonMemory())
+    PoisonRedZones(*g);
+  ListOfGlobals *l = new(allocator_for_globals) ListOfGlobals;
+  l->g = g;
+  l->next = list_of_all_globals;
+  list_of_all_globals = l;
+  if (g->has_dynamic_init) {
+    if (!dynamic_init_globals) {
+      dynamic_init_globals =
+          new (allocator_for_globals) VectorOfGlobals;  // NOLINT
+      dynamic_init_globals->reserve(kDynamicInitGlobalsInitialCapacity);
+    }
+    DynInitGlobal dyn_global = { *g, false };
+    dynamic_init_globals->push_back(dyn_global);
+  }
+}
+
+static void UnregisterGlobal(const Global *g) {
+  CHECK(asan_inited);
+  if (flags()->report_globals >= 2)
+    ReportGlobal(*g, "Removed");
+  CHECK(flags()->report_globals);
+  CHECK(AddrIsInMem(g->beg));
+  CHECK(AddrIsAlignedByGranularity(g->beg));
+  CHECK(AddrIsAlignedByGranularity(g->size_with_redzone));
+  if (CanPoisonMemory())
+    PoisonShadowForGlobal(g, 0);
+  // We unpoison the shadow memory for the global but we do not remove it from
+  // the list because that would require O(n^2) time with the current list
+  // implementation. It might not be worth doing anyway.
+
+  // Release the ODR indicator.
+  if (UseODRIndicator(g) && g->odr_indicator != UINTPTR_MAX) {
+    u8 *odr_indicator = reinterpret_cast<u8 *>(g->odr_indicator);
+    *odr_indicator = UNREGISTERED;
+  }
+}
+
+void StopInitOrderChecking() {
+  BlockingMutexLock lock(&mu_for_globals);
+  if (!flags()->check_initialization_order || !dynamic_init_globals)
+    return;
+  flags()->check_initialization_order = false;
+  for (uptr i = 0, n = dynamic_init_globals->size(); i < n; ++i) {
+    DynInitGlobal &dyn_g = (*dynamic_init_globals)[i];
+    const Global *g = &dyn_g.g;
+    // Unpoison the whole global.
+    PoisonShadowForGlobal(g, 0);
+    // Poison redzones back.
+    PoisonRedZones(*g);
+  }
+}
+
+static bool IsASCII(unsigned char c) { return /*0x00 <= c &&*/ c <= 0x7F; }
+
+const char *MaybeDemangleGlobalName(const char *name) {
+  // We can spoil names of globals with C linkage, so use a heuristic
+  // approach to check if the name should be demangled.
+  bool should_demangle = false;
+  if (name[0] == '_' && name[1] == 'Z')
+    should_demangle = true;
+  else if (SANITIZER_WINDOWS && name[0] == '\01' && name[1] == '?')
+    should_demangle = true;
+
+  return should_demangle ? Symbolizer::GetOrInit()->Demangle(name) : name;
+}
+
+// Check if the global is a zero-terminated ASCII string. If so, print it.
+void PrintGlobalNameIfASCII(InternalScopedString *str, const __asan_global &g) {
+  for (uptr p = g.beg; p < g.beg + g.size - 1; p++) {
+    unsigned char c = *(unsigned char *)p;
+    if (c == '\0' || !IsASCII(c)) return;
+  }
+  if (*(char *)(g.beg + g.size - 1) != '\0') return;
+  str->append("  '%s' is ascii string '%s'\n", MaybeDemangleGlobalName(g.name),
+              (char *)g.beg);
+}
+
+static const char *GlobalFilename(const __asan_global &g) {
+  const char *res = g.module_name;
+  // Prefer the filename from the source location, if it is available.
+  if (g.location) res = g.location->filename;
+  CHECK(res);
+  return res;
+}
+
+void PrintGlobalLocation(InternalScopedString *str, const __asan_global &g) {
+  str->append("%s", GlobalFilename(g));
+  if (!g.location) return;
+  if (g.location->line_no) str->append(":%d", g.location->line_no);
+  if (g.location->column_no) str->append(":%d", g.location->column_no);
+}
+
+}  // namespace __asan
+
+// ---------------------- Interface ---------------- {{{1
+using namespace __asan;  // NOLINT
+
+
+// Apply __asan_register_globals to all globals found in the same loaded
+// executable or shared library as `flag'. The flag tracks whether globals have
+// already been registered or not for this image.
+void __asan_register_image_globals(uptr *flag) {
+  if (*flag)
+    return;
+  AsanApplyToGlobals(__asan_register_globals, flag);
+  *flag = 1;
+}
+
+// This mirrors __asan_register_image_globals.
+void __asan_unregister_image_globals(uptr *flag) {
+  if (!*flag)
+    return;
+  AsanApplyToGlobals(__asan_unregister_globals, flag);
+  *flag = 0;
+}
+
+void __asan_register_elf_globals(uptr *flag, void *start, void *stop) {
+  if (*flag) return;
+  if (!start) return;
+  CHECK_EQ(0, ((uptr)stop - (uptr)start) % sizeof(__asan_global));
+  __asan_global *globals_start = (__asan_global*)start;
+  __asan_global *globals_stop = (__asan_global*)stop;
+  __asan_register_globals(globals_start, globals_stop - globals_start);
+  *flag = 1;
+}
+
+void __asan_unregister_elf_globals(uptr *flag, void *start, void *stop) {
+  if (!*flag) return;
+  if (!start) return;
+  CHECK_EQ(0, ((uptr)stop - (uptr)start) % sizeof(__asan_global));
+  __asan_global *globals_start = (__asan_global*)start;
+  __asan_global *globals_stop = (__asan_global*)stop;
+  __asan_unregister_globals(globals_start, globals_stop - globals_start);
+  *flag = 0;
+}
+
+// Register an array of globals.
+void __asan_register_globals(__asan_global *globals, uptr n) {
+  if (!flags()->report_globals) return;
+  GET_STACK_TRACE_MALLOC;
+  u32 stack_id = StackDepotPut(stack);
+  BlockingMutexLock lock(&mu_for_globals);
+  if (!global_registration_site_vector) {
+    global_registration_site_vector =
+        new (allocator_for_globals) GlobalRegistrationSiteVector;  // NOLINT
+    global_registration_site_vector->reserve(128);
+  }
+  GlobalRegistrationSite site = {stack_id, &globals[0], &globals[n - 1]};
+  global_registration_site_vector->push_back(site);
+  if (flags()->report_globals >= 2) {
+    PRINT_CURRENT_STACK();
+    Printf("=== ID %d; %p %p\n", stack_id, &globals[0], &globals[n - 1]);
+  }
+  for (uptr i = 0; i < n; i++) {
+    if (SANITIZER_WINDOWS && globals[i].beg == 0) {
+      // The MSVC incremental linker may pad globals out to 256 bytes. As long
+      // as __asan_global is less than 256 bytes large and its size is a power
+      // of two, we can skip over the padding.
+      static_assert(
+          sizeof(__asan_global) < 256 &&
+              (sizeof(__asan_global) & (sizeof(__asan_global) - 1)) == 0,
+          "sizeof(__asan_global) incompatible with incremental linker padding");
+      // If these are padding bytes, the rest of the global should be zero.
+      CHECK(globals[i].size == 0 && globals[i].size_with_redzone == 0 &&
+            globals[i].name == nullptr && globals[i].module_name == nullptr &&
+            globals[i].odr_indicator == 0);
+      continue;
+    }
+    RegisterGlobal(&globals[i]);
+  }
+
+  // Poison the metadata. It should not be accessible to user code.
+  PoisonShadow(reinterpret_cast<uptr>(globals), n * sizeof(__asan_global),
+               kAsanGlobalRedzoneMagic);
+}
+
+// Unregister an array of globals.
+// We must do this when a shared object gets dlclosed.
+void __asan_unregister_globals(__asan_global *globals, uptr n) { + if (!flags()->report_globals) return; + BlockingMutexLock lock(&mu_for_globals); + for (uptr i = 0; i < n; i++) { + if (SANITIZER_WINDOWS && globals[i].beg == 0) { + // Skip globals that look like padding from the MSVC incremental linker. + // See comment in __asan_register_globals. + continue; + } + UnregisterGlobal(&globals[i]); + } + + // Unpoison the metadata. + PoisonShadow(reinterpret_cast<uptr>(globals), n * sizeof(__asan_global), 0); +} + +// This method runs immediately prior to dynamic initialization in each TU, +// when all dynamically initialized globals are unpoisoned. This method +// poisons all global variables not defined in this TU, so that a dynamic +// initializer can only touch global variables in the same TU. +void __asan_before_dynamic_init(const char *module_name) { + if (!flags()->check_initialization_order || + !CanPoisonMemory() || + !dynamic_init_globals) + return; + bool strict_init_order = flags()->strict_init_order; + CHECK(module_name); + CHECK(asan_inited); + BlockingMutexLock lock(&mu_for_globals); + if (flags()->report_globals >= 3) + Printf("DynInitPoison module: %s\n", module_name); + for (uptr i = 0, n = dynamic_init_globals->size(); i < n; ++i) { + DynInitGlobal &dyn_g = (*dynamic_init_globals)[i]; + const Global *g = &dyn_g.g; + if (dyn_g.initialized) + continue; + if (g->module_name != module_name) + PoisonShadowForGlobal(g, kAsanInitializationOrderMagic); + else if (!strict_init_order) + dyn_g.initialized = true; + } +} + +// This method runs immediately after dynamic initialization in each TU, when +// all dynamically initialized globals except for those defined in the current +// TU are poisoned. It simply unpoisons all dynamically initialized globals. +void __asan_after_dynamic_init() { + if (!flags()->check_initialization_order || + !CanPoisonMemory() || + !dynamic_init_globals) + return; + CHECK(asan_inited); + BlockingMutexLock lock(&mu_for_globals); + // FIXME: Optionally report that we're unpoisoning globals from a module. + for (uptr i = 0, n = dynamic_init_globals->size(); i < n; ++i) { + DynInitGlobal &dyn_g = (*dynamic_init_globals)[i]; + const Global *g = &dyn_g.g; + if (!dyn_g.initialized) { + // Unpoison the whole global. + PoisonShadowForGlobal(g, 0); + // Poison redzones back. + PoisonRedZones(*g); + } + } +} diff --git a/contrib/compiler-rt/lib/asan/asan_globals_win.cc b/contrib/compiler-rt/lib/asan/asan_globals_win.cc new file mode 100644 index 000000000000..0e75992bfc8d --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_globals_win.cc @@ -0,0 +1,62 @@ +//===-- asan_globals_win.cc -----------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Global registration code that is linked into every Windows DLL and EXE. 
+//
+//===----------------------------------------------------------------------===//
+
+#include "asan_interface_internal.h"
+#if SANITIZER_WINDOWS
+
+namespace __asan {
+
+#pragma section(".ASAN$GA", read, write)  // NOLINT
+#pragma section(".ASAN$GZ", read, write)  // NOLINT
+extern "C" __declspec(allocate(".ASAN$GA"))
+    ALIGNED(sizeof(__asan_global)) __asan_global __asan_globals_start = {};
+extern "C" __declspec(allocate(".ASAN$GZ"))
+    ALIGNED(sizeof(__asan_global)) __asan_global __asan_globals_end = {};
+#pragma comment(linker, "/merge:.ASAN=.data")
+
+static void call_on_globals(void (*hook)(__asan_global *, uptr)) {
+  __asan_global *start = &__asan_globals_start + 1;
+  __asan_global *end = &__asan_globals_end;
+  uptr bytediff = (uptr)end - (uptr)start;
+  if (bytediff % sizeof(__asan_global) != 0) {
+#if defined(SANITIZER_DLL_THUNK) || defined(SANITIZER_DYNAMIC_RUNTIME_THUNK)
+    __debugbreak();
+#else
+    CHECK(0 && "corrupt asan global array");
+#endif
+  }
+  // We know end >= start because the linker sorts the portion after the dollar
+  // sign alphabetically.
+  uptr n = end - start;
+  hook(start, n);
+}
+
+static void register_dso_globals() {
+  call_on_globals(&__asan_register_globals);
+}
+
+static void unregister_dso_globals() {
+  call_on_globals(&__asan_unregister_globals);
+}
+
+// Register globals
+#pragma section(".CRT$XCU", long, read)  // NOLINT
+#pragma section(".CRT$XTX", long, read)  // NOLINT
+extern "C" __declspec(allocate(".CRT$XCU"))
+void (*const __asan_dso_reg_hook)() = &register_dso_globals;
+extern "C" __declspec(allocate(".CRT$XTX"))
+void (*const __asan_dso_unreg_hook)() = &unregister_dso_globals;
+
+} // namespace __asan
+
+#endif  // SANITIZER_WINDOWS
diff --git a/contrib/compiler-rt/lib/asan/asan_init_version.h b/contrib/compiler-rt/lib/asan/asan_init_version.h
new file mode 100644
index 000000000000..c49fcd740248
--- /dev/null
+++ b/contrib/compiler-rt/lib/asan/asan_init_version.h
@@ -0,0 +1,45 @@
+//===-- asan_init_version.h -------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// This header defines a versioned __asan_init function to be called at the
+// startup of the instrumented program.
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_INIT_VERSION_H
+#define ASAN_INIT_VERSION_H
+
+#include "sanitizer_common/sanitizer_platform.h"
+
+extern "C" {
+  // Every time the ASan ABI changes we also change the version number in the
+  // __asan_init function name. Objects built with incompatible ASan ABI
+  // versions will not link with the run-time.
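+  //
+  // For illustration only (hypothetical shape, not part of this header): the
+  // compiler emits a module constructor into each instrumented object that
+  // references the versioned symbol, roughly
+  //   __asan_init();
+  //   __asan_version_mismatch_check_v8();
+  // so mixing ABI versions is caught at link or load time.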
+  //
+  // Changes between ABI versions:
+  // v1=>v2: added 'module_name' to __asan_global
+  // v2=>v3: stack frame description (created by the compiler)
+  //         contains the function PC as the 3rd field (see
+  //         DescribeAddressIfStack)
+  // v3=>v4: added '__asan_global_source_location' to __asan_global
+  // v4=>v5: changed the semantics and format of __asan_stack_malloc_ and
+  //         __asan_stack_free_ functions
+  // v5=>v6: changed the name of the version check symbol
+  // v6=>v7: added 'odr_indicator' to __asan_global
+  // v7=>v8: added '__asan_(un)register_image_globals' functions for dead
+  //         stripping support on Mach-O platforms
+#if SANITIZER_WORDSIZE == 32 && SANITIZER_ANDROID
+  // v8=>v9: 32-bit Android switched to dynamic shadow
+  #define __asan_version_mismatch_check __asan_version_mismatch_check_v9
+#else
+  #define __asan_version_mismatch_check __asan_version_mismatch_check_v8
+#endif
+}
+
+#endif  // ASAN_INIT_VERSION_H
diff --git a/contrib/compiler-rt/lib/asan/asan_interceptors.cc b/contrib/compiler-rt/lib/asan/asan_interceptors.cc
new file mode 100644
index 000000000000..aac2bb8a6bbf
--- /dev/null
+++ b/contrib/compiler-rt/lib/asan/asan_interceptors.cc
@@ -0,0 +1,667 @@
+//===-- asan_interceptors.cc ----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Intercept various libc functions.
+//===----------------------------------------------------------------------===//
+
+#include "asan_interceptors.h"
+#include "asan_allocator.h"
+#include "asan_internal.h"
+#include "asan_mapping.h"
+#include "asan_poisoning.h"
+#include "asan_report.h"
+#include "asan_stack.h"
+#include "asan_stats.h"
+#include "asan_suppressions.h"
+#include "lsan/lsan_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
+
+// There is no general interception at all on Fuchsia and RTEMS.
+// Only the functions in asan_interceptors_memintrinsics.cc are
+// really defined to replace libc functions.
+#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
+
+#if SANITIZER_POSIX
+#include "sanitizer_common/sanitizer_posix.h"
+#endif
+
+#if ASAN_INTERCEPT__UNWIND_RAISEEXCEPTION || \
+    ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION
+#include <unwind.h>
+#endif
+
+#if defined(__i386) && SANITIZER_LINUX
+#define ASAN_PTHREAD_CREATE_VERSION "GLIBC_2.1"
+#elif defined(__mips__) && SANITIZER_LINUX
+#define ASAN_PTHREAD_CREATE_VERSION "GLIBC_2.2"
+#endif
+
+namespace __asan {
+
+#define ASAN_READ_STRING_OF_LEN(ctx, s, len, n)                 \
+  ASAN_READ_RANGE((ctx), (s),                                   \
+    common_flags()->strict_string_checks ? (len) + 1 : (n))
+
+#define ASAN_READ_STRING(ctx, s, n)                             \
+  ASAN_READ_STRING_OF_LEN((ctx), (s), REAL(strlen)(s), (n))
+
+static inline uptr MaybeRealStrnlen(const char *s, uptr maxlen) {
+#if SANITIZER_INTERCEPT_STRNLEN
+  if (REAL(strnlen)) {
+    return REAL(strnlen)(s, maxlen);
+  }
+#endif
+  return internal_strnlen(s, maxlen);
+}
+
+void SetThreadName(const char *name) {
+  AsanThread *t = GetCurrentThread();
+  if (t)
+    asanThreadRegistry().SetThreadName(t->tid(), name);
+}
+
+int OnExit() {
+  if (CAN_SANITIZE_LEAKS && common_flags()->detect_leaks &&
+      __lsan::HasReportedLeaks()) {
+    return common_flags()->exitcode;
+  }
+  // FIXME: ask frontend whether we need to return failure.
+  return 0;
+}
+
+} // namespace __asan
+
+// ---------------------- Wrappers ---------------- {{{1
+using namespace __asan;  // NOLINT
+
+DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr)
+DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
+
+#define ASAN_INTERCEPTOR_ENTER(ctx, func) \
+  AsanInterceptorContext _ctx = {#func};  \
+  ctx = (void *)&_ctx;                    \
+  (void) ctx;                             \
+
+#define COMMON_INTERCEPT_FUNCTION(name) ASAN_INTERCEPT_FUNC(name)
+#define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
+  ASAN_INTERCEPT_FUNC_VER(name, ver)
+#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
+  ASAN_WRITE_RANGE(ctx, ptr, size)
+#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
+  ASAN_READ_RANGE(ctx, ptr, size)
+#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...)   \
+  ASAN_INTERCEPTOR_ENTER(ctx, func);               \
+  do {                                             \
+    if (asan_init_is_running)                      \
+      return REAL(func)(__VA_ARGS__);              \
+    if (SANITIZER_MAC && UNLIKELY(!asan_inited))   \
+      return REAL(func)(__VA_ARGS__);              \
+    ENSURE_ASAN_INITED();                          \
+  } while (false)
+#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
+  do {                                            \
+  } while (false)
+#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
+  do {                                         \
+  } while (false)
+#define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
+  do {                                         \
+  } while (false)
+#define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \
+  do {                                                      \
+  } while (false)
+#define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) SetThreadName(name)
+// Should be asanThreadRegistry().SetThreadNameByUserId(thread, name),
+// but asan does not remember UserIds for threads (pthread_t), and it
+// remembers all threads that ever existed, so a linear search by UserId
+// can be slow.
+#define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
+  do {                                                         \
+  } while (false)
+#define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name)
+// Strict init-order checking is dlopen-hostile:
+// https://github.com/google/sanitizers/issues/178
+#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) \
+  do {                                               \
+    if (flags()->strict_init_order)                  \
+      StopInitOrderChecking();                       \
+    CheckNoDeepBind(filename, flag);                 \
+  } while (false)
+#define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit()
+#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle)
+#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED()
+#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!asan_inited)
+#define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \
+  if (AsanThread *t = GetCurrentThread()) {          \
+    *begin = t->tls_begin();                         \
+    *end = t->tls_end();                             \
+  } else {                                           \
+    *begin = *end = 0;                               \
+  }
+
+#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size) \
+  do {                                                       \
+    ASAN_INTERCEPTOR_ENTER(ctx, memmove);                    \
+    ASAN_MEMMOVE_IMPL(ctx, to, from, size);                  \
+  } while (false)
+
+#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size) \
+  do {                                                      \
+    ASAN_INTERCEPTOR_ENTER(ctx, memcpy);                    \
+    ASAN_MEMCPY_IMPL(ctx, to, from, size);                  \
+  } while (false)
+
+#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size) \
+  do {                                                      \
+    ASAN_INTERCEPTOR_ENTER(ctx, memset);                    \
+    ASAN_MEMSET_IMPL(ctx, block, c, size);                  \
+  } while (false)
+
+#include "sanitizer_common/sanitizer_common_interceptors.inc"
+#include "sanitizer_common/sanitizer_signal_interceptors.inc"
+
+// Syscall interceptors don't have contexts, so we don't support suppressions
+// for them.
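+// For intuition (illustrative, not the generated code verbatim): for a
+// syscall such as read(fd, buf, count), the hooks generated by
+// sanitizer_common_syscalls.inc boil down to roughly
+//   PRE_SYSCALL(read)(...)  { COMMON_SYSCALL_PRE_WRITE_RANGE(buf, count); }
+//   POST_SYSCALL(read)(...) { /* post write range is a no-op for ASan */ }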
+#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) ASAN_READ_RANGE(nullptr, p, s)
+#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) ASAN_WRITE_RANGE(nullptr, p, s)
+#define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
+  do {                                       \
+    (void)(p);                               \
+    (void)(s);                               \
+  } while (false)
+#define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
+  do {                                        \
+    (void)(p);                                \
+    (void)(s);                                \
+  } while (false)
+#include "sanitizer_common/sanitizer_common_syscalls.inc"
+#include "sanitizer_common/sanitizer_syscalls_netbsd.inc"
+
+struct ThreadStartParam {
+  atomic_uintptr_t t;
+  atomic_uintptr_t is_registered;
+};
+
+#if ASAN_INTERCEPT_PTHREAD_CREATE
+static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
+  ThreadStartParam *param = reinterpret_cast<ThreadStartParam *>(arg);
+  AsanThread *t = nullptr;
+  while ((t = reinterpret_cast<AsanThread *>(
+              atomic_load(&param->t, memory_order_acquire))) == nullptr)
+    internal_sched_yield();
+  SetCurrentThread(t);
+  return t->ThreadStart(GetTid(), &param->is_registered);
+}
+
+INTERCEPTOR(int, pthread_create, void *thread,
+    void *attr, void *(*start_routine)(void*), void *arg) {
+  EnsureMainThreadIDIsCorrect();
+  // Strict init-order checking is thread-hostile.
+  if (flags()->strict_init_order)
+    StopInitOrderChecking();
+  GET_STACK_TRACE_THREAD;
+  int detached = 0;
+  if (attr)
+    REAL(pthread_attr_getdetachstate)(attr, &detached);
+  ThreadStartParam param;
+  atomic_store(&param.t, 0, memory_order_relaxed);
+  atomic_store(&param.is_registered, 0, memory_order_relaxed);
+  int result;
+  {
+    // Ignore all allocations made by pthread_create: thread stack/TLS may be
+    // stored by pthread for future reuse even after thread destruction, and
+    // the linked list it's stored in doesn't even hold valid pointers to the
+    // objects, the latter are calculated by obscure pointer arithmetic.
+#if CAN_SANITIZE_LEAKS
+    __lsan::ScopedInterceptorDisabler disabler;
+#endif
+    result = REAL(pthread_create)(thread, attr, asan_thread_start, &param);
+  }
+  if (result == 0) {
+    u32 current_tid = GetCurrentTidOrInvalid();
+    AsanThread *t =
+        AsanThread::Create(start_routine, arg, current_tid, &stack, detached);
+    atomic_store(&param.t, reinterpret_cast<uptr>(t), memory_order_release);
+    // Wait until the AsanThread object is initialized and the ThreadRegistry
+    // entry is in "started" state. One reason for this is that after this
+    // interceptor exits, the child thread's stack may be the only thing holding
+    // the |arg| pointer. This may cause LSan to report a leak if leak checking
+    // happens at a point when the interceptor has already exited, but the stack
+    // range for the child thread is not yet known.
+    while (atomic_load(&param.is_registered, memory_order_acquire) == 0)
+      internal_sched_yield();
+  }
+  return result;
+}
+
+INTERCEPTOR(int, pthread_join, void *t, void **arg) {
+  return real_pthread_join(t, arg);
+}
+
+DEFINE_REAL_PTHREAD_FUNCTIONS
+#endif  // ASAN_INTERCEPT_PTHREAD_CREATE
+
+#if ASAN_INTERCEPT_SWAPCONTEXT
+static void ClearShadowMemoryForContextStack(uptr stack, uptr ssize) {
+  // Align to page size.
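+  // E.g. (illustrative, assuming 4096-byte pages): stack = 0x12345678 and
+  // ssize = 0x100 give bottom = 0x12345000; ssize grows by 0x678 to 0x778
+  // and is then rounded up to 0x1000, so the whole containing page range
+  // is cleared.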
+  uptr PageSize = GetPageSizeCached();
+  uptr bottom = stack & ~(PageSize - 1);
+  ssize += stack - bottom;
+  ssize = RoundUpTo(ssize, PageSize);
+  static const uptr kMaxSaneContextStackSize = 1 << 22;  // 4 MB
+  if (AddrIsInMem(bottom) && ssize && ssize <= kMaxSaneContextStackSize) {
+    PoisonShadow(bottom, ssize, 0);
+  }
+}
+
+INTERCEPTOR(int, swapcontext, struct ucontext_t *oucp,
+            struct ucontext_t *ucp) {
+  static bool reported_warning = false;
+  if (!reported_warning) {
+    Report("WARNING: ASan doesn't fully support makecontext/swapcontext "
+           "functions and may produce false positives in some cases!\n");
+    reported_warning = true;
+  }
+  // Clear shadow memory for the new context (it may share its stack
+  // with the current context).
+  uptr stack, ssize;
+  ReadContextStack(ucp, &stack, &ssize);
+  ClearShadowMemoryForContextStack(stack, ssize);
+#if __has_attribute(__indirect_return__) && \
+    (defined(__x86_64__) || defined(__i386__))
+  int (*real_swapcontext)(struct ucontext_t *, struct ucontext_t *)
+      __attribute__((__indirect_return__))
+      = REAL(swapcontext);
+  int res = real_swapcontext(oucp, ucp);
+#else
+  int res = REAL(swapcontext)(oucp, ucp);
+#endif
+  // swapcontext technically does not return, but the program may swap context
+  // to "oucp" later, which would look as if swapcontext() returned 0.
+  // We need to clear shadow for ucp once again, as it may be in an arbitrary
+  // state.
+  ClearShadowMemoryForContextStack(stack, ssize);
+  return res;
+}
+#endif  // ASAN_INTERCEPT_SWAPCONTEXT
+
+#if SANITIZER_NETBSD
+#define longjmp __longjmp14
+#define siglongjmp __siglongjmp14
+#endif
+
+INTERCEPTOR(void, longjmp, void *env, int val) {
+  __asan_handle_no_return();
+  REAL(longjmp)(env, val);
+}
+
+#if ASAN_INTERCEPT__LONGJMP
+INTERCEPTOR(void, _longjmp, void *env, int val) {
+  __asan_handle_no_return();
+  REAL(_longjmp)(env, val);
+}
+#endif
+
+#if ASAN_INTERCEPT___LONGJMP_CHK
+INTERCEPTOR(void, __longjmp_chk, void *env, int val) {
+  __asan_handle_no_return();
+  REAL(__longjmp_chk)(env, val);
+}
+#endif
+
+#if ASAN_INTERCEPT_SIGLONGJMP
+INTERCEPTOR(void, siglongjmp, void *env, int val) {
+  __asan_handle_no_return();
+  REAL(siglongjmp)(env, val);
+}
+#endif
+
+#if ASAN_INTERCEPT___CXA_THROW
+INTERCEPTOR(void, __cxa_throw, void *a, void *b, void *c) {
+  CHECK(REAL(__cxa_throw));
+  __asan_handle_no_return();
+  REAL(__cxa_throw)(a, b, c);
+}
+#endif
+
+#if ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION
+INTERCEPTOR(void, __cxa_rethrow_primary_exception, void *a) {
+  CHECK(REAL(__cxa_rethrow_primary_exception));
+  __asan_handle_no_return();
+  REAL(__cxa_rethrow_primary_exception)(a);
+}
+#endif
+
+#if ASAN_INTERCEPT__UNWIND_RAISEEXCEPTION
+INTERCEPTOR(_Unwind_Reason_Code, _Unwind_RaiseException,
+            _Unwind_Exception *object) {
+  CHECK(REAL(_Unwind_RaiseException));
+  __asan_handle_no_return();
+  return REAL(_Unwind_RaiseException)(object);
+}
+#endif
+
+#if ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION
+INTERCEPTOR(_Unwind_Reason_Code, _Unwind_SjLj_RaiseException,
+            _Unwind_Exception *object) {
+  CHECK(REAL(_Unwind_SjLj_RaiseException));
+  __asan_handle_no_return();
+  return REAL(_Unwind_SjLj_RaiseException)(object);
+}
+#endif
+
+#if ASAN_INTERCEPT_INDEX
+# if ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX
+INTERCEPTOR(char*, index, const char *string, int c)
+  ALIAS(WRAPPER_NAME(strchr));
+# else
+# if SANITIZER_MAC
+DECLARE_REAL(char*, index, const char *string, int c)
+OVERRIDE_FUNCTION(index, strchr);
+# else
+DEFINE_REAL(char*, index, const char *string, int c)
+# endif
+# endif
+#endif  // ASAN_INTERCEPT_INDEX
+
+// For both strcat() and strncat() we need to check the validity of the |to|
+// argument irrespective of the |from| length.
+INTERCEPTOR(char*, strcat, char *to, const char *from) {  // NOLINT
+  void *ctx;
+  ASAN_INTERCEPTOR_ENTER(ctx, strcat);  // NOLINT
+  ENSURE_ASAN_INITED();
+  if (flags()->replace_str) {
+    uptr from_length = REAL(strlen)(from);
+    ASAN_READ_RANGE(ctx, from, from_length + 1);
+    uptr to_length = REAL(strlen)(to);
+    ASAN_READ_STRING_OF_LEN(ctx, to, to_length, to_length);
+    ASAN_WRITE_RANGE(ctx, to + to_length, from_length + 1);
+    // If the copying actually happens, the |from| string should not overlap
+    // with the resulting string starting at |to|, which has a length of
+    // to_length + from_length + 1.
+    if (from_length > 0) {
+      CHECK_RANGES_OVERLAP("strcat", to, from_length + to_length + 1,
+                           from, from_length + 1);
+    }
+  }
+  return REAL(strcat)(to, from);  // NOLINT
+}
+
+INTERCEPTOR(char*, strncat, char *to, const char *from, uptr size) {
+  void *ctx;
+  ASAN_INTERCEPTOR_ENTER(ctx, strncat);
+  ENSURE_ASAN_INITED();
+  if (flags()->replace_str) {
+    uptr from_length = MaybeRealStrnlen(from, size);
+    uptr copy_length = Min(size, from_length + 1);
+    ASAN_READ_RANGE(ctx, from, copy_length);
+    uptr to_length = REAL(strlen)(to);
+    ASAN_READ_STRING_OF_LEN(ctx, to, to_length, to_length);
+    ASAN_WRITE_RANGE(ctx, to + to_length, from_length + 1);
+    if (from_length > 0) {
+      CHECK_RANGES_OVERLAP("strncat", to, to_length + copy_length + 1,
+                           from, copy_length);
+    }
+  }
+  return REAL(strncat)(to, from, size);
+}
+
+INTERCEPTOR(char*, strcpy, char *to, const char *from) {  // NOLINT
+  void *ctx;
+  ASAN_INTERCEPTOR_ENTER(ctx, strcpy);  // NOLINT
+#if SANITIZER_MAC
+  if (UNLIKELY(!asan_inited)) return REAL(strcpy)(to, from);  // NOLINT
+#endif
+  // strcpy is called from malloc_default_purgeable_zone()
+  // in __asan::ReplaceSystemAlloc() on Mac.
+  if (asan_init_is_running) {
+    return REAL(strcpy)(to, from);  // NOLINT
+  }
+  ENSURE_ASAN_INITED();
+  if (flags()->replace_str) {
+    uptr from_size = REAL(strlen)(from) + 1;
+    CHECK_RANGES_OVERLAP("strcpy", to, from_size, from, from_size);
+    ASAN_READ_RANGE(ctx, from, from_size);
+    ASAN_WRITE_RANGE(ctx, to, from_size);
+  }
+  return REAL(strcpy)(to, from);  // NOLINT
+}
+
+INTERCEPTOR(char*, strdup, const char *s) {
+  void *ctx;
+  ASAN_INTERCEPTOR_ENTER(ctx, strdup);
+  if (UNLIKELY(!asan_inited)) return internal_strdup(s);
+  ENSURE_ASAN_INITED();
+  uptr length = REAL(strlen)(s);
+  if (flags()->replace_str) {
+    ASAN_READ_RANGE(ctx, s, length + 1);
+  }
+  GET_STACK_TRACE_MALLOC;
+  void *new_mem = asan_malloc(length + 1, &stack);
+  REAL(memcpy)(new_mem, s, length + 1);
+  return reinterpret_cast<char*>(new_mem);
+}
+
+#if ASAN_INTERCEPT___STRDUP
+INTERCEPTOR(char*, __strdup, const char *s) {
+  void *ctx;
+  ASAN_INTERCEPTOR_ENTER(ctx, strdup);
+  if (UNLIKELY(!asan_inited)) return internal_strdup(s);
+  ENSURE_ASAN_INITED();
+  uptr length = REAL(strlen)(s);
+  if (flags()->replace_str) {
+    ASAN_READ_RANGE(ctx, s, length + 1);
+  }
+  GET_STACK_TRACE_MALLOC;
+  void *new_mem = asan_malloc(length + 1, &stack);
+  REAL(memcpy)(new_mem, s, length + 1);
+  return reinterpret_cast<char*>(new_mem);
+}
+#endif // ASAN_INTERCEPT___STRDUP
+
+INTERCEPTOR(char*, strncpy, char *to, const char *from, uptr size) {
+  void *ctx;
+  ASAN_INTERCEPTOR_ENTER(ctx, strncpy);
+  ENSURE_ASAN_INITED();
+  if (flags()->replace_str) {
+    uptr from_size = Min(size, MaybeRealStrnlen(from, size) + 1);
+    CHECK_RANGES_OVERLAP("strncpy", to, from_size, from, from_size);
+    ASAN_READ_RANGE(ctx, from, from_size);
+    ASAN_WRITE_RANGE(ctx, to, size);
+  }
+  return REAL(strncpy)(to, from, size);
+}
+
+INTERCEPTOR(long, strtol, const char *nptr,  // NOLINT
+            char **endptr, int base) {
+  void *ctx;
+  ASAN_INTERCEPTOR_ENTER(ctx, strtol);
+  ENSURE_ASAN_INITED();
+  if (!flags()->replace_str) {
+    return REAL(strtol)(nptr, endptr, base);
+  }
+  char *real_endptr;
+  long result = REAL(strtol)(nptr, &real_endptr, base);  // NOLINT
+  StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
+  return result;
+}
+
+INTERCEPTOR(int, atoi, const char *nptr) {
+  void *ctx;
+  ASAN_INTERCEPTOR_ENTER(ctx, atoi);
+#if SANITIZER_MAC
+  if (UNLIKELY(!asan_inited)) return REAL(atoi)(nptr);
+#endif
+  ENSURE_ASAN_INITED();
+  if (!flags()->replace_str) {
+    return REAL(atoi)(nptr);
+  }
+  char *real_endptr;
+  // "man atoi" says that the behavior of atoi(nptr) is the same as that of
+  // strtol(nptr, 0, 10), i.e. it sets errno to ERANGE if the parsed integer
+  // can't be stored in a *long* (even if long is wider than int). So we just
+  // imitate this behavior.
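+  // E.g. (illustrative): for atoi("99999999999999999999") the strtol call
+  // below returns LONG_MAX with errno set to ERANGE, and the value is then
+  // truncated by the conversion to int.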
+  int result = REAL(strtol)(nptr, &real_endptr, 10);
+  FixRealStrtolEndptr(nptr, &real_endptr);
+  ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1);
+  return result;
+}
+
+INTERCEPTOR(long, atol, const char *nptr) {  // NOLINT
+  void *ctx;
+  ASAN_INTERCEPTOR_ENTER(ctx, atol);
+#if SANITIZER_MAC
+  if (UNLIKELY(!asan_inited)) return REAL(atol)(nptr);
+#endif
+  ENSURE_ASAN_INITED();
+  if (!flags()->replace_str) {
+    return REAL(atol)(nptr);
+  }
+  char *real_endptr;
+  long result = REAL(strtol)(nptr, &real_endptr, 10);  // NOLINT
+  FixRealStrtolEndptr(nptr, &real_endptr);
+  ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1);
+  return result;
+}
+
+#if ASAN_INTERCEPT_ATOLL_AND_STRTOLL
+INTERCEPTOR(long long, strtoll, const char *nptr,  // NOLINT
+            char **endptr, int base) {
+  void *ctx;
+  ASAN_INTERCEPTOR_ENTER(ctx, strtoll);
+  ENSURE_ASAN_INITED();
+  if (!flags()->replace_str) {
+    return REAL(strtoll)(nptr, endptr, base);
+  }
+  char *real_endptr;
+  long long result = REAL(strtoll)(nptr, &real_endptr, base);  // NOLINT
+  StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
+  return result;
+}
+
+INTERCEPTOR(long long, atoll, const char *nptr) {  // NOLINT
+  void *ctx;
+  ASAN_INTERCEPTOR_ENTER(ctx, atoll);
+  ENSURE_ASAN_INITED();
+  if (!flags()->replace_str) {
+    return REAL(atoll)(nptr);
+  }
+  char *real_endptr;
+  long long result = REAL(strtoll)(nptr, &real_endptr, 10);  // NOLINT
+  FixRealStrtolEndptr(nptr, &real_endptr);
+  ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1);
+  return result;
+}
+#endif  // ASAN_INTERCEPT_ATOLL_AND_STRTOLL
+
+#if ASAN_INTERCEPT___CXA_ATEXIT
+static void AtCxaAtexit(void *unused) {
+  (void)unused;
+  StopInitOrderChecking();
+}
+
+INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg,
+            void *dso_handle) {
+#if SANITIZER_MAC
+  if (UNLIKELY(!asan_inited)) return REAL(__cxa_atexit)(func, arg, dso_handle);
+#endif
+  ENSURE_ASAN_INITED();
+  int res = REAL(__cxa_atexit)(func, arg, dso_handle);
+  REAL(__cxa_atexit)(AtCxaAtexit, nullptr, nullptr);
+  return res;
+}
+#endif  // ASAN_INTERCEPT___CXA_ATEXIT
+
+// ---------------------- InitializeAsanInterceptors ---------------- {{{1
+namespace __asan {
+void InitializeAsanInterceptors() {
+  static bool was_called_once;
+  CHECK(!was_called_once);
+  was_called_once = true;
+  InitializeCommonInterceptors();
+  InitializeSignalInterceptors();
+
+  // Intercept str* functions.
+  ASAN_INTERCEPT_FUNC(strcat);  // NOLINT
+  ASAN_INTERCEPT_FUNC(strcpy);  // NOLINT
+  ASAN_INTERCEPT_FUNC(strncat);
+  ASAN_INTERCEPT_FUNC(strncpy);
+  ASAN_INTERCEPT_FUNC(strdup);
+#if ASAN_INTERCEPT___STRDUP
+  ASAN_INTERCEPT_FUNC(__strdup);
+#endif
+#if ASAN_INTERCEPT_INDEX && ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX
+  ASAN_INTERCEPT_FUNC(index);
+#endif
+
+  ASAN_INTERCEPT_FUNC(atoi);
+  ASAN_INTERCEPT_FUNC(atol);
+  ASAN_INTERCEPT_FUNC(strtol);
+#if ASAN_INTERCEPT_ATOLL_AND_STRTOLL
+  ASAN_INTERCEPT_FUNC(atoll);
+  ASAN_INTERCEPT_FUNC(strtoll);
+#endif
+
+  // Intercept jump-related functions.
+  ASAN_INTERCEPT_FUNC(longjmp);
+
+#if ASAN_INTERCEPT_SWAPCONTEXT
+  ASAN_INTERCEPT_FUNC(swapcontext);
+#endif
+#if ASAN_INTERCEPT__LONGJMP
+  ASAN_INTERCEPT_FUNC(_longjmp);
+#endif
+#if ASAN_INTERCEPT___LONGJMP_CHK
+  ASAN_INTERCEPT_FUNC(__longjmp_chk);
+#endif
+#if ASAN_INTERCEPT_SIGLONGJMP
+  ASAN_INTERCEPT_FUNC(siglongjmp);
+#endif
+
+  // Intercept exception handling functions.
+#if ASAN_INTERCEPT___CXA_THROW
+  ASAN_INTERCEPT_FUNC(__cxa_throw);
+#endif
+#if ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION
+  ASAN_INTERCEPT_FUNC(__cxa_rethrow_primary_exception);
+#endif
+  // Indirectly intercept std::rethrow_exception.
+#if ASAN_INTERCEPT__UNWIND_RAISEEXCEPTION
+  INTERCEPT_FUNCTION(_Unwind_RaiseException);
+#endif
+  // Indirectly intercept std::rethrow_exception.
+#if ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION
+  INTERCEPT_FUNCTION(_Unwind_SjLj_RaiseException);
+#endif
+
+  // Intercept threading-related functions.
+#if ASAN_INTERCEPT_PTHREAD_CREATE
+#if defined(ASAN_PTHREAD_CREATE_VERSION)
+  ASAN_INTERCEPT_FUNC_VER(pthread_create, ASAN_PTHREAD_CREATE_VERSION);
+#else
+  ASAN_INTERCEPT_FUNC(pthread_create);
+#endif
+  ASAN_INTERCEPT_FUNC(pthread_join);
+#endif
+
+  // Intercept the atexit function.
+#if ASAN_INTERCEPT___CXA_ATEXIT
+  ASAN_INTERCEPT_FUNC(__cxa_atexit);
+#endif
+
+  InitializePlatformInterceptors();
+
+  VReport(1, "AddressSanitizer: libc interceptors initialized\n");
+}
+
+} // namespace __asan
+
+#endif  // !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
diff --git a/contrib/compiler-rt/lib/asan/asan_interceptors.h b/contrib/compiler-rt/lib/asan/asan_interceptors.h
new file mode 100644
index 000000000000..50895b167990
--- /dev/null
+++ b/contrib/compiler-rt/lib/asan/asan_interceptors.h
@@ -0,0 +1,135 @@
+//===-- asan_interceptors.h -------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan-private header for asan_interceptors.cc
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_INTERCEPTORS_H
+#define ASAN_INTERCEPTORS_H
+
+#include "asan_internal.h"
+#include "asan_interceptors_memintrinsics.h"
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_platform_interceptors.h"
+
+namespace __asan {
+
+void InitializeAsanInterceptors();
+void InitializePlatformInterceptors();
+
+#define ENSURE_ASAN_INITED()      \
+  do {                            \
+    CHECK(!asan_init_is_running); \
+    if (UNLIKELY(!asan_inited)) { \
+      AsanInitFromRtl();          \
+    }                             \
+  } while (0)
+
+} // namespace __asan
+
+// There is no general interception at all on Fuchsia and RTEMS.
+// Only the functions in asan_interceptors_memintrinsics.h are
+// really defined to replace libc functions.
+#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
+
+// These macros describe whether a specific function should be
+// intercepted on a given platform.
+#if !SANITIZER_WINDOWS +# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 1 +# define ASAN_INTERCEPT__LONGJMP 1 +# define ASAN_INTERCEPT_INDEX 1 +# define ASAN_INTERCEPT_PTHREAD_CREATE 1 +#else +# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 0 +# define ASAN_INTERCEPT__LONGJMP 0 +# define ASAN_INTERCEPT_INDEX 0 +# define ASAN_INTERCEPT_PTHREAD_CREATE 0 +#endif + +#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \ + SANITIZER_SOLARIS +# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 1 +#else +# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 0 +#endif + +#if (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_SOLARIS +# define ASAN_INTERCEPT_SWAPCONTEXT 1 +#else +# define ASAN_INTERCEPT_SWAPCONTEXT 0 +#endif + +#if !SANITIZER_WINDOWS +# define ASAN_INTERCEPT_SIGLONGJMP 1 +#else +# define ASAN_INTERCEPT_SIGLONGJMP 0 +#endif + +#if SANITIZER_LINUX && !SANITIZER_ANDROID +# define ASAN_INTERCEPT___LONGJMP_CHK 1 +#else +# define ASAN_INTERCEPT___LONGJMP_CHK 0 +#endif + +#if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS && !SANITIZER_SOLARIS && \ + !SANITIZER_NETBSD +# define ASAN_INTERCEPT___CXA_THROW 1 +# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1 +# if defined(_GLIBCXX_SJLJ_EXCEPTIONS) || (SANITIZER_IOS && defined(__arm__)) +# define ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION 1 +# else +# define ASAN_INTERCEPT__UNWIND_RAISEEXCEPTION 1 +# endif +#else +# define ASAN_INTERCEPT___CXA_THROW 0 +# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 0 +# define ASAN_INTERCEPT__UNWIND_RAISEEXCEPTION 0 +# define ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION 0 +#endif + +#if !SANITIZER_WINDOWS +# define ASAN_INTERCEPT___CXA_ATEXIT 1 +#else +# define ASAN_INTERCEPT___CXA_ATEXIT 0 +#endif + +#if SANITIZER_LINUX && !SANITIZER_ANDROID +# define ASAN_INTERCEPT___STRDUP 1 +#else +# define ASAN_INTERCEPT___STRDUP 0 +#endif + +DECLARE_REAL(int, memcmp, const void *a1, const void *a2, uptr size) +DECLARE_REAL(char*, strchr, const char *str, int c) +DECLARE_REAL(SIZE_T, strlen, const char *s) +DECLARE_REAL(char*, strncpy, char *to, const char *from, uptr size) +DECLARE_REAL(uptr, strnlen, const char *s, uptr maxlen) +DECLARE_REAL(char*, strstr, const char *s1, const char *s2) + +#if !SANITIZER_MAC +#define ASAN_INTERCEPT_FUNC(name) \ + do { \ + if ((!INTERCEPT_FUNCTION(name) || !REAL(name))) \ + VReport(1, "AddressSanitizer: failed to intercept '" #name "'\n"); \ + } while (0) +#define ASAN_INTERCEPT_FUNC_VER(name, ver) \ + do { \ + if ((!INTERCEPT_FUNCTION_VER(name, ver) || !REAL(name))) \ + VReport( \ + 1, "AddressSanitizer: failed to intercept '" #name "@@" #ver "'\n"); \ + } while (0) +#else +// OS X interceptors don't need to be initialized with INTERCEPT_FUNCTION. +#define ASAN_INTERCEPT_FUNC(name) +#endif // SANITIZER_MAC + +#endif // !SANITIZER_FUCHSIA + +#endif // ASAN_INTERCEPTORS_H diff --git a/contrib/compiler-rt/lib/asan/asan_interceptors_memintrinsics.cc b/contrib/compiler-rt/lib/asan/asan_interceptors_memintrinsics.cc new file mode 100644 index 000000000000..39e32cdad12e --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_interceptors_memintrinsics.cc @@ -0,0 +1,44 @@ +//===-- asan_interceptors_memintrinsics.cc --------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===---------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. 
+//
+// ASan versions of memcpy, memmove, and memset.
+//===---------------------------------------------------------------------===//
+
+#include "asan_interceptors_memintrinsics.h"
+#include "asan_report.h"
+#include "asan_stack.h"
+#include "asan_suppressions.h"
+
+using namespace __asan;  // NOLINT
+
+void *__asan_memcpy(void *to, const void *from, uptr size) {
+  ASAN_MEMCPY_IMPL(nullptr, to, from, size);
+}
+
+void *__asan_memset(void *block, int c, uptr size) {
+  ASAN_MEMSET_IMPL(nullptr, block, c, size);
+}
+
+void *__asan_memmove(void *to, const void *from, uptr size) {
+  ASAN_MEMMOVE_IMPL(nullptr, to, from, size);
+}
+
+#if SANITIZER_FUCHSIA || SANITIZER_RTEMS
+
+// Fuchsia and RTEMS don't use sanitizer_common_interceptors.inc, but
+// these three functions are the only things they need from it. Just define
+// them as aliases here rather than repeating the contents.
+
+extern "C" decltype(__asan_memcpy) memcpy [[gnu::alias("__asan_memcpy")]];
+extern "C" decltype(__asan_memmove) memmove [[gnu::alias("__asan_memmove")]];
+extern "C" decltype(__asan_memset) memset [[gnu::alias("__asan_memset")]];
+
+#endif  // SANITIZER_FUCHSIA || SANITIZER_RTEMS
diff --git a/contrib/compiler-rt/lib/asan/asan_interceptors_memintrinsics.h b/contrib/compiler-rt/lib/asan/asan_interceptors_memintrinsics.h
new file mode 100644
index 000000000000..a071e8f684a0
--- /dev/null
+++ b/contrib/compiler-rt/lib/asan/asan_interceptors_memintrinsics.h
@@ -0,0 +1,155 @@
+//===-- asan_interceptors_memintrinsics.h -----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===---------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan-private header for asan_interceptors_memintrinsics.cc
+//===---------------------------------------------------------------------===//
+#ifndef ASAN_MEMINTRIN_H
+#define ASAN_MEMINTRIN_H
+
+#include "asan_interface_internal.h"
+#include "asan_internal.h"
+#include "asan_mapping.h"
+#include "interception/interception.h"
+
+DECLARE_REAL(void*, memcpy, void *to, const void *from, uptr size)
+DECLARE_REAL(void*, memset, void *block, int c, uptr size)
+
+namespace __asan {
+
+// Return true if we can quickly decide that the region is unpoisoned.
+// We assume that a redzone is at least 16 bytes.
+static inline bool QuickCheckForUnpoisonedRegion(uptr beg, uptr size) {
+  if (size == 0) return true;
+  if (size <= 32)
+    return !AddressIsPoisoned(beg) &&
+           !AddressIsPoisoned(beg + size - 1) &&
+           !AddressIsPoisoned(beg + size / 2);
+  if (size <= 64)
+    return !AddressIsPoisoned(beg) &&
+           !AddressIsPoisoned(beg + size / 4) &&
+           !AddressIsPoisoned(beg + size - 1) &&
+           !AddressIsPoisoned(beg + 3 * size / 4) &&
+           !AddressIsPoisoned(beg + size / 2);
+  return false;
+}
+
+struct AsanInterceptorContext {
+  const char *interceptor_name;
+};
+
+// We implement ACCESS_MEMORY_RANGE, ASAN_READ_RANGE,
+// and ASAN_WRITE_RANGE as macros instead of functions so
+// that no extra frames are created and the stack trace contains
+// only relevant information.
+// We check all shadow bytes.
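+// The `__offset > __offset + __size` test below catches address-range
+// wraparound; e.g. (illustrative) offset == (uptr)-8 with size == 0x10 wraps
+// past zero, and the overflow is reported via
+// ReportStringFunctionSizeOverflow.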
+#define ACCESS_MEMORY_RANGE(ctx, offset, size, isWrite) do {            \
+    uptr __offset = (uptr)(offset);                                     \
+    uptr __size = (uptr)(size);                                         \
+    uptr __bad = 0;                                                     \
+    if (__offset > __offset + __size) {                                 \
+      GET_STACK_TRACE_FATAL_HERE;                                       \
+      ReportStringFunctionSizeOverflow(__offset, __size, &stack);       \
+    }                                                                   \
+    if (!QuickCheckForUnpoisonedRegion(__offset, __size) &&             \
+        (__bad = __asan_region_is_poisoned(__offset, __size))) {        \
+      AsanInterceptorContext *_ctx = (AsanInterceptorContext *)ctx;     \
+      bool suppressed = false;                                          \
+      if (_ctx) {                                                       \
+        suppressed = IsInterceptorSuppressed(_ctx->interceptor_name);   \
+        if (!suppressed && HaveStackTraceBasedSuppressions()) {         \
+          GET_STACK_TRACE_FATAL_HERE;                                   \
+          suppressed = IsStackTraceSuppressed(&stack);                  \
+        }                                                               \
+      }                                                                 \
+      if (!suppressed) {                                                \
+        GET_CURRENT_PC_BP_SP;                                           \
+        ReportGenericError(pc, bp, sp, __bad, isWrite, __size, 0, false);\
+      }                                                                 \
+    }                                                                   \
+  } while (0)
+
+// memcpy is called during __asan_init() from the internals of printf(...).
+// We do not treat memcpy with to==from as a bug.
+// See http://llvm.org/bugs/show_bug.cgi?id=11763.
+#define ASAN_MEMCPY_IMPL(ctx, to, from, size)                             \
+  do {                                                                    \
+    if (UNLIKELY(!asan_inited)) return internal_memcpy(to, from, size);   \
+    if (asan_init_is_running) {                                           \
+      return REAL(memcpy)(to, from, size);                                \
+    }                                                                     \
+    ENSURE_ASAN_INITED();                                                 \
+    if (flags()->replace_intrin) {                                        \
+      if (to != from) {                                                   \
+        CHECK_RANGES_OVERLAP("memcpy", to, size, from, size);             \
+      }                                                                   \
+      ASAN_READ_RANGE(ctx, from, size);                                   \
+      ASAN_WRITE_RANGE(ctx, to, size);                                    \
+    }                                                                     \
+    return REAL(memcpy)(to, from, size);                                  \
+  } while (0)
+
+// memset is called inside Printf.
+#define ASAN_MEMSET_IMPL(ctx, block, c, size)                             \
+  do {                                                                    \
+    if (UNLIKELY(!asan_inited)) return internal_memset(block, c, size);   \
+    if (asan_init_is_running) {                                           \
+      return REAL(memset)(block, c, size);                                \
+    }                                                                     \
+    ENSURE_ASAN_INITED();                                                 \
+    if (flags()->replace_intrin) {                                        \
+      ASAN_WRITE_RANGE(ctx, block, size);                                 \
+    }                                                                     \
+    return REAL(memset)(block, c, size);                                  \
+  } while (0)
+
+#define ASAN_MEMMOVE_IMPL(ctx, to, from, size)                            \
+  do {                                                                    \
+    if (UNLIKELY(!asan_inited)) return internal_memmove(to, from, size);  \
+    ENSURE_ASAN_INITED();                                                 \
+    if (flags()->replace_intrin) {                                        \
+      ASAN_READ_RANGE(ctx, from, size);                                   \
+      ASAN_WRITE_RANGE(ctx, to, size);                                    \
+    }                                                                     \
+    return internal_memmove(to, from, size);                              \
+  } while (0)
+
+#define ASAN_READ_RANGE(ctx, offset, size) \
+  ACCESS_MEMORY_RANGE(ctx, offset, size, false)
+#define ASAN_WRITE_RANGE(ctx, offset, size) \
+  ACCESS_MEMORY_RANGE(ctx, offset, size, true)
+
+// The behavior of functions like "memcpy" or "strcpy" is undefined
+// if the memory intervals overlap. We report an error in this case.
+// A macro is used to avoid creating new frames.
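+// For intuition (illustrative): RangesOverlap below treats ranges as
+// half-open intervals [offset, offset + length), so two adjacent 4-byte
+// buffers at p and p + 4 do not overlap, while buffers at p and p + 3 do.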
+static inline bool RangesOverlap(const char *offset1, uptr length1, + const char *offset2, uptr length2) { + return !((offset1 + length1 <= offset2) || (offset2 + length2 <= offset1)); +} +#define CHECK_RANGES_OVERLAP(name, _offset1, length1, _offset2, length2) \ + do { \ + const char *offset1 = (const char *)_offset1; \ + const char *offset2 = (const char *)_offset2; \ + if (RangesOverlap(offset1, length1, offset2, length2)) { \ + GET_STACK_TRACE_FATAL_HERE; \ + bool suppressed = IsInterceptorSuppressed(name); \ + if (!suppressed && HaveStackTraceBasedSuppressions()) { \ + suppressed = IsStackTraceSuppressed(&stack); \ + } \ + if (!suppressed) { \ + ReportStringFunctionMemoryRangesOverlap(name, offset1, length1, \ + offset2, length2, &stack); \ + } \ + } \ + } while (0) + +} // namespace __asan + +#endif // ASAN_MEMINTRIN_H diff --git a/contrib/compiler-rt/lib/asan/asan_interface.inc b/contrib/compiler-rt/lib/asan/asan_interface.inc new file mode 100644 index 000000000000..e65f61722b10 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_interface.inc @@ -0,0 +1,169 @@ +//===-- asan_interface.inc ------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// Asan interface list. +//===----------------------------------------------------------------------===// +INTERFACE_FUNCTION(__asan_addr_is_in_fake_stack) +INTERFACE_FUNCTION(__asan_address_is_poisoned) +INTERFACE_FUNCTION(__asan_after_dynamic_init) +INTERFACE_FUNCTION(__asan_alloca_poison) +INTERFACE_FUNCTION(__asan_allocas_unpoison) +INTERFACE_FUNCTION(__asan_before_dynamic_init) +INTERFACE_FUNCTION(__asan_describe_address) +INTERFACE_FUNCTION(__asan_exp_load1) +INTERFACE_FUNCTION(__asan_exp_load2) +INTERFACE_FUNCTION(__asan_exp_load4) +INTERFACE_FUNCTION(__asan_exp_load8) +INTERFACE_FUNCTION(__asan_exp_load16) +INTERFACE_FUNCTION(__asan_exp_loadN) +INTERFACE_FUNCTION(__asan_exp_store1) +INTERFACE_FUNCTION(__asan_exp_store2) +INTERFACE_FUNCTION(__asan_exp_store4) +INTERFACE_FUNCTION(__asan_exp_store8) +INTERFACE_FUNCTION(__asan_exp_store16) +INTERFACE_FUNCTION(__asan_exp_storeN) +INTERFACE_FUNCTION(__asan_get_alloc_stack) +INTERFACE_FUNCTION(__asan_get_current_fake_stack) +INTERFACE_FUNCTION(__asan_get_free_stack) +INTERFACE_FUNCTION(__asan_get_report_access_size) +INTERFACE_FUNCTION(__asan_get_report_access_type) +INTERFACE_FUNCTION(__asan_get_report_address) +INTERFACE_FUNCTION(__asan_get_report_bp) +INTERFACE_FUNCTION(__asan_get_report_description) +INTERFACE_FUNCTION(__asan_get_report_pc) +INTERFACE_FUNCTION(__asan_get_report_sp) +INTERFACE_FUNCTION(__asan_get_shadow_mapping) +INTERFACE_FUNCTION(__asan_handle_no_return) +INTERFACE_FUNCTION(__asan_init) +INTERFACE_FUNCTION(__asan_load_cxx_array_cookie) +INTERFACE_FUNCTION(__asan_load1) +INTERFACE_FUNCTION(__asan_load2) +INTERFACE_FUNCTION(__asan_load4) +INTERFACE_FUNCTION(__asan_load8) +INTERFACE_FUNCTION(__asan_load16) +INTERFACE_FUNCTION(__asan_loadN) +INTERFACE_FUNCTION(__asan_load1_noabort) +INTERFACE_FUNCTION(__asan_load2_noabort) +INTERFACE_FUNCTION(__asan_load4_noabort) +INTERFACE_FUNCTION(__asan_load8_noabort) +INTERFACE_FUNCTION(__asan_load16_noabort) +INTERFACE_FUNCTION(__asan_loadN_noabort) +INTERFACE_FUNCTION(__asan_locate_address) +INTERFACE_FUNCTION(__asan_memcpy) +INTERFACE_FUNCTION(__asan_memmove) 
+INTERFACE_FUNCTION(__asan_memset) +INTERFACE_FUNCTION(__asan_poison_cxx_array_cookie) +INTERFACE_FUNCTION(__asan_poison_intra_object_redzone) +INTERFACE_FUNCTION(__asan_poison_memory_region) +INTERFACE_FUNCTION(__asan_poison_stack_memory) +INTERFACE_FUNCTION(__asan_print_accumulated_stats) +INTERFACE_FUNCTION(__asan_region_is_poisoned) +INTERFACE_FUNCTION(__asan_register_globals) +INTERFACE_FUNCTION(__asan_register_elf_globals) +INTERFACE_FUNCTION(__asan_register_image_globals) +INTERFACE_FUNCTION(__asan_report_error) +INTERFACE_FUNCTION(__asan_report_exp_load1) +INTERFACE_FUNCTION(__asan_report_exp_load2) +INTERFACE_FUNCTION(__asan_report_exp_load4) +INTERFACE_FUNCTION(__asan_report_exp_load8) +INTERFACE_FUNCTION(__asan_report_exp_load16) +INTERFACE_FUNCTION(__asan_report_exp_load_n) +INTERFACE_FUNCTION(__asan_report_exp_store1) +INTERFACE_FUNCTION(__asan_report_exp_store2) +INTERFACE_FUNCTION(__asan_report_exp_store4) +INTERFACE_FUNCTION(__asan_report_exp_store8) +INTERFACE_FUNCTION(__asan_report_exp_store16) +INTERFACE_FUNCTION(__asan_report_exp_store_n) +INTERFACE_FUNCTION(__asan_report_load1) +INTERFACE_FUNCTION(__asan_report_load2) +INTERFACE_FUNCTION(__asan_report_load4) +INTERFACE_FUNCTION(__asan_report_load8) +INTERFACE_FUNCTION(__asan_report_load16) +INTERFACE_FUNCTION(__asan_report_load_n) +INTERFACE_FUNCTION(__asan_report_load1_noabort) +INTERFACE_FUNCTION(__asan_report_load2_noabort) +INTERFACE_FUNCTION(__asan_report_load4_noabort) +INTERFACE_FUNCTION(__asan_report_load8_noabort) +INTERFACE_FUNCTION(__asan_report_load16_noabort) +INTERFACE_FUNCTION(__asan_report_load_n_noabort) +INTERFACE_FUNCTION(__asan_report_present) +INTERFACE_FUNCTION(__asan_report_store1) +INTERFACE_FUNCTION(__asan_report_store2) +INTERFACE_FUNCTION(__asan_report_store4) +INTERFACE_FUNCTION(__asan_report_store8) +INTERFACE_FUNCTION(__asan_report_store16) +INTERFACE_FUNCTION(__asan_report_store_n) +INTERFACE_FUNCTION(__asan_report_store1_noabort) +INTERFACE_FUNCTION(__asan_report_store2_noabort) +INTERFACE_FUNCTION(__asan_report_store4_noabort) +INTERFACE_FUNCTION(__asan_report_store8_noabort) +INTERFACE_FUNCTION(__asan_report_store16_noabort) +INTERFACE_FUNCTION(__asan_report_store_n_noabort) +INTERFACE_FUNCTION(__asan_set_death_callback) +INTERFACE_FUNCTION(__asan_set_error_report_callback) +INTERFACE_FUNCTION(__asan_set_shadow_00) +INTERFACE_FUNCTION(__asan_set_shadow_f1) +INTERFACE_FUNCTION(__asan_set_shadow_f2) +INTERFACE_FUNCTION(__asan_set_shadow_f3) +INTERFACE_FUNCTION(__asan_set_shadow_f5) +INTERFACE_FUNCTION(__asan_set_shadow_f8) +INTERFACE_FUNCTION(__asan_stack_free_0) +INTERFACE_FUNCTION(__asan_stack_free_1) +INTERFACE_FUNCTION(__asan_stack_free_2) +INTERFACE_FUNCTION(__asan_stack_free_3) +INTERFACE_FUNCTION(__asan_stack_free_4) +INTERFACE_FUNCTION(__asan_stack_free_5) +INTERFACE_FUNCTION(__asan_stack_free_6) +INTERFACE_FUNCTION(__asan_stack_free_7) +INTERFACE_FUNCTION(__asan_stack_free_8) +INTERFACE_FUNCTION(__asan_stack_free_9) +INTERFACE_FUNCTION(__asan_stack_free_10) +INTERFACE_FUNCTION(__asan_stack_malloc_0) +INTERFACE_FUNCTION(__asan_stack_malloc_1) +INTERFACE_FUNCTION(__asan_stack_malloc_2) +INTERFACE_FUNCTION(__asan_stack_malloc_3) +INTERFACE_FUNCTION(__asan_stack_malloc_4) +INTERFACE_FUNCTION(__asan_stack_malloc_5) +INTERFACE_FUNCTION(__asan_stack_malloc_6) +INTERFACE_FUNCTION(__asan_stack_malloc_7) +INTERFACE_FUNCTION(__asan_stack_malloc_8) +INTERFACE_FUNCTION(__asan_stack_malloc_9) +INTERFACE_FUNCTION(__asan_stack_malloc_10) +INTERFACE_FUNCTION(__asan_store1) 
+INTERFACE_FUNCTION(__asan_store2)
+INTERFACE_FUNCTION(__asan_store4)
+INTERFACE_FUNCTION(__asan_store8)
+INTERFACE_FUNCTION(__asan_store16)
+INTERFACE_FUNCTION(__asan_storeN)
+INTERFACE_FUNCTION(__asan_store1_noabort)
+INTERFACE_FUNCTION(__asan_store2_noabort)
+INTERFACE_FUNCTION(__asan_store4_noabort)
+INTERFACE_FUNCTION(__asan_store8_noabort)
+INTERFACE_FUNCTION(__asan_store16_noabort)
+INTERFACE_FUNCTION(__asan_storeN_noabort)
+INTERFACE_FUNCTION(__asan_unpoison_intra_object_redzone)
+INTERFACE_FUNCTION(__asan_unpoison_memory_region)
+INTERFACE_FUNCTION(__asan_unpoison_stack_memory)
+INTERFACE_FUNCTION(__asan_unregister_globals)
+INTERFACE_FUNCTION(__asan_unregister_elf_globals)
+INTERFACE_FUNCTION(__asan_unregister_image_globals)
+INTERFACE_FUNCTION(__asan_version_mismatch_check_v8)
+INTERFACE_FUNCTION(__sanitizer_finish_switch_fiber)
+INTERFACE_FUNCTION(__sanitizer_print_stack_trace)
+INTERFACE_FUNCTION(__sanitizer_ptr_cmp)
+INTERFACE_FUNCTION(__sanitizer_ptr_sub)
+INTERFACE_FUNCTION(__sanitizer_start_switch_fiber)
+INTERFACE_FUNCTION(__sanitizer_unaligned_load16)
+INTERFACE_FUNCTION(__sanitizer_unaligned_load32)
+INTERFACE_FUNCTION(__sanitizer_unaligned_load64)
+INTERFACE_FUNCTION(__sanitizer_unaligned_store16)
+INTERFACE_FUNCTION(__sanitizer_unaligned_store32)
+INTERFACE_FUNCTION(__sanitizer_unaligned_store64)
+INTERFACE_WEAK_FUNCTION(__asan_default_options)
+INTERFACE_WEAK_FUNCTION(__asan_default_suppressions)
+INTERFACE_WEAK_FUNCTION(__asan_on_error)
diff --git a/contrib/compiler-rt/lib/asan/asan_interface_internal.h b/contrib/compiler-rt/lib/asan/asan_interface_internal.h
new file mode 100644
index 000000000000..b974c0cc4b43
--- /dev/null
+++ b/contrib/compiler-rt/lib/asan/asan_interface_internal.h
@@ -0,0 +1,255 @@
+//===-- asan_interface_internal.h -------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// This header declares the AddressSanitizer runtime interface functions.
+// The runtime library has to define these functions so that the instrumented
+// program can call them.
+//
+// See also include/sanitizer/asan_interface.h
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_INTERFACE_INTERNAL_H
+#define ASAN_INTERFACE_INTERNAL_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+#include "asan_init_version.h"
+
+using __sanitizer::uptr;
+using __sanitizer::u64;
+using __sanitizer::u32;
+
+extern "C" {
+  // This function should be called at the very beginning of the process,
+  // before any instrumented code is executed and before any call to malloc.
+  SANITIZER_INTERFACE_ATTRIBUTE void __asan_init();
+
+  // This function exists purely to get a linker/loader error when using
+  // incompatible versions of instrumentation and runtime library. Please note
+  // that __asan_version_mismatch_check is a macro that is replaced with
+  // __asan_version_mismatch_check_vXXX at compile-time.
+  SANITIZER_INTERFACE_ATTRIBUTE void __asan_version_mismatch_check();
+
+  // This structure is used to describe the source location of a place where
+  // a global was defined.
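+  //
+  // E.g. (illustrative values only): for a global defined at line 12,
+  // column 9 of foo.cc, the compiler emits roughly
+  //   __asan_global_source_location loc = {"foo.cc", 12, 9};
+  // and points the owning __asan_global's `location` field at it.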
+  struct __asan_global_source_location {
+    const char *filename;
+    int line_no;
+    int column_no;
+  };
+
+  // This structure describes an instrumented global variable.
+  struct __asan_global {
+    uptr beg;                // The address of the global.
+    uptr size;               // The original size of the global.
+    uptr size_with_redzone;  // The size with the redzone.
+    const char *name;        // Name as a C string.
+    const char *module_name; // Module name as a C string. This pointer is a
+                             // unique identifier of a module.
+    uptr has_dynamic_init;   // Non-zero if the global has dynamic initializer.
+    __asan_global_source_location *location;  // Source location of a global,
+                                              // or NULL if it is unknown.
+    uptr odr_indicator;      // The address of the ODR indicator symbol.
+  };
+
+  // These functions can be called on some platforms to find globals in the
+  // same loaded image as `flag' and apply __asan_(un)register_globals to them,
+  // filtering out redundant calls.
+  SANITIZER_INTERFACE_ATTRIBUTE
+  void __asan_register_image_globals(uptr *flag);
+  SANITIZER_INTERFACE_ATTRIBUTE
+  void __asan_unregister_image_globals(uptr *flag);
+
+  SANITIZER_INTERFACE_ATTRIBUTE
+  void __asan_register_elf_globals(uptr *flag, void *start, void *stop);
+  SANITIZER_INTERFACE_ATTRIBUTE
+  void __asan_unregister_elf_globals(uptr *flag, void *start, void *stop);
+
+  // These two functions should be called by the instrumented code.
+  // 'globals' is an array of structures describing 'n' globals.
+  SANITIZER_INTERFACE_ATTRIBUTE
+  void __asan_register_globals(__asan_global *globals, uptr n);
+  SANITIZER_INTERFACE_ATTRIBUTE
+  void __asan_unregister_globals(__asan_global *globals, uptr n);
+
+  // These two functions should be called before and after dynamic initializers
+  // of a single module run, respectively.
+  SANITIZER_INTERFACE_ATTRIBUTE
+  void __asan_before_dynamic_init(const char *module_name);
+  SANITIZER_INTERFACE_ATTRIBUTE
+  void __asan_after_dynamic_init();
+
+  // Set the bytes of the given range of shadow memory to a specific value.
+  SANITIZER_INTERFACE_ATTRIBUTE
+  void __asan_set_shadow_00(uptr addr, uptr size);
+  SANITIZER_INTERFACE_ATTRIBUTE
+  void __asan_set_shadow_f1(uptr addr, uptr size);
+  SANITIZER_INTERFACE_ATTRIBUTE
+  void __asan_set_shadow_f2(uptr addr, uptr size);
+  SANITIZER_INTERFACE_ATTRIBUTE
+  void __asan_set_shadow_f3(uptr addr, uptr size);
+  SANITIZER_INTERFACE_ATTRIBUTE
+  void __asan_set_shadow_f5(uptr addr, uptr size);
+  SANITIZER_INTERFACE_ATTRIBUTE
+  void __asan_set_shadow_f8(uptr addr, uptr size);
+
+  // These two functions are used by instrumented code in the
+  // use-after-scope mode. They mark memory for local variables as
+  // unaddressable when they leave scope and addressable before the
+  // function exits.
+  SANITIZER_INTERFACE_ATTRIBUTE
+  void __asan_poison_stack_memory(uptr addr, uptr size);
+  SANITIZER_INTERFACE_ATTRIBUTE
+  void __asan_unpoison_stack_memory(uptr addr, uptr size);
+
+  // Performs cleanup before a NoReturn function. Must be called before things
+  // like _exit and execl to avoid false positives on the stack.
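+  //
+  // E.g. (illustrative): instrumentation inserts the call right before a
+  // noreturn call site, roughly:
+  //   __asan_handle_no_return();  // unpoison the current thread's stack
+  //   _exit(1);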
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_handle_no_return(); + + SANITIZER_INTERFACE_ATTRIBUTE + void __asan_poison_memory_region(void const volatile *addr, uptr size); + SANITIZER_INTERFACE_ATTRIBUTE + void __asan_unpoison_memory_region(void const volatile *addr, uptr size); + + SANITIZER_INTERFACE_ATTRIBUTE + int __asan_address_is_poisoned(void const volatile *addr); + + SANITIZER_INTERFACE_ATTRIBUTE + uptr __asan_region_is_poisoned(uptr beg, uptr size); + + SANITIZER_INTERFACE_ATTRIBUTE + void __asan_describe_address(uptr addr); + + SANITIZER_INTERFACE_ATTRIBUTE + int __asan_report_present(); + + SANITIZER_INTERFACE_ATTRIBUTE + uptr __asan_get_report_pc(); + SANITIZER_INTERFACE_ATTRIBUTE + uptr __asan_get_report_bp(); + SANITIZER_INTERFACE_ATTRIBUTE + uptr __asan_get_report_sp(); + SANITIZER_INTERFACE_ATTRIBUTE + uptr __asan_get_report_address(); + SANITIZER_INTERFACE_ATTRIBUTE + int __asan_get_report_access_type(); + SANITIZER_INTERFACE_ATTRIBUTE + uptr __asan_get_report_access_size(); + SANITIZER_INTERFACE_ATTRIBUTE + const char * __asan_get_report_description(); + + SANITIZER_INTERFACE_ATTRIBUTE + const char * __asan_locate_address(uptr addr, char *name, uptr name_size, + uptr *region_address, uptr *region_size); + + SANITIZER_INTERFACE_ATTRIBUTE + uptr __asan_get_alloc_stack(uptr addr, uptr *trace, uptr size, + u32 *thread_id); + + SANITIZER_INTERFACE_ATTRIBUTE + uptr __asan_get_free_stack(uptr addr, uptr *trace, uptr size, + u32 *thread_id); + + SANITIZER_INTERFACE_ATTRIBUTE + void __asan_get_shadow_mapping(uptr *shadow_scale, uptr *shadow_offset); + + SANITIZER_INTERFACE_ATTRIBUTE + void __asan_report_error(uptr pc, uptr bp, uptr sp, + uptr addr, int is_write, uptr access_size, u32 exp); + + SANITIZER_INTERFACE_ATTRIBUTE + void __asan_set_death_callback(void (*callback)(void)); + SANITIZER_INTERFACE_ATTRIBUTE + void __asan_set_error_report_callback(void (*callback)(const char*)); + + SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE + void __asan_on_error(); + + SANITIZER_INTERFACE_ATTRIBUTE void __asan_print_accumulated_stats(); + + SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE + const char* __asan_default_options(); + + SANITIZER_INTERFACE_ATTRIBUTE + extern uptr __asan_shadow_memory_dynamic_address; + + // Global flag, copy of ASAN_OPTIONS=detect_stack_use_after_return + SANITIZER_INTERFACE_ATTRIBUTE + extern int __asan_option_detect_stack_use_after_return; + + SANITIZER_INTERFACE_ATTRIBUTE + extern uptr *__asan_test_only_reported_buggy_pointer; + + SANITIZER_INTERFACE_ATTRIBUTE void __asan_load1(uptr p); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_load2(uptr p); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_load4(uptr p); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_load8(uptr p); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_load16(uptr p); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_store1(uptr p); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_store2(uptr p); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_store4(uptr p); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_store8(uptr p); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_store16(uptr p); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_loadN(uptr p, uptr size); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_storeN(uptr p, uptr size); + + SANITIZER_INTERFACE_ATTRIBUTE void __asan_load1_noabort(uptr p); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_load2_noabort(uptr p); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_load4_noabort(uptr p); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_load8_noabort(uptr p); + 
SANITIZER_INTERFACE_ATTRIBUTE void __asan_load16_noabort(uptr p); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_store1_noabort(uptr p); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_store2_noabort(uptr p); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_store4_noabort(uptr p); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_store8_noabort(uptr p); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_store16_noabort(uptr p); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_loadN_noabort(uptr p, uptr size); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_storeN_noabort(uptr p, uptr size); + + SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load1(uptr p, u32 exp); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load2(uptr p, u32 exp); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load4(uptr p, u32 exp); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load8(uptr p, u32 exp); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load16(uptr p, u32 exp); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store1(uptr p, u32 exp); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store2(uptr p, u32 exp); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store4(uptr p, u32 exp); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store8(uptr p, u32 exp); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store16(uptr p, u32 exp); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_loadN(uptr p, uptr size, + u32 exp); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_storeN(uptr p, uptr size, + u32 exp); + + SANITIZER_INTERFACE_ATTRIBUTE + void* __asan_memcpy(void *dst, const void *src, uptr size); + SANITIZER_INTERFACE_ATTRIBUTE + void* __asan_memset(void *s, int c, uptr n); + SANITIZER_INTERFACE_ATTRIBUTE + void* __asan_memmove(void* dest, const void* src, uptr n); + + SANITIZER_INTERFACE_ATTRIBUTE + void __asan_poison_cxx_array_cookie(uptr p); + SANITIZER_INTERFACE_ATTRIBUTE + uptr __asan_load_cxx_array_cookie(uptr *p); + SANITIZER_INTERFACE_ATTRIBUTE + void __asan_poison_intra_object_redzone(uptr p, uptr size); + SANITIZER_INTERFACE_ATTRIBUTE + void __asan_unpoison_intra_object_redzone(uptr p, uptr size); + SANITIZER_INTERFACE_ATTRIBUTE + void __asan_alloca_poison(uptr addr, uptr size); + SANITIZER_INTERFACE_ATTRIBUTE + void __asan_allocas_unpoison(uptr top, uptr bottom); + + SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE + const char* __asan_default_suppressions(); +} // extern "C" + +#endif // ASAN_INTERFACE_INTERNAL_H diff --git a/contrib/compiler-rt/lib/asan/asan_internal.h b/contrib/compiler-rt/lib/asan/asan_internal.h new file mode 100644 index 000000000000..57869497c7d3 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_internal.h @@ -0,0 +1,164 @@ +//===-- asan_internal.h -----------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// ASan-private header which defines various general utilities. 
+//===----------------------------------------------------------------------===// +#ifndef ASAN_INTERNAL_H +#define ASAN_INTERNAL_H + +#include "asan_flags.h" +#include "asan_interface_internal.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_internal_defs.h" +#include "sanitizer_common/sanitizer_stacktrace.h" +#include "sanitizer_common/sanitizer_libc.h" + +#if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__) +# error "The AddressSanitizer run-time should not be" + " instrumented by AddressSanitizer" +#endif + +// Build-time configuration options. + +// If set, asan will intercept C++ exception api call(s). +#ifndef ASAN_HAS_EXCEPTIONS +# define ASAN_HAS_EXCEPTIONS 1 +#endif + +// If set, values like allocator chunk size, as well as defaults for some flags +// will be changed towards less memory overhead. +#ifndef ASAN_LOW_MEMORY +# if SANITIZER_IOS || SANITIZER_ANDROID || SANITIZER_RTEMS +# define ASAN_LOW_MEMORY 1 +# else +# define ASAN_LOW_MEMORY 0 +# endif +#endif + +#ifndef ASAN_DYNAMIC +# ifdef PIC +# define ASAN_DYNAMIC 1 +# else +# define ASAN_DYNAMIC 0 +# endif +#endif + +// All internal functions in asan reside inside the __asan namespace +// to avoid namespace collisions with the user programs. +// Separate namespace also makes it simpler to distinguish the asan run-time +// functions from the instrumented user code in a profile. +namespace __asan { + +class AsanThread; +using __sanitizer::StackTrace; + +void AsanInitFromRtl(); + +// asan_win.cc +void InitializePlatformExceptionHandlers(); +// Returns whether an address is a valid allocated system heap block. +// 'addr' must point to the beginning of the block. +bool IsSystemHeapAddress(uptr addr); + +// asan_rtl.cc +void PrintAddressSpaceLayout(); +void NORETURN ShowStatsAndAbort(); + +// asan_shadow_setup.cc +void InitializeShadowMemory(); + +// asan_malloc_linux.cc / asan_malloc_mac.cc +void ReplaceSystemMalloc(); + +// asan_linux.cc / asan_mac.cc / asan_rtems.cc / asan_win.cc +uptr FindDynamicShadowStart(); +void *AsanDoesNotSupportStaticLinkage(); +void AsanCheckDynamicRTPrereqs(); +void AsanCheckIncompatibleRT(); + +// asan_thread.cc +AsanThread *CreateMainThread(); + +// Support function for __asan_(un)register_image_globals. Searches for the +// loaded image containing `needle' and then enumerates all global metadata +// structures declared in that image, applying `op' (e.g., +// __asan_(un)register_globals) to them. +typedef void (*globals_op_fptr)(__asan_global *, uptr); +void AsanApplyToGlobals(globals_op_fptr op, const void *needle); + +void AsanOnDeadlySignal(int, void *siginfo, void *context); + +void ReadContextStack(void *context, uptr *stack, uptr *ssize); +void StopInitOrderChecking(); + +// Wrapper for TLS/TSD. +void AsanTSDInit(void (*destructor)(void *tsd)); +void *AsanTSDGet(); +void AsanTSDSet(void *tsd); +void PlatformTSDDtor(void *tsd); + +void AppendToErrorMessageBuffer(const char *buffer); + +void *AsanDlSymNext(const char *sym); + +void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name); + +// Returns `true` iff most of ASan init process should be skipped due to the +// ASan library being loaded via `dlopen()`. Platforms may perform any +// `dlopen()` specific initialization inside this function. +bool HandleDlopenInit(); + +// Add convenient macro for interface functions that may be represented as +// weak hooks. 
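// [Editor's note] A sketch of the user-facing side of these weak hooks, for
// context; it is not part of this patch. Because __asan_default_options and
// __asan_on_error are declared SANITIZER_WEAK_ATTRIBUTE above, the runtime
// calls them only when the program supplies strong definitions; that is what
// the `if (&hook) hook()` test in the macros below checks. Both hook names
// are documented ASan interface; the option string is just an example.

#include <cstdio>

// Consulted at startup; ASAN_OPTIONS in the environment still takes priority.
extern "C" const char *__asan_default_options() {
  return "verbosity=1:malloc_context_size=20";
}

// Invoked when ASan is about to report an error; useful to flush app state.
extern "C" void __asan_on_error() {
  std::fflush(stdout);
  std::fprintf(stderr, "__asan_on_error: error report incoming\n");
}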
+#define ASAN_MALLOC_HOOK(ptr, size) \ + do { \ + if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(ptr, size); \ + RunMallocHooks(ptr, size); \ + } while (false) +#define ASAN_FREE_HOOK(ptr) \ + do { \ + if (&__sanitizer_free_hook) __sanitizer_free_hook(ptr); \ + RunFreeHooks(ptr); \ + } while (false) +#define ASAN_ON_ERROR() \ + if (&__asan_on_error) __asan_on_error() + +extern int asan_inited; +// Used to avoid infinite recursion in __asan_init(). +extern bool asan_init_is_running; +extern void (*death_callback)(void); +// These magic values are written to shadow for better error reporting. +const int kAsanHeapLeftRedzoneMagic = 0xfa; +const int kAsanHeapFreeMagic = 0xfd; +const int kAsanStackLeftRedzoneMagic = 0xf1; +const int kAsanStackMidRedzoneMagic = 0xf2; +const int kAsanStackRightRedzoneMagic = 0xf3; +const int kAsanStackAfterReturnMagic = 0xf5; +const int kAsanInitializationOrderMagic = 0xf6; +const int kAsanUserPoisonedMemoryMagic = 0xf7; +const int kAsanContiguousContainerOOBMagic = 0xfc; +const int kAsanStackUseAfterScopeMagic = 0xf8; +const int kAsanGlobalRedzoneMagic = 0xf9; +const int kAsanInternalHeapMagic = 0xfe; +const int kAsanArrayCookieMagic = 0xac; +const int kAsanIntraObjectRedzone = 0xbb; +const int kAsanAllocaLeftMagic = 0xca; +const int kAsanAllocaRightMagic = 0xcb; +// Used to populate the shadow gap for systems without memory +// protection there (i.e. Myriad). +const int kAsanShadowGap = 0xcc; + +static const uptr kCurrentStackFrameMagic = 0x41B58AB3; +static const uptr kRetiredStackFrameMagic = 0x45E0360E; + +} // namespace __asan + +#endif // ASAN_INTERNAL_H diff --git a/contrib/compiler-rt/lib/asan/asan_linux.cc b/contrib/compiler-rt/lib/asan/asan_linux.cc new file mode 100644 index 000000000000..a150b1955d60 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_linux.cc @@ -0,0 +1,261 @@ +//===-- asan_linux.cc -----------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Linux-specific details. 
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
+    SANITIZER_SOLARIS
+
+#include "asan_interceptors.h"
+#include "asan_internal.h"
+#include "asan_premap_shadow.h"
+#include "asan_thread.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_freebsd.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
+
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <dlfcn.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <unwind.h>
+
+#if SANITIZER_FREEBSD
+#include <sys/link_elf.h>
+#endif
+
+#if SANITIZER_SOLARIS
+#include <link.h>
+#endif
+
+#if SANITIZER_ANDROID || SANITIZER_FREEBSD || SANITIZER_SOLARIS
+#include <ucontext.h>
+extern "C" void* _DYNAMIC;
+#elif SANITIZER_NETBSD
+#include <link_elf.h>
+#include <ucontext.h>
+extern Elf_Dyn _DYNAMIC;
+#else
+#include <sys/ucontext.h>
+#include <link.h>
+#endif
+
+// x86-64 FreeBSD 9.2 and older define 'ucontext_t' incorrectly in
+// 32-bit mode.
+#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32) && \
+    __FreeBSD_version <= 902001  // v9.2
+#define ucontext_t xucontext_t
+#endif
+
+typedef enum {
+  ASAN_RT_VERSION_UNDEFINED = 0,
+  ASAN_RT_VERSION_DYNAMIC,
+  ASAN_RT_VERSION_STATIC,
+} asan_rt_version_t;
+
+// FIXME: perhaps also store abi version here?
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+asan_rt_version_t __asan_rt_version;
+}
+
+namespace __asan {
+
+void InitializePlatformInterceptors() {}
+void InitializePlatformExceptionHandlers() {}
+bool IsSystemHeapAddress (uptr addr) { return false; }
+
+void *AsanDoesNotSupportStaticLinkage() {
+  // This will fail to link with -static.
+  return &_DYNAMIC;  // defined in link.h
+}
+
+static void UnmapFromTo(uptr from, uptr to) {
+  CHECK(to >= from);
+  if (to == from) return;
+  uptr res = internal_munmap(reinterpret_cast<void *>(from), to - from);
+  if (UNLIKELY(internal_iserror(res))) {
+    Report(
+        "ERROR: AddressSanitizer failed to unmap 0x%zx (%zd) bytes at address "
+        "%p\n",
+        to - from, to - from, from);
+    CHECK("unable to unmap" && 0);
+  }
+}
+
+#if ASAN_PREMAP_SHADOW
+uptr FindPremappedShadowStart() {
+  uptr granularity = GetMmapGranularity();
+  uptr shadow_start = reinterpret_cast<uptr>(&__asan_shadow);
+  uptr premap_shadow_size = PremapShadowSize();
+  uptr shadow_size = RoundUpTo(kHighShadowEnd, granularity);
+  // We may have mapped too much. Release extra memory.
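// [Editor's note] The trimming below, and FindDynamicShadowStart() right
// after it, are instances of the standard reserve-and-trim idiom: map more
// PROT_NONE address space than needed, round the base up to the required
// alignment, then munmap the unused head and tail. A freestanding sketch of
// the idiom with plain POSIX calls (ReserveAligned is an invented name, not
// part of the runtime; alignment must be a power of two):

#include <sys/mman.h>
#include <cstddef>
#include <cstdint>

static void *ReserveAligned(size_t size, size_t alignment) {
  // Over-reserve by `alignment` so an aligned base is guaranteed to fit.
  size_t map_size = size + alignment;
  void *raw = mmap(nullptr, map_size, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (raw == MAP_FAILED) return nullptr;
  uintptr_t raw_addr = reinterpret_cast<uintptr_t>(raw);
  uintptr_t base = (raw_addr + alignment - 1) & ~(uintptr_t)(alignment - 1);
  if (base != raw_addr)                       // release the unaligned head
    munmap(raw, base - raw_addr);
  uintptr_t end = raw_addr + map_size;
  if (base + size != end)                     // release the unused tail
    munmap(reinterpret_cast<void *>(base + size), end - (base + size));
  return reinterpret_cast<void *>(base);
}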
+ UnmapFromTo(shadow_start + shadow_size, shadow_start + premap_shadow_size); + return shadow_start; +} +#endif + +uptr FindDynamicShadowStart() { +#if ASAN_PREMAP_SHADOW + if (!PremapShadowFailed()) + return FindPremappedShadowStart(); +#endif + + uptr granularity = GetMmapGranularity(); + uptr alignment = granularity * 8; + uptr left_padding = granularity; + uptr shadow_size = RoundUpTo(kHighShadowEnd, granularity); + uptr map_size = shadow_size + left_padding + alignment; + + uptr map_start = (uptr)MmapNoAccess(map_size); + CHECK_NE(map_start, ~(uptr)0); + + uptr shadow_start = RoundUpTo(map_start + left_padding, alignment); + UnmapFromTo(map_start, shadow_start - left_padding); + UnmapFromTo(shadow_start + shadow_size, map_start + map_size); + + return shadow_start; +} + +void AsanApplyToGlobals(globals_op_fptr op, const void *needle) { + UNIMPLEMENTED(); +} + +#if SANITIZER_ANDROID +// FIXME: should we do anything for Android? +void AsanCheckDynamicRTPrereqs() {} +void AsanCheckIncompatibleRT() {} +#else +static int FindFirstDSOCallback(struct dl_phdr_info *info, size_t size, + void *data) { + VReport(2, "info->dlpi_name = %s\tinfo->dlpi_addr = %p\n", + info->dlpi_name, info->dlpi_addr); + + // Continue until the first dynamic library is found + if (!info->dlpi_name || info->dlpi_name[0] == 0) + return 0; + + // Ignore vDSO + if (internal_strncmp(info->dlpi_name, "linux-", sizeof("linux-") - 1) == 0) + return 0; + +#if SANITIZER_FREEBSD || SANITIZER_NETBSD + // Ignore first entry (the main program) + char **p = (char **)data; + if (!(*p)) { + *p = (char *)-1; + return 0; + } +#endif + +#if SANITIZER_SOLARIS + // Ignore executable on Solaris + if (info->dlpi_addr == 0) + return 0; +#endif + + *(const char **)data = info->dlpi_name; + return 1; +} + +static bool IsDynamicRTName(const char *libname) { + return internal_strstr(libname, "libclang_rt.asan") || + internal_strstr(libname, "libasan.so"); +} + +static void ReportIncompatibleRT() { + Report("Your application is linked against incompatible ASan runtimes.\n"); + Die(); +} + +void AsanCheckDynamicRTPrereqs() { + if (!ASAN_DYNAMIC || !flags()->verify_asan_link_order) + return; + + // Ensure that dynamic RT is the first DSO in the list + const char *first_dso_name = nullptr; + dl_iterate_phdr(FindFirstDSOCallback, &first_dso_name); + if (first_dso_name && !IsDynamicRTName(first_dso_name)) { + Report("ASan runtime does not come first in initial library list; " + "you should either link runtime to your application or " + "manually preload it with LD_PRELOAD.\n"); + Die(); + } +} + +void AsanCheckIncompatibleRT() { + if (ASAN_DYNAMIC) { + if (__asan_rt_version == ASAN_RT_VERSION_UNDEFINED) { + __asan_rt_version = ASAN_RT_VERSION_DYNAMIC; + } else if (__asan_rt_version != ASAN_RT_VERSION_DYNAMIC) { + ReportIncompatibleRT(); + } + } else { + if (__asan_rt_version == ASAN_RT_VERSION_UNDEFINED) { + // Ensure that dynamic runtime is not present. We should detect it + // as early as possible, otherwise ASan interceptors could bind to + // the functions in dynamic ASan runtime instead of the functions in + // system libraries, causing crashes later in ASan initialization. 
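// [Editor's note] The scan that follows walks the process map with the
// sanitizer's own MemoryMappingLayout helper. The same probe, reduced to a
// freestanding sketch over Linux's /proc/self/maps (ModuleIsMapped is an
// invented helper, shown only to make the mechanism concrete):

#include <cstdio>
#include <cstring>

static bool ModuleIsMapped(const char *needle) {
  std::FILE *f = std::fopen("/proc/self/maps", "r");
  if (!f) return false;
  char line[512];
  bool found = false;
  while (!found && std::fgets(line, sizeof(line), f))
    found = std::strstr(line, needle) != nullptr;
  std::fclose(f);
  return found;
}

// Usage mirroring the check below: ModuleIsMapped("libclang_rt.asan") or
// ModuleIsMapped("libasan.so") means a dynamic ASan runtime is already loaded.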
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true); + char filename[PATH_MAX]; + MemoryMappedSegment segment(filename, sizeof(filename)); + while (proc_maps.Next(&segment)) { + if (IsDynamicRTName(segment.filename)) { + Report("Your application is linked against " + "incompatible ASan runtimes.\n"); + Die(); + } + } + __asan_rt_version = ASAN_RT_VERSION_STATIC; + } else if (__asan_rt_version != ASAN_RT_VERSION_STATIC) { + ReportIncompatibleRT(); + } + } +} +#endif // SANITIZER_ANDROID + +#if !SANITIZER_ANDROID +void ReadContextStack(void *context, uptr *stack, uptr *ssize) { + ucontext_t *ucp = (ucontext_t*)context; + *stack = (uptr)ucp->uc_stack.ss_sp; + *ssize = ucp->uc_stack.ss_size; +} +#else +void ReadContextStack(void *context, uptr *stack, uptr *ssize) { + UNIMPLEMENTED(); +} +#endif + +void *AsanDlSymNext(const char *sym) { + return dlsym(RTLD_NEXT, sym); +} + +bool HandleDlopenInit() { + // Not supported on this platform. + static_assert(!SANITIZER_SUPPORTS_INIT_FOR_DLOPEN, + "Expected SANITIZER_SUPPORTS_INIT_FOR_DLOPEN to be false"); + return false; +} + +} // namespace __asan + +#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || + // SANITIZER_SOLARIS diff --git a/contrib/compiler-rt/lib/asan/asan_lock.h b/contrib/compiler-rt/lib/asan/asan_lock.h new file mode 100644 index 000000000000..e69de29bb2d1 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_lock.h diff --git a/contrib/compiler-rt/lib/asan/asan_mac.cc b/contrib/compiler-rt/lib/asan/asan_mac.cc new file mode 100644 index 000000000000..17a0ec57701d --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_mac.cc @@ -0,0 +1,332 @@ +//===-- asan_mac.cc -------------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Mac-specific details. +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_MAC + +#include "asan_interceptors.h" +#include "asan_internal.h" +#include "asan_mapping.h" +#include "asan_stack.h" +#include "asan_thread.h" +#include "sanitizer_common/sanitizer_atomic.h" +#include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_mac.h" + +#include <dlfcn.h> +#include <fcntl.h> +#include <libkern/OSAtomic.h> +#include <mach-o/dyld.h> +#include <mach-o/getsect.h> +#include <mach-o/loader.h> +#include <pthread.h> +#include <stdlib.h> // for free() +#include <sys/mman.h> +#include <sys/resource.h> +#include <sys/sysctl.h> +#include <sys/ucontext.h> +#include <unistd.h> + +// from <crt_externs.h>, but we don't have that file on iOS +extern "C" { + extern char ***_NSGetArgv(void); + extern char ***_NSGetEnviron(void); +} + +namespace __asan { + +void InitializePlatformInterceptors() {} +void InitializePlatformExceptionHandlers() {} +bool IsSystemHeapAddress (uptr addr) { return false; } + +// No-op. Mac does not support static linkage anyway. 
+void *AsanDoesNotSupportStaticLinkage() { + return 0; +} + +uptr FindDynamicShadowStart() { + uptr granularity = GetMmapGranularity(); + uptr alignment = 8 * granularity; + uptr left_padding = granularity; + uptr space_size = kHighShadowEnd + left_padding; + + uptr largest_gap_found = 0; + uptr max_occupied_addr = 0; + VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size); + uptr shadow_start = + FindAvailableMemoryRange(space_size, alignment, granularity, + &largest_gap_found, &max_occupied_addr); + // If the shadow doesn't fit, restrict the address space to make it fit. + if (shadow_start == 0) { + VReport( + 2, + "Shadow doesn't fit, largest_gap_found = %p, max_occupied_addr = %p\n", + largest_gap_found, max_occupied_addr); + uptr new_max_vm = RoundDownTo(largest_gap_found << SHADOW_SCALE, alignment); + if (new_max_vm < max_occupied_addr) { + Report("Unable to find a memory range for dynamic shadow.\n"); + Report( + "space_size = %p, largest_gap_found = %p, max_occupied_addr = %p, " + "new_max_vm = %p\n", + space_size, largest_gap_found, max_occupied_addr, new_max_vm); + CHECK(0 && "cannot place shadow"); + } + RestrictMemoryToMaxAddress(new_max_vm); + kHighMemEnd = new_max_vm - 1; + space_size = kHighShadowEnd + left_padding; + VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size); + shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity, + nullptr, nullptr); + if (shadow_start == 0) { + Report("Unable to find a memory range after restricting VM.\n"); + CHECK(0 && "cannot place shadow after restricting vm"); + } + } + CHECK_NE((uptr)0, shadow_start); + CHECK(IsAligned(shadow_start, alignment)); + return shadow_start; +} + +// No-op. Mac does not support static linkage anyway. +void AsanCheckDynamicRTPrereqs() {} + +// No-op. Mac does not support static linkage anyway. +void AsanCheckIncompatibleRT() {} + +void AsanApplyToGlobals(globals_op_fptr op, const void *needle) { + // Find the Mach-O header for the image containing the needle + Dl_info info; + int err = dladdr(needle, &info); + if (err == 0) return; + +#if __LP64__ + const struct mach_header_64 *mh = (struct mach_header_64 *)info.dli_fbase; +#else + const struct mach_header *mh = (struct mach_header *)info.dli_fbase; +#endif + + // Look up the __asan_globals section in that image and register its globals + unsigned long size = 0; + __asan_global *globals = (__asan_global *)getsectiondata( + mh, + "__DATA", "__asan_globals", + &size); + + if (!globals) return; + if (size % sizeof(__asan_global) != 0) return; + op(globals, size / sizeof(__asan_global)); +} + +void ReadContextStack(void *context, uptr *stack, uptr *ssize) { + UNIMPLEMENTED(); +} + +// Support for the following functions from libdispatch on Mac OS: +// dispatch_async_f() +// dispatch_async() +// dispatch_sync_f() +// dispatch_sync() +// dispatch_after_f() +// dispatch_after() +// dispatch_group_async_f() +// dispatch_group_async() +// TODO(glider): libdispatch API contains other functions that we don't support +// yet. +// +// dispatch_sync() and dispatch_sync_f() are synchronous, although chances are +// they can cause jobs to run on a thread different from the current one. +// TODO(glider): if so, we need a test for this (otherwise we should remove +// them). 
+// +// The following functions use dispatch_barrier_async_f() (which isn't a library +// function but is exported) and are thus supported: +// dispatch_source_set_cancel_handler_f() +// dispatch_source_set_cancel_handler() +// dispatch_source_set_event_handler_f() +// dispatch_source_set_event_handler() +// +// The reference manual for Grand Central Dispatch is available at +// http://developer.apple.com/library/mac/#documentation/Performance/Reference/GCD_libdispatch_Ref/Reference/reference.html +// The implementation details are at +// http://libdispatch.macosforge.org/trac/browser/trunk/src/queue.c + +typedef void* dispatch_group_t; +typedef void* dispatch_queue_t; +typedef void* dispatch_source_t; +typedef u64 dispatch_time_t; +typedef void (*dispatch_function_t)(void *block); +typedef void* (*worker_t)(void *block); + +// A wrapper for the ObjC blocks used to support libdispatch. +typedef struct { + void *block; + dispatch_function_t func; + u32 parent_tid; +} asan_block_context_t; + +ALWAYS_INLINE +void asan_register_worker_thread(int parent_tid, StackTrace *stack) { + AsanThread *t = GetCurrentThread(); + if (!t) { + t = AsanThread::Create(/* start_routine */ nullptr, /* arg */ nullptr, + parent_tid, stack, /* detached */ true); + t->Init(); + asanThreadRegistry().StartThread(t->tid(), GetTid(), + /* workerthread */ true, 0); + SetCurrentThread(t); + } +} + +// For use by only those functions that allocated the context via +// alloc_asan_context(). +extern "C" +void asan_dispatch_call_block_and_release(void *block) { + GET_STACK_TRACE_THREAD; + asan_block_context_t *context = (asan_block_context_t*)block; + VReport(2, + "asan_dispatch_call_block_and_release(): " + "context: %p, pthread_self: %p\n", + block, pthread_self()); + asan_register_worker_thread(context->parent_tid, &stack); + // Call the original dispatcher for the block. + context->func(context->block); + asan_free(context, &stack, FROM_MALLOC); +} + +} // namespace __asan + +using namespace __asan; // NOLINT + +// Wrap |ctxt| and |func| into an asan_block_context_t. +// The caller retains control of the allocated context. +extern "C" +asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func, + BufferedStackTrace *stack) { + asan_block_context_t *asan_ctxt = + (asan_block_context_t*) asan_malloc(sizeof(asan_block_context_t), stack); + asan_ctxt->block = ctxt; + asan_ctxt->func = func; + asan_ctxt->parent_tid = GetCurrentTidOrInvalid(); + return asan_ctxt; +} + +// Define interceptor for dispatch_*_f function with the three most common +// parameters: dispatch_queue_t, context, dispatch_function_t. 
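// [Editor's note] The pattern the macro below expands to, shown in isolation:
// wrap the caller's (context, function) pair in a heap-allocated node, submit
// a trampoline in its place, and let the trampoline do the ASan-side thread
// bookkeeping before running the real work. This sketch has no libdispatch
// dependency; SubmitWork, Trampoline, and WrappedCall are invented names
// standing in for dispatch_async_f and friends.

typedef void (*work_fn)(void *);

struct WrappedCall {
  void *ctxt;      // the user's original context
  work_fn func;    // the user's original function
  int parent_tid;  // bookkeeping carried over from the submitting thread
};

static void Trampoline(void *raw) {
  WrappedCall *w = static_cast<WrappedCall *>(raw);
  // The real interceptor calls asan_register_worker_thread(...) here so the
  // worker is known to ASan before user code runs on it.
  w->func(w->ctxt);
  delete w;
}

static void SubmitWork(void *ctxt, work_fn func) {
  WrappedCall *w = new WrappedCall{ctxt, func, /*parent_tid=*/0};
  Trampoline(w);  // a real queue would invoke this on a worker thread
}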
+#define INTERCEPT_DISPATCH_X_F_3(dispatch_x_f) \ + INTERCEPTOR(void, dispatch_x_f, dispatch_queue_t dq, void *ctxt, \ + dispatch_function_t func) { \ + GET_STACK_TRACE_THREAD; \ + asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); \ + if (Verbosity() >= 2) { \ + Report(#dispatch_x_f "(): context: %p, pthread_self: %p\n", \ + asan_ctxt, pthread_self()); \ + PRINT_CURRENT_STACK(); \ + } \ + return REAL(dispatch_x_f)(dq, (void*)asan_ctxt, \ + asan_dispatch_call_block_and_release); \ + } + +INTERCEPT_DISPATCH_X_F_3(dispatch_async_f) +INTERCEPT_DISPATCH_X_F_3(dispatch_sync_f) +INTERCEPT_DISPATCH_X_F_3(dispatch_barrier_async_f) + +INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when, + dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) { + GET_STACK_TRACE_THREAD; + asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); + if (Verbosity() >= 2) { + Report("dispatch_after_f: %p\n", asan_ctxt); + PRINT_CURRENT_STACK(); + } + return REAL(dispatch_after_f)(when, dq, (void*)asan_ctxt, + asan_dispatch_call_block_and_release); +} + +INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group, + dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) { + GET_STACK_TRACE_THREAD; + asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); + if (Verbosity() >= 2) { + Report("dispatch_group_async_f(): context: %p, pthread_self: %p\n", + asan_ctxt, pthread_self()); + PRINT_CURRENT_STACK(); + } + REAL(dispatch_group_async_f)(group, dq, (void*)asan_ctxt, + asan_dispatch_call_block_and_release); +} + +#if !defined(MISSING_BLOCKS_SUPPORT) +extern "C" { +void dispatch_async(dispatch_queue_t dq, void(^work)(void)); +void dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq, + void(^work)(void)); +void dispatch_after(dispatch_time_t when, dispatch_queue_t queue, + void(^work)(void)); +void dispatch_source_set_cancel_handler(dispatch_source_t ds, + void(^work)(void)); +void dispatch_source_set_event_handler(dispatch_source_t ds, void(^work)(void)); +} + +#define GET_ASAN_BLOCK(work) \ + void (^asan_block)(void); \ + int parent_tid = GetCurrentTidOrInvalid(); \ + asan_block = ^(void) { \ + GET_STACK_TRACE_THREAD; \ + asan_register_worker_thread(parent_tid, &stack); \ + work(); \ + } + +INTERCEPTOR(void, dispatch_async, + dispatch_queue_t dq, void(^work)(void)) { + ENABLE_FRAME_POINTER; + GET_ASAN_BLOCK(work); + REAL(dispatch_async)(dq, asan_block); +} + +INTERCEPTOR(void, dispatch_group_async, + dispatch_group_t dg, dispatch_queue_t dq, void(^work)(void)) { + ENABLE_FRAME_POINTER; + GET_ASAN_BLOCK(work); + REAL(dispatch_group_async)(dg, dq, asan_block); +} + +INTERCEPTOR(void, dispatch_after, + dispatch_time_t when, dispatch_queue_t queue, void(^work)(void)) { + ENABLE_FRAME_POINTER; + GET_ASAN_BLOCK(work); + REAL(dispatch_after)(when, queue, asan_block); +} + +INTERCEPTOR(void, dispatch_source_set_cancel_handler, + dispatch_source_t ds, void(^work)(void)) { + if (!work) { + REAL(dispatch_source_set_cancel_handler)(ds, work); + return; + } + ENABLE_FRAME_POINTER; + GET_ASAN_BLOCK(work); + REAL(dispatch_source_set_cancel_handler)(ds, asan_block); +} + +INTERCEPTOR(void, dispatch_source_set_event_handler, + dispatch_source_t ds, void(^work)(void)) { + ENABLE_FRAME_POINTER; + GET_ASAN_BLOCK(work); + REAL(dispatch_source_set_event_handler)(ds, asan_block); +} +#endif + +#endif // SANITIZER_MAC diff --git a/contrib/compiler-rt/lib/asan/asan_malloc_linux.cc b/contrib/compiler-rt/lib/asan/asan_malloc_linux.cc new file mode 100644 index 
000000000000..0a534fe21168 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_malloc_linux.cc @@ -0,0 +1,300 @@ +//===-- asan_malloc_linux.cc ----------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Linux-specific malloc interception. +// We simply define functions like malloc, free, realloc, etc. +// They will replace the corresponding libc functions automagically. +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_FREEBSD || SANITIZER_FUCHSIA || SANITIZER_LINUX || \ + SANITIZER_NETBSD || SANITIZER_RTEMS || SANITIZER_SOLARIS + +#include "sanitizer_common/sanitizer_allocator_checks.h" +#include "sanitizer_common/sanitizer_errno.h" +#include "sanitizer_common/sanitizer_tls_get_addr.h" +#include "asan_allocator.h" +#include "asan_interceptors.h" +#include "asan_internal.h" +#include "asan_malloc_local.h" +#include "asan_stack.h" + +// ---------------------- Replacement functions ---------------- {{{1 +using namespace __asan; // NOLINT + +static uptr allocated_for_dlsym; +static uptr last_dlsym_alloc_size_in_words; +static const uptr kDlsymAllocPoolSize = SANITIZER_RTEMS ? 4096 : 1024; +static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize]; + +static INLINE bool IsInDlsymAllocPool(const void *ptr) { + uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym; + return off < allocated_for_dlsym * sizeof(alloc_memory_for_dlsym[0]); +} + +static void *AllocateFromLocalPool(uptr size_in_bytes) { + uptr size_in_words = RoundUpTo(size_in_bytes, kWordSize) / kWordSize; + void *mem = (void*)&alloc_memory_for_dlsym[allocated_for_dlsym]; + last_dlsym_alloc_size_in_words = size_in_words; + allocated_for_dlsym += size_in_words; + CHECK_LT(allocated_for_dlsym, kDlsymAllocPoolSize); + return mem; +} + +static void DeallocateFromLocalPool(const void *ptr) { + // Hack: since glibc 2.27 dlsym no longer uses stack-allocated memory to store + // error messages and instead uses malloc followed by free. To avoid pool + // exhaustion due to long object filenames, handle that special case here. 
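// [Editor's note] The body that follows implements exactly this rollback.
// For clarity, here is the whole pool scheme in miniature, as a freestanding
// sketch (PoolAlloc, PoolFree, and PoolOwns are invented names): bump
// allocation from a static buffer, plus a free() that only un-bumps when it
// sees the most recent block, which is enough for dlsym()'s single
// malloc-then-free pair on glibc >= 2.27.

#include <cstddef>

static unsigned char pool[4096];
static size_t pool_used = 0, pool_last = 0;

static void *PoolAlloc(size_t n) {
  n = (n + 7) & ~static_cast<size_t>(7);  // keep 8-byte alignment
  if (pool_used + n > sizeof(pool)) return nullptr;
  void *p = pool + pool_used;
  pool_used += n;
  pool_last = n;
  return p;
}

// Mirrors IsInDlsymAllocPool() above.
static bool PoolOwns(const void *p) {
  const unsigned char *q = static_cast<const unsigned char *>(p);
  return q >= pool && q < pool + pool_used;
}

static void PoolFree(void *p) {
  if (pool_last && p == pool + pool_used - pool_last) {
    pool_used -= pool_last;  // LIFO free: roll the cursor back
    pool_last = 0;
  }
  // Any other pool pointer is intentionally leaked, as in the code below.
}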
+ uptr prev_offset = allocated_for_dlsym - last_dlsym_alloc_size_in_words; + void *prev_mem = (void*)&alloc_memory_for_dlsym[prev_offset]; + if (prev_mem == ptr) { + REAL(memset)(prev_mem, 0, last_dlsym_alloc_size_in_words * kWordSize); + allocated_for_dlsym = prev_offset; + last_dlsym_alloc_size_in_words = 0; + } +} + +static int PosixMemalignFromLocalPool(void **memptr, uptr alignment, + uptr size_in_bytes) { + if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) + return errno_EINVAL; + + CHECK(alignment >= kWordSize); + + uptr addr = (uptr)&alloc_memory_for_dlsym[allocated_for_dlsym]; + uptr aligned_addr = RoundUpTo(addr, alignment); + uptr aligned_size = RoundUpTo(size_in_bytes, kWordSize); + + uptr *end_mem = (uptr*)(aligned_addr + aligned_size); + uptr allocated = end_mem - alloc_memory_for_dlsym; + if (allocated >= kDlsymAllocPoolSize) + return errno_ENOMEM; + + allocated_for_dlsym = allocated; + *memptr = (void*)aligned_addr; + return 0; +} + +#if SANITIZER_RTEMS +void* MemalignFromLocalPool(uptr alignment, uptr size) { + void *ptr = nullptr; + alignment = Max(alignment, kWordSize); + PosixMemalignFromLocalPool(&ptr, alignment, size); + return ptr; +} + +bool IsFromLocalPool(const void *ptr) { + return IsInDlsymAllocPool(ptr); +} +#endif + +static INLINE bool MaybeInDlsym() { + // Fuchsia doesn't use dlsym-based interceptors. + return !SANITIZER_FUCHSIA && asan_init_is_running; +} + +static INLINE bool UseLocalPool() { + return EarlyMalloc() || MaybeInDlsym(); +} + +static void *ReallocFromLocalPool(void *ptr, uptr size) { + const uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym; + const uptr copy_size = Min(size, kDlsymAllocPoolSize - offset); + void *new_ptr; + if (UNLIKELY(UseLocalPool())) { + new_ptr = AllocateFromLocalPool(size); + } else { + ENSURE_ASAN_INITED(); + GET_STACK_TRACE_MALLOC; + new_ptr = asan_malloc(size, &stack); + } + internal_memcpy(new_ptr, ptr, copy_size); + return new_ptr; +} + +INTERCEPTOR(void, free, void *ptr) { + GET_STACK_TRACE_FREE; + if (UNLIKELY(IsInDlsymAllocPool(ptr))) { + DeallocateFromLocalPool(ptr); + return; + } + asan_free(ptr, &stack, FROM_MALLOC); +} + +#if SANITIZER_INTERCEPT_CFREE +INTERCEPTOR(void, cfree, void *ptr) { + GET_STACK_TRACE_FREE; + if (UNLIKELY(IsInDlsymAllocPool(ptr))) + return; + asan_free(ptr, &stack, FROM_MALLOC); +} +#endif // SANITIZER_INTERCEPT_CFREE + +INTERCEPTOR(void*, malloc, uptr size) { + if (UNLIKELY(UseLocalPool())) + // Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym. + return AllocateFromLocalPool(size); + ENSURE_ASAN_INITED(); + GET_STACK_TRACE_MALLOC; + return asan_malloc(size, &stack); +} + +INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) { + if (UNLIKELY(UseLocalPool())) + // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym. 
+ return AllocateFromLocalPool(nmemb * size); + ENSURE_ASAN_INITED(); + GET_STACK_TRACE_MALLOC; + return asan_calloc(nmemb, size, &stack); +} + +INTERCEPTOR(void*, realloc, void *ptr, uptr size) { + if (UNLIKELY(IsInDlsymAllocPool(ptr))) + return ReallocFromLocalPool(ptr, size); + if (UNLIKELY(UseLocalPool())) + return AllocateFromLocalPool(size); + ENSURE_ASAN_INITED(); + GET_STACK_TRACE_MALLOC; + return asan_realloc(ptr, size, &stack); +} + +#if SANITIZER_INTERCEPT_MEMALIGN +INTERCEPTOR(void*, memalign, uptr boundary, uptr size) { + GET_STACK_TRACE_MALLOC; + return asan_memalign(boundary, size, &stack, FROM_MALLOC); +} + +INTERCEPTOR(void*, __libc_memalign, uptr boundary, uptr size) { + GET_STACK_TRACE_MALLOC; + void *res = asan_memalign(boundary, size, &stack, FROM_MALLOC); + DTLS_on_libc_memalign(res, size); + return res; +} +#endif // SANITIZER_INTERCEPT_MEMALIGN + +#if SANITIZER_INTERCEPT_ALIGNED_ALLOC +INTERCEPTOR(void*, aligned_alloc, uptr boundary, uptr size) { + GET_STACK_TRACE_MALLOC; + return asan_aligned_alloc(boundary, size, &stack); +} +#endif // SANITIZER_INTERCEPT_ALIGNED_ALLOC + +INTERCEPTOR(uptr, malloc_usable_size, void *ptr) { + GET_CURRENT_PC_BP_SP; + (void)sp; + return asan_malloc_usable_size(ptr, pc, bp); +} + +#if SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO +// We avoid including malloc.h for portability reasons. +// man mallinfo says the fields are "long", but the implementation uses int. +// It doesn't matter much -- we just need to make sure that the libc's mallinfo +// is not called. +struct fake_mallinfo { + int x[10]; +}; + +INTERCEPTOR(struct fake_mallinfo, mallinfo, void) { + struct fake_mallinfo res; + REAL(memset)(&res, 0, sizeof(res)); + return res; +} + +INTERCEPTOR(int, mallopt, int cmd, int value) { + return 0; +} +#endif // SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO + +INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) { + if (UNLIKELY(UseLocalPool())) + return PosixMemalignFromLocalPool(memptr, alignment, size); + GET_STACK_TRACE_MALLOC; + return asan_posix_memalign(memptr, alignment, size, &stack); +} + +INTERCEPTOR(void*, valloc, uptr size) { + GET_STACK_TRACE_MALLOC; + return asan_valloc(size, &stack); +} + +#if SANITIZER_INTERCEPT_PVALLOC +INTERCEPTOR(void*, pvalloc, uptr size) { + GET_STACK_TRACE_MALLOC; + return asan_pvalloc(size, &stack); +} +#endif // SANITIZER_INTERCEPT_PVALLOC + +INTERCEPTOR(void, malloc_stats, void) { + __asan_print_accumulated_stats(); +} + +#if SANITIZER_ANDROID +// Format of __libc_malloc_dispatch has changed in Android L. +// While we are moving towards a solution that does not depend on bionic +// internals, here is something to support both K* and L releases. 
+struct MallocDebugK { + void *(*malloc)(uptr bytes); + void (*free)(void *mem); + void *(*calloc)(uptr n_elements, uptr elem_size); + void *(*realloc)(void *oldMem, uptr bytes); + void *(*memalign)(uptr alignment, uptr bytes); + uptr (*malloc_usable_size)(void *mem); +}; + +struct MallocDebugL { + void *(*calloc)(uptr n_elements, uptr elem_size); + void (*free)(void *mem); + fake_mallinfo (*mallinfo)(void); + void *(*malloc)(uptr bytes); + uptr (*malloc_usable_size)(void *mem); + void *(*memalign)(uptr alignment, uptr bytes); + int (*posix_memalign)(void **memptr, uptr alignment, uptr size); + void* (*pvalloc)(uptr size); + void *(*realloc)(void *oldMem, uptr bytes); + void* (*valloc)(uptr size); +}; + +ALIGNED(32) const MallocDebugK asan_malloc_dispatch_k = { + WRAP(malloc), WRAP(free), WRAP(calloc), + WRAP(realloc), WRAP(memalign), WRAP(malloc_usable_size)}; + +ALIGNED(32) const MallocDebugL asan_malloc_dispatch_l = { + WRAP(calloc), WRAP(free), WRAP(mallinfo), + WRAP(malloc), WRAP(malloc_usable_size), WRAP(memalign), + WRAP(posix_memalign), WRAP(pvalloc), WRAP(realloc), + WRAP(valloc)}; + +namespace __asan { +void ReplaceSystemMalloc() { + void **__libc_malloc_dispatch_p = + (void **)AsanDlSymNext("__libc_malloc_dispatch"); + if (__libc_malloc_dispatch_p) { + // Decide on K vs L dispatch format by the presence of + // __libc_malloc_default_dispatch export in libc. + void *default_dispatch_p = AsanDlSymNext("__libc_malloc_default_dispatch"); + if (default_dispatch_p) + *__libc_malloc_dispatch_p = (void *)&asan_malloc_dispatch_k; + else + *__libc_malloc_dispatch_p = (void *)&asan_malloc_dispatch_l; + } +} +} // namespace __asan + +#else // SANITIZER_ANDROID + +namespace __asan { +void ReplaceSystemMalloc() { +} +} // namespace __asan +#endif // SANITIZER_ANDROID + +#endif // SANITIZER_FREEBSD || SANITIZER_FUCHSIA || SANITIZER_LINUX || + // SANITIZER_NETBSD || SANITIZER_SOLARIS diff --git a/contrib/compiler-rt/lib/asan/asan_malloc_local.h b/contrib/compiler-rt/lib/asan/asan_malloc_local.h new file mode 100644 index 000000000000..0e8de207d98d --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_malloc_local.h @@ -0,0 +1,44 @@ +//===-- asan_malloc_local.h -------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Provide interfaces to check for and handle local pool memory allocation. +//===----------------------------------------------------------------------===// + +#ifndef ASAN_MALLOC_LOCAL_H +#define ASAN_MALLOC_LOCAL_H + +#include "sanitizer_common/sanitizer_platform.h" +#include "asan_internal.h" + +// On RTEMS, we use the local pool to handle memory allocation when the ASan +// run-time is not up. 
+static INLINE bool EarlyMalloc() { + return SANITIZER_RTEMS && (!__asan::asan_inited || + __asan::asan_init_is_running); +} + +void* MemalignFromLocalPool(uptr alignment, uptr size); + +#if SANITIZER_RTEMS + +bool IsFromLocalPool(const void *ptr); + +#define ALLOCATE_FROM_LOCAL_POOL UNLIKELY(EarlyMalloc()) +#define IS_FROM_LOCAL_POOL(ptr) UNLIKELY(IsFromLocalPool(ptr)) + +#else // SANITIZER_RTEMS + +#define ALLOCATE_FROM_LOCAL_POOL 0 +#define IS_FROM_LOCAL_POOL(ptr) 0 + +#endif // SANITIZER_RTEMS + +#endif // ASAN_MALLOC_LOCAL_H diff --git a/contrib/compiler-rt/lib/asan/asan_malloc_mac.cc b/contrib/compiler-rt/lib/asan/asan_malloc_mac.cc new file mode 100644 index 000000000000..27281f1bc834 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_malloc_mac.cc @@ -0,0 +1,85 @@ +//===-- asan_malloc_mac.cc ------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Mac-specific malloc interception. +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_MAC + +#include "asan_interceptors.h" +#include "asan_report.h" +#include "asan_stack.h" +#include "asan_stats.h" + +using namespace __asan; +#define COMMON_MALLOC_ZONE_NAME "asan" +#define COMMON_MALLOC_ENTER() ENSURE_ASAN_INITED() +#define COMMON_MALLOC_SANITIZER_INITIALIZED asan_inited +#define COMMON_MALLOC_FORCE_LOCK() asan_mz_force_lock() +#define COMMON_MALLOC_FORCE_UNLOCK() asan_mz_force_unlock() +#define COMMON_MALLOC_MEMALIGN(alignment, size) \ + GET_STACK_TRACE_MALLOC; \ + void *p = asan_memalign(alignment, size, &stack, FROM_MALLOC) +#define COMMON_MALLOC_MALLOC(size) \ + GET_STACK_TRACE_MALLOC; \ + void *p = asan_malloc(size, &stack) +#define COMMON_MALLOC_REALLOC(ptr, size) \ + GET_STACK_TRACE_MALLOC; \ + void *p = asan_realloc(ptr, size, &stack); +#define COMMON_MALLOC_CALLOC(count, size) \ + GET_STACK_TRACE_MALLOC; \ + void *p = asan_calloc(count, size, &stack); +#define COMMON_MALLOC_POSIX_MEMALIGN(memptr, alignment, size) \ + GET_STACK_TRACE_MALLOC; \ + int res = asan_posix_memalign(memptr, alignment, size, &stack); +#define COMMON_MALLOC_VALLOC(size) \ + GET_STACK_TRACE_MALLOC; \ + void *p = asan_memalign(GetPageSizeCached(), size, &stack, FROM_MALLOC); +#define COMMON_MALLOC_FREE(ptr) \ + GET_STACK_TRACE_FREE; \ + asan_free(ptr, &stack, FROM_MALLOC); +#define COMMON_MALLOC_SIZE(ptr) \ + uptr size = asan_mz_size(ptr); +#define COMMON_MALLOC_FILL_STATS(zone, stats) \ + AsanMallocStats malloc_stats; \ + FillMallocStatistics(&malloc_stats); \ + CHECK(sizeof(malloc_statistics_t) == sizeof(AsanMallocStats)); \ + internal_memcpy(stats, &malloc_stats, sizeof(malloc_statistics_t)); +#define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \ + GET_STACK_TRACE_FREE; \ + ReportMacMzReallocUnknown((uptr)ptr, (uptr)zone_ptr, zone_name, &stack); +#define COMMON_MALLOC_NAMESPACE __asan + +#include "sanitizer_common/sanitizer_malloc_mac.inc" + +namespace COMMON_MALLOC_NAMESPACE { +bool HandleDlopenInit() { + static_assert(SANITIZER_SUPPORTS_INIT_FOR_DLOPEN, + "Expected SANITIZER_SUPPORTS_INIT_FOR_DLOPEN to be true"); + // We have no reliable way of knowing how we are being loaded + // so make it a requirement on Apple 
platforms to set this environment + // variable to indicate that we want to perform initialization via + // dlopen(). + auto init_str = GetEnv("APPLE_ASAN_INIT_FOR_DLOPEN"); + if (!init_str) + return false; + if (internal_strncmp(init_str, "1", 1) != 0) + return false; + // When we are loaded via `dlopen()` path we still initialize the malloc zone + // so Symbolication clients (e.g. `leaks`) that load the ASan allocator can + // find an initialized malloc zone. + InitMallocZoneFields(); + return true; +} +} // namespace COMMON_MALLOC_NAMESPACE + +#endif diff --git a/contrib/compiler-rt/lib/asan/asan_malloc_win.cc b/contrib/compiler-rt/lib/asan/asan_malloc_win.cc new file mode 100644 index 000000000000..887936431931 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_malloc_win.cc @@ -0,0 +1,267 @@ +//===-- asan_malloc_win.cc ------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Windows-specific malloc interception. +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_WINDOWS +// Intentionally not including windows.h here, to avoid the risk of +// pulling in conflicting declarations of these functions. (With mingw-w64, +// there's a risk of windows.h pulling in stdint.h.) +typedef int BOOL; +typedef void *HANDLE; +typedef const void *LPCVOID; +typedef void *LPVOID; + +#define HEAP_ZERO_MEMORY 0x00000008 +#define HEAP_REALLOC_IN_PLACE_ONLY 0x00000010 + + +#include "asan_allocator.h" +#include "asan_interceptors.h" +#include "asan_internal.h" +#include "asan_stack.h" +#include "interception/interception.h" + +#include <stddef.h> + +using namespace __asan; // NOLINT + +// MT: Simply defining functions with the same signature in *.obj +// files overrides the standard functions in the CRT. +// MD: Memory allocation functions are defined in the CRT .dll, +// so we have to intercept them before they are called for the first time. 
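// [Editor's note] The "MT" half of the comment above, demonstrated in
// miniature and outside this patch: with a statically linked CRT, an object
// file that defines malloc/free simply wins at link time, because object
// files take precedence over library members. g_bytes_requested is invented
// for the example; the forwarding targets are the documented Win32 heap API.

#include <windows.h>
#include <cstddef>

static size_t g_bytes_requested = 0;

// Linked into a /MT program, these replace the CRT definitions for every
// statically linked caller. Under /MD the CRT lives in its own DLL and keeps
// its own copies, which is why the interception machinery below exists.
extern "C" void *malloc(size_t size) {
  g_bytes_requested += size;
  return HeapAlloc(GetProcessHeap(), 0, size);
}

extern "C" void free(void *ptr) {
  if (ptr) HeapFree(GetProcessHeap(), 0, ptr);
}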
+ +#if ASAN_DYNAMIC +# define ALLOCATION_FUNCTION_ATTRIBUTE +#else +# define ALLOCATION_FUNCTION_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE +#endif + +extern "C" { +ALLOCATION_FUNCTION_ATTRIBUTE +void free(void *ptr) { + GET_STACK_TRACE_FREE; + return asan_free(ptr, &stack, FROM_MALLOC); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void _free_dbg(void *ptr, int) { + free(ptr); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void _free_base(void *ptr) { + free(ptr); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *malloc(size_t size) { + GET_STACK_TRACE_MALLOC; + return asan_malloc(size, &stack); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *_malloc_base(size_t size) { + return malloc(size); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *_malloc_dbg(size_t size, int, const char *, int) { + return malloc(size); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *calloc(size_t nmemb, size_t size) { + GET_STACK_TRACE_MALLOC; + return asan_calloc(nmemb, size, &stack); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *_calloc_base(size_t nmemb, size_t size) { + return calloc(nmemb, size); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *_calloc_dbg(size_t nmemb, size_t size, int, const char *, int) { + return calloc(nmemb, size); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *_calloc_impl(size_t nmemb, size_t size, int *errno_tmp) { + return calloc(nmemb, size); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *realloc(void *ptr, size_t size) { + GET_STACK_TRACE_MALLOC; + return asan_realloc(ptr, size, &stack); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *_realloc_dbg(void *ptr, size_t size, int) { + UNREACHABLE("_realloc_dbg should not exist!"); + return 0; +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *_realloc_base(void *ptr, size_t size) { + return realloc(ptr, size); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *_recalloc(void *p, size_t n, size_t elem_size) { + if (!p) + return calloc(n, elem_size); + const size_t size = n * elem_size; + if (elem_size != 0 && size / elem_size != n) + return 0; + return realloc(p, size); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *_recalloc_base(void *p, size_t n, size_t elem_size) { + return _recalloc(p, n, elem_size); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +size_t _msize(void *ptr) { + GET_CURRENT_PC_BP_SP; + (void)sp; + return asan_malloc_usable_size(ptr, pc, bp); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +size_t _msize_base(void *ptr) { + return _msize(ptr); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *_expand(void *memblock, size_t size) { + // _expand is used in realloc-like functions to resize the buffer if possible. + // We don't want memory to stand still while resizing buffers, so return 0. + return 0; +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *_expand_dbg(void *memblock, size_t size) { + return _expand(memblock, size); +} + +// TODO(timurrrr): Might want to add support for _aligned_* allocation +// functions to detect a bit more bugs. Those functions seem to wrap malloc(). + +int _CrtDbgReport(int, const char*, int, + const char*, const char*, ...) { + ShowStatsAndAbort(); +} + +int _CrtDbgReportW(int reportType, const wchar_t*, int, + const wchar_t*, const wchar_t*, ...) { + ShowStatsAndAbort(); +} + +int _CrtSetReportMode(int, int) { + return 0; +} +} // extern "C" + +INTERCEPTOR_WINAPI(LPVOID, HeapAlloc, HANDLE hHeap, DWORD dwFlags, + SIZE_T dwBytes) { + GET_STACK_TRACE_MALLOC; + void *p = asan_malloc(dwBytes, &stack); + // Reading MSDN suggests that the *entire* usable allocation is zeroed out. + // Otherwise it is difficult to HeapReAlloc with HEAP_ZERO_MEMORY. 
+  // https://blogs.msdn.microsoft.com/oldnewthing/20120316-00/?p=8083
+  if (dwFlags == HEAP_ZERO_MEMORY)
+    internal_memset(p, 0, asan_mz_size(p));
+  else
+    CHECK(dwFlags == 0 && "unsupported heap flags");
+  return p;
+}
+
+INTERCEPTOR_WINAPI(BOOL, HeapFree, HANDLE hHeap, DWORD dwFlags, LPVOID lpMem) {
+  CHECK(dwFlags == 0 && "unsupported heap flags");
+  GET_STACK_TRACE_FREE;
+  asan_free(lpMem, &stack, FROM_MALLOC);
+  return true;
+}
+
+INTERCEPTOR_WINAPI(LPVOID, HeapReAlloc, HANDLE hHeap, DWORD dwFlags,
+                   LPVOID lpMem, SIZE_T dwBytes) {
+  GET_STACK_TRACE_MALLOC;
+  // Realloc should never reallocate in place.
+  if (dwFlags & HEAP_REALLOC_IN_PLACE_ONLY)
+    return nullptr;
+  CHECK(dwFlags == 0 && "unsupported heap flags");
+  return asan_realloc(lpMem, dwBytes, &stack);
+}
+
+INTERCEPTOR_WINAPI(SIZE_T, HeapSize, HANDLE hHeap, DWORD dwFlags,
+                   LPCVOID lpMem) {
+  CHECK(dwFlags == 0 && "unsupported heap flags");
+  GET_CURRENT_PC_BP_SP;
+  (void)sp;
+  return asan_malloc_usable_size(lpMem, pc, bp);
+}
+
+namespace __asan {
+
+static void TryToOverrideFunction(const char *fname, uptr new_func) {
+  // Failure here is not fatal. The CRT may not be present, and different CRT
+  // versions use different symbols.
+  if (!__interception::OverrideFunction(fname, new_func))
+    VPrintf(2, "Failed to override function %s\n", fname);
+}
+
+void ReplaceSystemMalloc() {
+#if defined(ASAN_DYNAMIC)
+  TryToOverrideFunction("free", (uptr)free);
+  TryToOverrideFunction("_free_base", (uptr)free);
+  TryToOverrideFunction("malloc", (uptr)malloc);
+  TryToOverrideFunction("_malloc_base", (uptr)malloc);
+  TryToOverrideFunction("_malloc_crt", (uptr)malloc);
+  TryToOverrideFunction("calloc", (uptr)calloc);
+  TryToOverrideFunction("_calloc_base", (uptr)calloc);
+  TryToOverrideFunction("_calloc_crt", (uptr)calloc);
+  TryToOverrideFunction("realloc", (uptr)realloc);
+  TryToOverrideFunction("_realloc_base", (uptr)realloc);
+  TryToOverrideFunction("_realloc_crt", (uptr)realloc);
+  TryToOverrideFunction("_recalloc", (uptr)_recalloc);
+  TryToOverrideFunction("_recalloc_base", (uptr)_recalloc);
+  TryToOverrideFunction("_recalloc_crt", (uptr)_recalloc);
+  TryToOverrideFunction("_msize", (uptr)_msize);
+  TryToOverrideFunction("_msize_base", (uptr)_msize);
+  TryToOverrideFunction("_expand", (uptr)_expand);
+  TryToOverrideFunction("_expand_base", (uptr)_expand);
+
+  // Recent versions of ucrtbase.dll appear to be built with PGO and LTCG, which
+  // enable cross-module inlining. This means our _malloc_base hook won't catch
+  // all CRT allocations. This code here patches the import table of
+  // ucrtbase.dll so that all attempts to use the lower-level win32 heap
+  // allocation API will be directed to ASan's heap. We don't currently
+  // intercept all calls to HeapAlloc. If we did, we would have to check on
+  // HeapFree whether the pointer came from ASan or from the system.
+#define INTERCEPT_UCRT_FUNCTION(func) \ + if (!INTERCEPT_FUNCTION_DLLIMPORT("ucrtbase.dll", \ + "api-ms-win-core-heap-l1-1-0.dll", func)) \ + VPrintf(2, "Failed to intercept ucrtbase.dll import %s\n", #func); + INTERCEPT_UCRT_FUNCTION(HeapAlloc); + INTERCEPT_UCRT_FUNCTION(HeapFree); + INTERCEPT_UCRT_FUNCTION(HeapReAlloc); + INTERCEPT_UCRT_FUNCTION(HeapSize); +#undef INTERCEPT_UCRT_FUNCTION +#endif +} +} // namespace __asan + +#endif // _WIN32 diff --git a/contrib/compiler-rt/lib/asan/asan_mapping.h b/contrib/compiler-rt/lib/asan/asan_mapping.h new file mode 100644 index 000000000000..f3696da62b30 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_mapping.h @@ -0,0 +1,401 @@ +//===-- asan_mapping.h ------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Defines ASan memory mapping. +//===----------------------------------------------------------------------===// +#ifndef ASAN_MAPPING_H +#define ASAN_MAPPING_H + +#include "asan_internal.h" + +// The full explanation of the memory mapping could be found here: +// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm +// +// Typical shadow mapping on Linux/x86_64 with SHADOW_OFFSET == 0x00007fff8000: +// || `[0x10007fff8000, 0x7fffffffffff]` || HighMem || +// || `[0x02008fff7000, 0x10007fff7fff]` || HighShadow || +// || `[0x00008fff7000, 0x02008fff6fff]` || ShadowGap || +// || `[0x00007fff8000, 0x00008fff6fff]` || LowShadow || +// || `[0x000000000000, 0x00007fff7fff]` || LowMem || +// +// When SHADOW_OFFSET is zero (-pie): +// || `[0x100000000000, 0x7fffffffffff]` || HighMem || +// || `[0x020000000000, 0x0fffffffffff]` || HighShadow || +// || `[0x000000040000, 0x01ffffffffff]` || ShadowGap || +// +// Special case when something is already mapped between +// 0x003000000000 and 0x005000000000 (e.g. 
when prelink is installed): +// || `[0x10007fff8000, 0x7fffffffffff]` || HighMem || +// || `[0x02008fff7000, 0x10007fff7fff]` || HighShadow || +// || `[0x005000000000, 0x02008fff6fff]` || ShadowGap3 || +// || `[0x003000000000, 0x004fffffffff]` || MidMem || +// || `[0x000a7fff8000, 0x002fffffffff]` || ShadowGap2 || +// || `[0x00067fff8000, 0x000a7fff7fff]` || MidShadow || +// || `[0x00008fff7000, 0x00067fff7fff]` || ShadowGap || +// || `[0x00007fff8000, 0x00008fff6fff]` || LowShadow || +// || `[0x000000000000, 0x00007fff7fff]` || LowMem || +// +// Default Linux/i386 mapping on x86_64 machine: +// || `[0x40000000, 0xffffffff]` || HighMem || +// || `[0x28000000, 0x3fffffff]` || HighShadow || +// || `[0x24000000, 0x27ffffff]` || ShadowGap || +// || `[0x20000000, 0x23ffffff]` || LowShadow || +// || `[0x00000000, 0x1fffffff]` || LowMem || +// +// Default Linux/i386 mapping on i386 machine +// (addresses starting with 0xc0000000 are reserved +// for kernel and thus not sanitized): +// || `[0x38000000, 0xbfffffff]` || HighMem || +// || `[0x27000000, 0x37ffffff]` || HighShadow || +// || `[0x24000000, 0x26ffffff]` || ShadowGap || +// || `[0x20000000, 0x23ffffff]` || LowShadow || +// || `[0x00000000, 0x1fffffff]` || LowMem || +// +// Default Linux/MIPS32 mapping: +// || `[0x2aaa0000, 0xffffffff]` || HighMem || +// || `[0x0fff4000, 0x2aa9ffff]` || HighShadow || +// || `[0x0bff4000, 0x0fff3fff]` || ShadowGap || +// || `[0x0aaa0000, 0x0bff3fff]` || LowShadow || +// || `[0x00000000, 0x0aa9ffff]` || LowMem || +// +// Default Linux/MIPS64 mapping: +// || `[0x4000000000, 0xffffffffff]` || HighMem || +// || `[0x2800000000, 0x3fffffffff]` || HighShadow || +// || `[0x2400000000, 0x27ffffffff]` || ShadowGap || +// || `[0x2000000000, 0x23ffffffff]` || LowShadow || +// || `[0x0000000000, 0x1fffffffff]` || LowMem || +// +// Default Linux/AArch64 (39-bit VMA) mapping: +// || `[0x2000000000, 0x7fffffffff]` || highmem || +// || `[0x1400000000, 0x1fffffffff]` || highshadow || +// || `[0x1200000000, 0x13ffffffff]` || shadowgap || +// || `[0x1000000000, 0x11ffffffff]` || lowshadow || +// || `[0x0000000000, 0x0fffffffff]` || lowmem || +// +// Default Linux/AArch64 (42-bit VMA) mapping: +// || `[0x10000000000, 0x3ffffffffff]` || highmem || +// || `[0x0a000000000, 0x0ffffffffff]` || highshadow || +// || `[0x09000000000, 0x09fffffffff]` || shadowgap || +// || `[0x08000000000, 0x08fffffffff]` || lowshadow || +// || `[0x00000000000, 0x07fffffffff]` || lowmem || +// +// Default Linux/S390 mapping: +// || `[0x30000000, 0x7fffffff]` || HighMem || +// || `[0x26000000, 0x2fffffff]` || HighShadow || +// || `[0x24000000, 0x25ffffff]` || ShadowGap || +// || `[0x20000000, 0x23ffffff]` || LowShadow || +// || `[0x00000000, 0x1fffffff]` || LowMem || +// +// Default Linux/SystemZ mapping: +// || `[0x14000000000000, 0x1fffffffffffff]` || HighMem || +// || `[0x12800000000000, 0x13ffffffffffff]` || HighShadow || +// || `[0x12000000000000, 0x127fffffffffff]` || ShadowGap || +// || `[0x10000000000000, 0x11ffffffffffff]` || LowShadow || +// || `[0x00000000000000, 0x0fffffffffffff]` || LowMem || +// +// Shadow mapping on FreeBSD/x86-64 with SHADOW_OFFSET == 0x400000000000: +// || `[0x500000000000, 0x7fffffffffff]` || HighMem || +// || `[0x4a0000000000, 0x4fffffffffff]` || HighShadow || +// || `[0x480000000000, 0x49ffffffffff]` || ShadowGap || +// || `[0x400000000000, 0x47ffffffffff]` || LowShadow || +// || `[0x000000000000, 0x3fffffffffff]` || LowMem || +// +// Shadow mapping on FreeBSD/i386 with SHADOW_OFFSET == 0x40000000: +// || `[0x60000000, 
0xffffffff]` || HighMem || +// || `[0x4c000000, 0x5fffffff]` || HighShadow || +// || `[0x48000000, 0x4bffffff]` || ShadowGap || +// || `[0x40000000, 0x47ffffff]` || LowShadow || +// || `[0x00000000, 0x3fffffff]` || LowMem || +// +// Shadow mapping on NetBSD/x86-64 with SHADOW_OFFSET == 0x400000000000: +// || `[0x4feffffffe01, 0x7f7ffffff000]` || HighMem || +// || `[0x49fdffffffc0, 0x4feffffffe00]` || HighShadow || +// || `[0x480000000000, 0x49fdffffffbf]` || ShadowGap || +// || `[0x400000000000, 0x47ffffffffff]` || LowShadow || +// || `[0x000000000000, 0x3fffffffffff]` || LowMem || +// +// Shadow mapping on NetBSD/i386 with SHADOW_OFFSET == 0x40000000: +// || `[0x60000000, 0xfffff000]` || HighMem || +// || `[0x4c000000, 0x5fffffff]` || HighShadow || +// || `[0x48000000, 0x4bffffff]` || ShadowGap || +// || `[0x40000000, 0x47ffffff]` || LowShadow || +// || `[0x00000000, 0x3fffffff]` || LowMem || +// +// Default Windows/i386 mapping: +// (the exact location of HighShadow/HighMem may vary depending +// on WoW64, /LARGEADDRESSAWARE, etc). +// || `[0x50000000, 0xffffffff]` || HighMem || +// || `[0x3a000000, 0x4fffffff]` || HighShadow || +// || `[0x36000000, 0x39ffffff]` || ShadowGap || +// || `[0x30000000, 0x35ffffff]` || LowShadow || +// || `[0x00000000, 0x2fffffff]` || LowMem || +// +// Shadow mapping on Myriad2 (for shadow scale 5): +// || `[0x9ff80000, 0x9fffffff]` || ShadowGap || +// || `[0x9f000000, 0x9ff7ffff]` || LowShadow || +// || `[0x80000000, 0x9effffff]` || LowMem || +// || `[0x00000000, 0x7fffffff]` || Ignored || + +#if defined(ASAN_SHADOW_SCALE) +static const u64 kDefaultShadowScale = ASAN_SHADOW_SCALE; +#else +static const u64 kDefaultShadowScale = SANITIZER_MYRIAD2 ? 5 : 3; +#endif +static const u64 kDefaultShadowSentinel = ~(uptr)0; +static const u64 kDefaultShadowOffset32 = 1ULL << 29; // 0x20000000 +static const u64 kDefaultShadowOffset64 = 1ULL << 44; +static const u64 kDefaultShort64bitShadowOffset = + 0x7FFFFFFF & (~0xFFFULL << kDefaultShadowScale); // < 2G. 
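// [Editor's note] The masked expression above is easy to misread, so here is
// the arithmetic spelled out for the default SHADOW_SCALE of 3:
// ~0xFFF << 3 is 0xFFFF...F8000, and masking with 0x7FFFFFFF leaves
// 0x7fff8000, the offset that appears throughout the Linux/x86_64 tables
// earlier in this header. The check compiles anywhere C++11 is available:

static_assert((0x7FFFFFFFULL & (~0xFFFULL << 3)) == 0x7FFF8000ULL,
              "short 64-bit shadow offset for shadow scale 3 is 0x7fff8000");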
+static const u64 kIosShadowOffset32 = 1ULL << 30; // 0x40000000 +static const u64 kIosShadowOffset64 = 0x120200000; +static const u64 kIosSimShadowOffset32 = 1ULL << 30; +static const u64 kIosSimShadowOffset64 = kDefaultShadowOffset64; +static const u64 kAArch64_ShadowOffset64 = 1ULL << 36; +static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000; +static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37; +static const u64 kPPC64_ShadowOffset64 = 1ULL << 44; +static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52; +static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000 +static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000 +static const u64 kNetBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000 +static const u64 kNetBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000 +static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000 + +static const u64 kMyriadMemoryOffset32 = 0x80000000ULL; +static const u64 kMyriadMemorySize32 = 0x20000000ULL; +static const u64 kMyriadMemoryEnd32 = + kMyriadMemoryOffset32 + kMyriadMemorySize32 - 1; +static const u64 kMyriadShadowOffset32 = + (kMyriadMemoryOffset32 + kMyriadMemorySize32 - + (kMyriadMemorySize32 >> kDefaultShadowScale)); +static const u64 kMyriadCacheBitMask32 = 0x40000000ULL; + +#define SHADOW_SCALE kDefaultShadowScale + +#if SANITIZER_FUCHSIA +# define SHADOW_OFFSET (0) +#elif SANITIZER_WORDSIZE == 32 +# if SANITIZER_ANDROID +# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address +# elif defined(__mips__) +# define SHADOW_OFFSET kMIPS32_ShadowOffset32 +# elif SANITIZER_FREEBSD +# define SHADOW_OFFSET kFreeBSD_ShadowOffset32 +# elif SANITIZER_NETBSD +# define SHADOW_OFFSET kNetBSD_ShadowOffset32 +# elif SANITIZER_WINDOWS +# define SHADOW_OFFSET kWindowsShadowOffset32 +# elif SANITIZER_IOS +# if SANITIZER_IOSSIM +# define SHADOW_OFFSET kIosSimShadowOffset32 +# else +# define SHADOW_OFFSET kIosShadowOffset32 +# endif +# elif SANITIZER_MYRIAD2 +# define SHADOW_OFFSET kMyriadShadowOffset32 +# else +# define SHADOW_OFFSET kDefaultShadowOffset32 +# endif +#else +# if SANITIZER_IOS +# if SANITIZER_IOSSIM +# define SHADOW_OFFSET kIosSimShadowOffset64 +# else +# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address +# endif +# elif defined(__aarch64__) +# define SHADOW_OFFSET kAArch64_ShadowOffset64 +# elif defined(__powerpc64__) +# define SHADOW_OFFSET kPPC64_ShadowOffset64 +# elif defined(__s390x__) +# define SHADOW_OFFSET kSystemZ_ShadowOffset64 +# elif SANITIZER_FREEBSD +# define SHADOW_OFFSET kFreeBSD_ShadowOffset64 +# elif SANITIZER_NETBSD +# define SHADOW_OFFSET kNetBSD_ShadowOffset64 +# elif SANITIZER_MAC +# define SHADOW_OFFSET kDefaultShadowOffset64 +# elif defined(__mips64) +# define SHADOW_OFFSET kMIPS64_ShadowOffset64 +# elif SANITIZER_WINDOWS64 +# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address +# else +# define SHADOW_OFFSET kDefaultShort64bitShadowOffset +# endif +#endif + +#if SANITIZER_ANDROID && defined(__arm__) +# define ASAN_PREMAP_SHADOW 1 +#else +# define ASAN_PREMAP_SHADOW 0 +#endif + +#define SHADOW_GRANULARITY (1ULL << SHADOW_SCALE) + +#define DO_ASAN_MAPPING_PROFILE 0 // Set to 1 to profile the functions below. + +#if DO_ASAN_MAPPING_PROFILE +# define PROFILE_ASAN_MAPPING() AsanMappingProfile[__LINE__]++; +#else +# define PROFILE_ASAN_MAPPING() +#endif + +// If 1, all shadow boundaries are constants. +// Don't set to 1 other than for testing. 
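// [Editor's note] Before the fixed-mapping toggle just below, a word on
// PROFILE_ASAN_MAPPING above: indexing a counter array by __LINE__ gives each
// call site its own slot with zero registration, and the companion
// `kAsanMappingProfileSize = __LINE__` at the bottom of this header sizes the
// array automatically. The same trick in freestanding form (COUNT_ME and
// hit_count are invented names):

#include <cstdio>

static unsigned hit_count[64];              // must extend past the last use
#define COUNT_ME() (hit_count[__LINE__]++)  // one counter per source line

static void Probe(bool flag) {
  if (flag) COUNT_ME();  // this line owns one slot
  else      COUNT_ME();  // ...and this line owns another
}

int main() {
  for (int i = 0; i < 10; ++i) Probe(i % 2 == 0);
  for (unsigned l = 0; l < 64; ++l)
    if (hit_count[l]) std::printf("line %u: %u hits\n", l, hit_count[l]);
}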
+#define ASAN_FIXED_MAPPING 0 + +namespace __asan { + +extern uptr AsanMappingProfile[]; + +#if ASAN_FIXED_MAPPING +// Fixed mapping for 64-bit Linux. Mostly used for performance comparison +// with non-fixed mapping. As of r175253 (Feb 2013) the performance +// difference between fixed and non-fixed mapping is below the noise level. +static uptr kHighMemEnd = 0x7fffffffffffULL; +static uptr kMidMemBeg = 0x3000000000ULL; +static uptr kMidMemEnd = 0x4fffffffffULL; +#else +extern uptr kHighMemEnd, kMidMemBeg, kMidMemEnd; // Initialized in __asan_init. +#endif + +} // namespace __asan + +#if SANITIZER_MYRIAD2 +#include "asan_mapping_myriad.h" +#else +#define MEM_TO_SHADOW(mem) (((mem) >> SHADOW_SCALE) + (SHADOW_OFFSET)) + +#define kLowMemBeg 0 +#define kLowMemEnd (SHADOW_OFFSET ? SHADOW_OFFSET - 1 : 0) + +#define kLowShadowBeg SHADOW_OFFSET +#define kLowShadowEnd MEM_TO_SHADOW(kLowMemEnd) + +#define kHighMemBeg (MEM_TO_SHADOW(kHighMemEnd) + 1) + +#define kHighShadowBeg MEM_TO_SHADOW(kHighMemBeg) +#define kHighShadowEnd MEM_TO_SHADOW(kHighMemEnd) + +# define kMidShadowBeg MEM_TO_SHADOW(kMidMemBeg) +# define kMidShadowEnd MEM_TO_SHADOW(kMidMemEnd) + +// With the zero shadow base we can not actually map pages starting from 0. +// This constant is somewhat arbitrary. +#define kZeroBaseShadowStart 0 +#define kZeroBaseMaxShadowStart (1 << 18) + +#define kShadowGapBeg (kLowShadowEnd ? kLowShadowEnd + 1 \ + : kZeroBaseShadowStart) +#define kShadowGapEnd ((kMidMemBeg ? kMidShadowBeg : kHighShadowBeg) - 1) + +#define kShadowGap2Beg (kMidMemBeg ? kMidShadowEnd + 1 : 0) +#define kShadowGap2End (kMidMemBeg ? kMidMemBeg - 1 : 0) + +#define kShadowGap3Beg (kMidMemBeg ? kMidMemEnd + 1 : 0) +#define kShadowGap3End (kMidMemBeg ? kHighShadowBeg - 1 : 0) + +namespace __asan { + +static inline bool AddrIsInLowMem(uptr a) { + PROFILE_ASAN_MAPPING(); + return a <= kLowMemEnd; +} + +static inline bool AddrIsInLowShadow(uptr a) { + PROFILE_ASAN_MAPPING(); + return a >= kLowShadowBeg && a <= kLowShadowEnd; +} + +static inline bool AddrIsInMidMem(uptr a) { + PROFILE_ASAN_MAPPING(); + return kMidMemBeg && a >= kMidMemBeg && a <= kMidMemEnd; +} + +static inline bool AddrIsInMidShadow(uptr a) { + PROFILE_ASAN_MAPPING(); + return kMidMemBeg && a >= kMidShadowBeg && a <= kMidShadowEnd; +} + +static inline bool AddrIsInHighMem(uptr a) { + PROFILE_ASAN_MAPPING(); + return kHighMemBeg && a >= kHighMemBeg && a <= kHighMemEnd; +} + +static inline bool AddrIsInHighShadow(uptr a) { + PROFILE_ASAN_MAPPING(); + return kHighMemBeg && a >= kHighShadowBeg && a <= kHighShadowEnd; +} + +static inline bool AddrIsInShadowGap(uptr a) { + PROFILE_ASAN_MAPPING(); + if (kMidMemBeg) { + if (a <= kShadowGapEnd) + return SHADOW_OFFSET == 0 || a >= kShadowGapBeg; + return (a >= kShadowGap2Beg && a <= kShadowGap2End) || + (a >= kShadowGap3Beg && a <= kShadowGap3End); + } + // In zero-based shadow mode we treat addresses near zero as addresses + // in shadow gap as well. 
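// (Editorial note, not in the original file.) Concretely: when SHADOW_OFFSET is 0 (e.g. on Fuchsia), kLowMemEnd and kLowShadowEnd are both 0, so kShadowGapBeg degenerates to kZeroBaseShadowStart (0) and every address up to kShadowGapEnd is classified as gap, including the unmappable pages near address zero.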
+ if (SHADOW_OFFSET == 0) + return a <= kShadowGapEnd; + return a >= kShadowGapBeg && a <= kShadowGapEnd; +} + +} // namespace __asan + +#endif // SANITIZER_MYRIAD2 + +namespace __asan { + +static inline bool AddrIsInMem(uptr a) { + PROFILE_ASAN_MAPPING(); + return AddrIsInLowMem(a) || AddrIsInMidMem(a) || AddrIsInHighMem(a) || + (flags()->protect_shadow_gap == 0 && AddrIsInShadowGap(a)); +} + +static inline uptr MemToShadow(uptr p) { + PROFILE_ASAN_MAPPING(); + CHECK(AddrIsInMem(p)); + return MEM_TO_SHADOW(p); +} + +static inline bool AddrIsInShadow(uptr a) { + PROFILE_ASAN_MAPPING(); + return AddrIsInLowShadow(a) || AddrIsInMidShadow(a) || AddrIsInHighShadow(a); +} + +static inline bool AddrIsAlignedByGranularity(uptr a) { + PROFILE_ASAN_MAPPING(); + return (a & (SHADOW_GRANULARITY - 1)) == 0; +} + +static inline bool AddressIsPoisoned(uptr a) { + PROFILE_ASAN_MAPPING(); + if (SANITIZER_MYRIAD2 && !AddrIsInMem(a) && !AddrIsInShadow(a)) + return false; + const uptr kAccessSize = 1; + u8 *shadow_address = (u8*)MEM_TO_SHADOW(a); + s8 shadow_value = *shadow_address; + if (shadow_value) { + u8 last_accessed_byte = (a & (SHADOW_GRANULARITY - 1)) + + kAccessSize - 1; + return (last_accessed_byte >= shadow_value); + } + return false; +} + +// Must be after all calls to PROFILE_ASAN_MAPPING(). +static const uptr kAsanMappingProfileSize = __LINE__; + +} // namespace __asan + +#endif // ASAN_MAPPING_H diff --git a/contrib/compiler-rt/lib/asan/asan_mapping_myriad.h b/contrib/compiler-rt/lib/asan/asan_mapping_myriad.h new file mode 100644 index 000000000000..baa3247bd190 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_mapping_myriad.h @@ -0,0 +1,86 @@ +//===-- asan_mapping_myriad.h -----------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Myriad-specific definitions for ASan memory mapping. 
+//===----------------------------------------------------------------------===// +#ifndef ASAN_MAPPING_MYRIAD_H +#define ASAN_MAPPING_MYRIAD_H + +#define RAW_ADDR(mem) ((mem) & ~kMyriadCacheBitMask32) +#define MEM_TO_SHADOW(mem) \ + (((RAW_ADDR(mem) - kLowMemBeg) >> SHADOW_SCALE) + (SHADOW_OFFSET)) + +#define kLowMemBeg kMyriadMemoryOffset32 +#define kLowMemEnd (SHADOW_OFFSET - 1) + +#define kLowShadowBeg SHADOW_OFFSET +#define kLowShadowEnd MEM_TO_SHADOW(kLowMemEnd) + +#define kHighMemBeg 0 + +#define kHighShadowBeg 0 +#define kHighShadowEnd 0 + +#define kMidShadowBeg 0 +#define kMidShadowEnd 0 + +#define kShadowGapBeg (kLowShadowEnd + 1) +#define kShadowGapEnd kMyriadMemoryEnd32 + +#define kShadowGap2Beg 0 +#define kShadowGap2End 0 + +#define kShadowGap3Beg 0 +#define kShadowGap3End 0 + +namespace __asan { + +static inline bool AddrIsInLowMem(uptr a) { + PROFILE_ASAN_MAPPING(); + a = RAW_ADDR(a); + return a >= kLowMemBeg && a <= kLowMemEnd; +} + +static inline bool AddrIsInLowShadow(uptr a) { + PROFILE_ASAN_MAPPING(); + a = RAW_ADDR(a); + return a >= kLowShadowBeg && a <= kLowShadowEnd; +} + +static inline bool AddrIsInMidMem(uptr a) { + PROFILE_ASAN_MAPPING(); + return false; +} + +static inline bool AddrIsInMidShadow(uptr a) { + PROFILE_ASAN_MAPPING(); + return false; +} + +static inline bool AddrIsInHighMem(uptr a) { + PROFILE_ASAN_MAPPING(); + return false; +} + +static inline bool AddrIsInHighShadow(uptr a) { + PROFILE_ASAN_MAPPING(); + return false; +} + +static inline bool AddrIsInShadowGap(uptr a) { + PROFILE_ASAN_MAPPING(); + a = RAW_ADDR(a); + return a >= kShadowGapBeg && a <= kShadowGapEnd; +} + +} // namespace __asan + +#endif // ASAN_MAPPING_MYRIAD_H diff --git a/contrib/compiler-rt/lib/asan/asan_memory_profile.cc b/contrib/compiler-rt/lib/asan/asan_memory_profile.cc new file mode 100644 index 000000000000..8c86d9fe49f2 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_memory_profile.cc @@ -0,0 +1,130 @@ +//===-- asan_memory_profile.cc --------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// This file implements __sanitizer_print_memory_profile.
+//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_stackdepot.h" +#include "sanitizer_common/sanitizer_stacktrace.h" +#include "sanitizer_common/sanitizer_stoptheworld.h" +#include "lsan/lsan_common.h" +#include "asan/asan_allocator.h" + +#if CAN_SANITIZE_LEAKS + +namespace __asan { + +struct AllocationSite { + u32 id; + uptr total_size; + uptr count; +}; + +class HeapProfile { + public: + HeapProfile() { allocations_.reserve(1024); } + + void ProcessChunk(const AsanChunkView &cv) { + if (cv.IsAllocated()) { + total_allocated_user_size_ += cv.UsedSize(); + total_allocated_count_++; + u32 id = cv.GetAllocStackId(); + if (id) + Insert(id, cv.UsedSize()); + } else if (cv.IsQuarantined()) { + total_quarantined_user_size_ += cv.UsedSize(); + total_quarantined_count_++; + } else { + total_other_count_++; + } + } + + void Print(uptr top_percent, uptr max_number_of_contexts) { + Sort(allocations_.data(), allocations_.size(), + [](const AllocationSite &a, const AllocationSite &b) { + return a.total_size > b.total_size; + }); + CHECK(total_allocated_user_size_); + uptr total_shown = 0; + Printf("Live Heap Allocations: %zd bytes in %zd chunks; quarantined: " + "%zd bytes in %zd chunks; %zd other chunks; total chunks: %zd; " + "showing top %zd%% (at most %zd unique contexts)\n", + total_allocated_user_size_, total_allocated_count_, + total_quarantined_user_size_, total_quarantined_count_, + total_other_count_, total_allocated_count_ + + total_quarantined_count_ + total_other_count_, top_percent, + max_number_of_contexts); + for (uptr i = 0; i < Min(allocations_.size(), max_number_of_contexts); + i++) { + auto &a = allocations_[i]; + Printf("%zd byte(s) (%zd%%) in %zd allocation(s)\n", a.total_size, + a.total_size * 100 / total_allocated_user_size_, a.count); + StackDepotGet(a.id).Print(); + total_shown += a.total_size; + if (total_shown * 100 / total_allocated_user_size_ > top_percent) + break; + } + } + + private: + uptr total_allocated_user_size_ = 0; + uptr total_allocated_count_ = 0; + uptr total_quarantined_user_size_ = 0; + uptr total_quarantined_count_ = 0; + uptr total_other_count_ = 0; + InternalMmapVector<AllocationSite> allocations_; + + void Insert(u32 id, uptr size) { + // Linear lookup will be good enough for most cases (although not all). 
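// (Editorial note, not in the original file.) allocations_ holds one entry per distinct allocation stack id, so the scan below is linear in the number of unique allocation sites rather than in the number of live chunks processed.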
+ for (uptr i = 0; i < allocations_.size(); i++) { + if (allocations_[i].id == id) { + allocations_[i].total_size += size; + allocations_[i].count++; + return; + } + } + allocations_.push_back({id, size, 1}); + } +}; + +static void ChunkCallback(uptr chunk, void *arg) { + reinterpret_cast<HeapProfile*>(arg)->ProcessChunk( + FindHeapChunkByAllocBeg(chunk)); +} + +static void MemoryProfileCB(const SuspendedThreadsList &suspended_threads_list, + void *argument) { + HeapProfile hp; + __lsan::ForEachChunk(ChunkCallback, &hp); + uptr *Arg = reinterpret_cast<uptr*>(argument); + hp.Print(Arg[0], Arg[1]); + + if (Verbosity()) + __asan_print_accumulated_stats(); +} + +} // namespace __asan + +#endif // CAN_SANITIZE_LEAKS + +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_print_memory_profile(uptr top_percent, + uptr max_number_of_contexts) { +#if CAN_SANITIZE_LEAKS + uptr Arg[2]; + Arg[0] = top_percent; + Arg[1] = max_number_of_contexts; + __sanitizer::StopTheWorld(__asan::MemoryProfileCB, Arg); +#endif // CAN_SANITIZE_LEAKS +} +} // extern "C" diff --git a/contrib/compiler-rt/lib/asan/asan_new_delete.cc b/contrib/compiler-rt/lib/asan/asan_new_delete.cc new file mode 100644 index 000000000000..e6053c1fe87f --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_new_delete.cc @@ -0,0 +1,211 @@ +//===-- asan_new_delete.cc ------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Interceptors for operators new and delete. +//===----------------------------------------------------------------------===// + +#include "asan_allocator.h" +#include "asan_internal.h" +#include "asan_malloc_local.h" +#include "asan_report.h" +#include "asan_stack.h" + +#include "interception/interception.h" + +#include <stddef.h> + +// C++ operators can't have dllexport attributes on Windows. We export them +// anyway by passing extra -export flags to the linker, which is exactly what +// dllexport would normally do. We need to export them in order to make the +// VS2015 dynamic CRT (MD) work. +#if SANITIZER_WINDOWS && defined(_MSC_VER) +#define CXX_OPERATOR_ATTRIBUTE +#define COMMENT_EXPORT(sym) __pragma(comment(linker, "/export:" sym)) +#ifdef _WIN64 +COMMENT_EXPORT("??2@YAPEAX_K@Z") // operator new +COMMENT_EXPORT("??2@YAPEAX_KAEBUnothrow_t@std@@@Z") // operator new nothrow +COMMENT_EXPORT("??3@YAXPEAX@Z") // operator delete +COMMENT_EXPORT("??3@YAXPEAX_K@Z") // sized operator delete +COMMENT_EXPORT("??_U@YAPEAX_K@Z") // operator new[] +COMMENT_EXPORT("??_V@YAXPEAX@Z") // operator delete[] +#else +COMMENT_EXPORT("??2@YAPAXI@Z") // operator new +COMMENT_EXPORT("??2@YAPAXIABUnothrow_t@std@@@Z") // operator new nothrow +COMMENT_EXPORT("??3@YAXPAX@Z") // operator delete +COMMENT_EXPORT("??3@YAXPAXI@Z") // sized operator delete +COMMENT_EXPORT("??_U@YAPAXI@Z") // operator new[] +COMMENT_EXPORT("??_V@YAXPAX@Z") // operator delete[] +#endif +#undef COMMENT_EXPORT +#else +#define CXX_OPERATOR_ATTRIBUTE INTERCEPTOR_ATTRIBUTE +#endif + +using namespace __asan; // NOLINT + +// FreeBSD prior to v9.2 has a wrong definition of 'size_t'.
+// http://svnweb.freebsd.org/base?view=revision&revision=232261 +#if SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32 +#include <sys/param.h> +#if __FreeBSD_version <= 902001 // v9.2 +#define size_t unsigned +#endif // __FreeBSD_version +#endif // SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32 + +// This code has issues on OSX. +// See https://github.com/google/sanitizers/issues/131. + +// Fake std::nothrow_t and std::align_val_t to avoid including <new>. +namespace std { +struct nothrow_t {}; +enum class align_val_t: size_t {}; +} // namespace std + +// TODO(alekseyshl): throw std::bad_alloc instead of dying on OOM. +// For local pool allocation, align to SHADOW_GRANULARITY to match asan +// allocator behavior. +#define OPERATOR_NEW_BODY(type, nothrow) \ + if (ALLOCATE_FROM_LOCAL_POOL) {\ + void *res = MemalignFromLocalPool(SHADOW_GRANULARITY, size);\ + if (!nothrow) CHECK(res);\ + return res;\ + }\ + GET_STACK_TRACE_MALLOC;\ + void *res = asan_memalign(0, size, &stack, type);\ + if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\ + return res; +#define OPERATOR_NEW_BODY_ALIGN(type, nothrow) \ + if (ALLOCATE_FROM_LOCAL_POOL) {\ + void *res = MemalignFromLocalPool((uptr)align, size);\ + if (!nothrow) CHECK(res);\ + return res;\ + }\ + GET_STACK_TRACE_MALLOC;\ + void *res = asan_memalign((uptr)align, size, &stack, type);\ + if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\ + return res; + +// On OS X it's not enough to just provide our own 'operator new' and +// 'operator delete' implementations, because they're going to be in the +// runtime dylib, and the main executable will depend on both the runtime +// dylib and libstdc++, each of which will have its own implementation of +// new and delete. +// To make sure that C++ allocation/deallocation operators are overridden on +// OS X we need to intercept them using their mangled names.
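// (Editorial aside, not in the original file.) The Itanium-mangled names
// intercepted in the SANITIZER_MAC branches below decode as: _Znwm is
// "operator new(unsigned long)", _Znam is "operator new[](unsigned long)",
// _ZdlPv is "operator delete(void*)" and _ZdaPv is "operator delete[](void*)".
// A minimal standalone sketch that verifies the decoding with the
// __cxa_demangle API (compile it separately; it is not part of this file):
//
//   #include <cstdio>
//   #include <cstdlib>
//   #include <cxxabi.h>
//
//   int main() {
//     const char *mangled[] = {"_Znwm", "_Znam", "_ZdlPv", "_ZdaPv"};
//     for (const char *name : mangled) {
//       int status = 0;
//       // __cxa_demangle mallocs the result; the caller must free it.
//       char *d = abi::__cxa_demangle(name, nullptr, nullptr, &status);
//       printf("%-8s -> %s\n", name, status == 0 ? d : "(failed)");
//       free(d);
//     }
//     return 0;
//   }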
+#if !SANITIZER_MAC +CXX_OPERATOR_ATTRIBUTE +void *operator new(size_t size) +{ OPERATOR_NEW_BODY(FROM_NEW, false /*nothrow*/); } +CXX_OPERATOR_ATTRIBUTE +void *operator new[](size_t size) +{ OPERATOR_NEW_BODY(FROM_NEW_BR, false /*nothrow*/); } +CXX_OPERATOR_ATTRIBUTE +void *operator new(size_t size, std::nothrow_t const&) +{ OPERATOR_NEW_BODY(FROM_NEW, true /*nothrow*/); } +CXX_OPERATOR_ATTRIBUTE +void *operator new[](size_t size, std::nothrow_t const&) +{ OPERATOR_NEW_BODY(FROM_NEW_BR, true /*nothrow*/); } +CXX_OPERATOR_ATTRIBUTE +void *operator new(size_t size, std::align_val_t align) +{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW, false /*nothrow*/); } +CXX_OPERATOR_ATTRIBUTE +void *operator new[](size_t size, std::align_val_t align) +{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW_BR, false /*nothrow*/); } +CXX_OPERATOR_ATTRIBUTE +void *operator new(size_t size, std::align_val_t align, std::nothrow_t const&) +{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW, true /*nothrow*/); } +CXX_OPERATOR_ATTRIBUTE +void *operator new[](size_t size, std::align_val_t align, std::nothrow_t const&) +{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW_BR, true /*nothrow*/); } + +#else // SANITIZER_MAC +INTERCEPTOR(void *, _Znwm, size_t size) { + OPERATOR_NEW_BODY(FROM_NEW, false /*nothrow*/); +} +INTERCEPTOR(void *, _Znam, size_t size) { + OPERATOR_NEW_BODY(FROM_NEW_BR, false /*nothrow*/); +} +INTERCEPTOR(void *, _ZnwmRKSt9nothrow_t, size_t size, std::nothrow_t const&) { + OPERATOR_NEW_BODY(FROM_NEW, true /*nothrow*/); +} +INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&) { + OPERATOR_NEW_BODY(FROM_NEW_BR, true /*nothrow*/); +} +#endif // !SANITIZER_MAC + +#define OPERATOR_DELETE_BODY(type) \ + if (IS_FROM_LOCAL_POOL(ptr)) return;\ + GET_STACK_TRACE_FREE;\ + asan_delete(ptr, 0, 0, &stack, type); + +#define OPERATOR_DELETE_BODY_SIZE(type) \ + if (IS_FROM_LOCAL_POOL(ptr)) return;\ + GET_STACK_TRACE_FREE;\ + asan_delete(ptr, size, 0, &stack, type); + +#define OPERATOR_DELETE_BODY_ALIGN(type) \ + if (IS_FROM_LOCAL_POOL(ptr)) return;\ + GET_STACK_TRACE_FREE;\ + asan_delete(ptr, 0, static_cast<uptr>(align), &stack, type); + +#define OPERATOR_DELETE_BODY_SIZE_ALIGN(type) \ + if (IS_FROM_LOCAL_POOL(ptr)) return;\ + GET_STACK_TRACE_FREE;\ + asan_delete(ptr, size, static_cast<uptr>(align), &stack, type); + +#if !SANITIZER_MAC +CXX_OPERATOR_ATTRIBUTE +void operator delete(void *ptr) NOEXCEPT +{ OPERATOR_DELETE_BODY(FROM_NEW); } +CXX_OPERATOR_ATTRIBUTE +void operator delete[](void *ptr) NOEXCEPT +{ OPERATOR_DELETE_BODY(FROM_NEW_BR); } +CXX_OPERATOR_ATTRIBUTE +void operator delete(void *ptr, std::nothrow_t const&) +{ OPERATOR_DELETE_BODY(FROM_NEW); } +CXX_OPERATOR_ATTRIBUTE +void operator delete[](void *ptr, std::nothrow_t const&) +{ OPERATOR_DELETE_BODY(FROM_NEW_BR); } +CXX_OPERATOR_ATTRIBUTE +void operator delete(void *ptr, size_t size) NOEXCEPT +{ OPERATOR_DELETE_BODY_SIZE(FROM_NEW); } +CXX_OPERATOR_ATTRIBUTE +void operator delete[](void *ptr, size_t size) NOEXCEPT +{ OPERATOR_DELETE_BODY_SIZE(FROM_NEW_BR); } +CXX_OPERATOR_ATTRIBUTE +void operator delete(void *ptr, std::align_val_t align) NOEXCEPT +{ OPERATOR_DELETE_BODY_ALIGN(FROM_NEW); } +CXX_OPERATOR_ATTRIBUTE +void operator delete[](void *ptr, std::align_val_t align) NOEXCEPT +{ OPERATOR_DELETE_BODY_ALIGN(FROM_NEW_BR); } +CXX_OPERATOR_ATTRIBUTE +void operator delete(void *ptr, std::align_val_t align, std::nothrow_t const&) +{ OPERATOR_DELETE_BODY_ALIGN(FROM_NEW); } +CXX_OPERATOR_ATTRIBUTE +void operator delete[](void *ptr, std::align_val_t align, std::nothrow_t const&) +{ 
OPERATOR_DELETE_BODY_ALIGN(FROM_NEW_BR); } +CXX_OPERATOR_ATTRIBUTE +void operator delete(void *ptr, size_t size, std::align_val_t align) NOEXCEPT +{ OPERATOR_DELETE_BODY_SIZE_ALIGN(FROM_NEW); } +CXX_OPERATOR_ATTRIBUTE +void operator delete[](void *ptr, size_t size, std::align_val_t align) NOEXCEPT +{ OPERATOR_DELETE_BODY_SIZE_ALIGN(FROM_NEW_BR); } + +#else // SANITIZER_MAC +INTERCEPTOR(void, _ZdlPv, void *ptr) +{ OPERATOR_DELETE_BODY(FROM_NEW); } +INTERCEPTOR(void, _ZdaPv, void *ptr) +{ OPERATOR_DELETE_BODY(FROM_NEW_BR); } +INTERCEPTOR(void, _ZdlPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) +{ OPERATOR_DELETE_BODY(FROM_NEW); } +INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) +{ OPERATOR_DELETE_BODY(FROM_NEW_BR); } +#endif // !SANITIZER_MAC diff --git a/contrib/compiler-rt/lib/asan/asan_poisoning.cc b/contrib/compiler-rt/lib/asan/asan_poisoning.cc new file mode 100644 index 000000000000..1e9c37a13a16 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_poisoning.cc @@ -0,0 +1,461 @@ +//===-- asan_poisoning.cc -------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Shadow memory poisoning by ASan RTL and by user application. +//===----------------------------------------------------------------------===// + +#include "asan_poisoning.h" +#include "asan_report.h" +#include "asan_stack.h" +#include "sanitizer_common/sanitizer_atomic.h" +#include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_flags.h" + +namespace __asan { + +static atomic_uint8_t can_poison_memory; + +void SetCanPoisonMemory(bool value) { + atomic_store(&can_poison_memory, value, memory_order_release); +} + +bool CanPoisonMemory() { + return atomic_load(&can_poison_memory, memory_order_acquire); +} + +void PoisonShadow(uptr addr, uptr size, u8 value) { + if (value && !CanPoisonMemory()) return; + CHECK(AddrIsAlignedByGranularity(addr)); + CHECK(AddrIsInMem(addr)); + CHECK(AddrIsAlignedByGranularity(addr + size)); + CHECK(AddrIsInMem(addr + size - SHADOW_GRANULARITY)); + CHECK(REAL(memset)); + FastPoisonShadow(addr, size, value); +} + +void PoisonShadowPartialRightRedzone(uptr addr, + uptr size, + uptr redzone_size, + u8 value) { + if (!CanPoisonMemory()) return; + CHECK(AddrIsAlignedByGranularity(addr)); + CHECK(AddrIsInMem(addr)); + FastPoisonShadowPartialRightRedzone(addr, size, redzone_size, value); +} + +struct ShadowSegmentEndpoint { + u8 *chunk; + s8 offset; // in [0, SHADOW_GRANULARITY) + s8 value; // = *chunk; + + explicit ShadowSegmentEndpoint(uptr address) { + chunk = (u8*)MemToShadow(address); + offset = address & (SHADOW_GRANULARITY - 1); + value = *chunk; + } +}; + +void FlushUnneededASanShadowMemory(uptr p, uptr size) { + // Since asan's mapping is compacting, the shadow chunk may not be + // page-aligned, so we only flush the page-aligned portion. + ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size)); +} + +void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) { + uptr end = ptr + size; + if (Verbosity()) { + Printf("__asan_%spoison_intra_object_redzone [%p,%p) %zd\n", + poison ? 
"" : "un", ptr, end, size); + if (Verbosity() >= 2) + PRINT_CURRENT_STACK(); + } + CHECK(size); + CHECK_LE(size, 4096); + CHECK(IsAligned(end, SHADOW_GRANULARITY)); + if (!IsAligned(ptr, SHADOW_GRANULARITY)) { + *(u8 *)MemToShadow(ptr) = + poison ? static_cast<u8>(ptr % SHADOW_GRANULARITY) : 0; + ptr |= SHADOW_GRANULARITY - 1; + ptr++; + } + for (; ptr < end; ptr += SHADOW_GRANULARITY) + *(u8*)MemToShadow(ptr) = poison ? kAsanIntraObjectRedzone : 0; +} + +} // namespace __asan + +// ---------------------- Interface ---------------- {{{1 +using namespace __asan; // NOLINT + +// Current implementation of __asan_(un)poison_memory_region doesn't check +// that user program (un)poisons the memory it owns. It poisons memory +// conservatively, and unpoisons progressively to make sure asan shadow +// mapping invariant is preserved (see detailed mapping description here: +// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm). +// +// * if user asks to poison region [left, right), the program poisons +// at least [left, AlignDown(right)). +// * if user asks to unpoison region [left, right), the program unpoisons +// at most [AlignDown(left), right). +void __asan_poison_memory_region(void const volatile *addr, uptr size) { + if (!flags()->allow_user_poisoning || size == 0) return; + uptr beg_addr = (uptr)addr; + uptr end_addr = beg_addr + size; + VPrintf(3, "Trying to poison memory region [%p, %p)\n", (void *)beg_addr, + (void *)end_addr); + ShadowSegmentEndpoint beg(beg_addr); + ShadowSegmentEndpoint end(end_addr); + if (beg.chunk == end.chunk) { + CHECK_LT(beg.offset, end.offset); + s8 value = beg.value; + CHECK_EQ(value, end.value); + // We can only poison memory if the byte in end.offset is unaddressable. + // No need to re-poison memory if it is poisoned already. + if (value > 0 && value <= end.offset) { + if (beg.offset > 0) { + *beg.chunk = Min(value, beg.offset); + } else { + *beg.chunk = kAsanUserPoisonedMemoryMagic; + } + } + return; + } + CHECK_LT(beg.chunk, end.chunk); + if (beg.offset > 0) { + // Mark bytes from beg.offset as unaddressable. + if (beg.value == 0) { + *beg.chunk = beg.offset; + } else { + *beg.chunk = Min(beg.value, beg.offset); + } + beg.chunk++; + } + REAL(memset)(beg.chunk, kAsanUserPoisonedMemoryMagic, end.chunk - beg.chunk); + // Poison if byte in end.offset is unaddressable. + if (end.value > 0 && end.value <= end.offset) { + *end.chunk = kAsanUserPoisonedMemoryMagic; + } +} + +void __asan_unpoison_memory_region(void const volatile *addr, uptr size) { + if (!flags()->allow_user_poisoning || size == 0) return; + uptr beg_addr = (uptr)addr; + uptr end_addr = beg_addr + size; + VPrintf(3, "Trying to unpoison memory region [%p, %p)\n", (void *)beg_addr, + (void *)end_addr); + ShadowSegmentEndpoint beg(beg_addr); + ShadowSegmentEndpoint end(end_addr); + if (beg.chunk == end.chunk) { + CHECK_LT(beg.offset, end.offset); + s8 value = beg.value; + CHECK_EQ(value, end.value); + // We unpoison memory bytes up to enbytes up to end.offset if it is not + // unpoisoned already. 
+ if (value != 0) { + *beg.chunk = Max(value, end.offset); + } + return; + } + CHECK_LT(beg.chunk, end.chunk); + if (beg.offset > 0) { + *beg.chunk = 0; + beg.chunk++; + } + REAL(memset)(beg.chunk, 0, end.chunk - beg.chunk); + if (end.offset > 0 && end.value != 0) { + *end.chunk = Max(end.value, end.offset); + } +} + +int __asan_address_is_poisoned(void const volatile *addr) { + return __asan::AddressIsPoisoned((uptr)addr); +} + +uptr __asan_region_is_poisoned(uptr beg, uptr size) { + if (!size) return 0; + uptr end = beg + size; + if (SANITIZER_MYRIAD2) { + // On Myriad, addresses not in the DRAM range need to be treated as + // unpoisoned. + if (!AddrIsInMem(beg) && !AddrIsInShadow(beg)) return 0; + if (!AddrIsInMem(end) && !AddrIsInShadow(end)) return 0; + } else { + if (!AddrIsInMem(beg)) return beg; + if (!AddrIsInMem(end)) return end; + } + CHECK_LT(beg, end); + uptr aligned_b = RoundUpTo(beg, SHADOW_GRANULARITY); + uptr aligned_e = RoundDownTo(end, SHADOW_GRANULARITY); + uptr shadow_beg = MemToShadow(aligned_b); + uptr shadow_end = MemToShadow(aligned_e); + // First check the first and the last application bytes, + // then check the SHADOW_GRANULARITY-aligned region by calling + // mem_is_zero on the corresponding shadow. + if (!__asan::AddressIsPoisoned(beg) && + !__asan::AddressIsPoisoned(end - 1) && + (shadow_end <= shadow_beg || + __sanitizer::mem_is_zero((const char *)shadow_beg, + shadow_end - shadow_beg))) + return 0; + // The fast check failed, so we have a poisoned byte somewhere. + // Find it slowly. + for (; beg < end; beg++) + if (__asan::AddressIsPoisoned(beg)) + return beg; + UNREACHABLE("mem_is_zero returned false, but poisoned byte was not found"); + return 0; +} + +#define CHECK_SMALL_REGION(p, size, isWrite) \ + do { \ + uptr __p = reinterpret_cast<uptr>(p); \ + uptr __size = size; \ + if (UNLIKELY(__asan::AddressIsPoisoned(__p) || \ + __asan::AddressIsPoisoned(__p + __size - 1))) { \ + GET_CURRENT_PC_BP_SP; \ + uptr __bad = __asan_region_is_poisoned(__p, __size); \ + __asan_report_error(pc, bp, sp, __bad, isWrite, __size, 0);\ + } \ + } while (false) + + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +u16 __sanitizer_unaligned_load16(const uu16 *p) { + CHECK_SMALL_REGION(p, sizeof(*p), false); + return *p; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +u32 __sanitizer_unaligned_load32(const uu32 *p) { + CHECK_SMALL_REGION(p, sizeof(*p), false); + return *p; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +u64 __sanitizer_unaligned_load64(const uu64 *p) { + CHECK_SMALL_REGION(p, sizeof(*p), false); + return *p; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_unaligned_store16(uu16 *p, u16 x) { + CHECK_SMALL_REGION(p, sizeof(*p), true); + *p = x; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_unaligned_store32(uu32 *p, u32 x) { + CHECK_SMALL_REGION(p, sizeof(*p), true); + *p = x; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_unaligned_store64(uu64 *p, u64 x) { + CHECK_SMALL_REGION(p, sizeof(*p), true); + *p = x; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +void __asan_poison_cxx_array_cookie(uptr p) { + if (SANITIZER_WORDSIZE != 64) return; + if (!flags()->poison_array_cookie) return; + uptr s = MEM_TO_SHADOW(p); + *reinterpret_cast<u8*>(s) = kAsanArrayCookieMagic; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +uptr __asan_load_cxx_array_cookie(uptr *p) { + if (SANITIZER_WORDSIZE != 64) return *p; + if (!flags()->poison_array_cookie) return *p; + uptr s = MEM_TO_SHADOW(reinterpret_cast<uptr>(p)); + u8 sval = 
*reinterpret_cast<u8*>(s); + if (sval == kAsanArrayCookieMagic) return *p; + // If sval is not kAsanArrayCookieMagic it can only be freed memory, + // which means that we are going to get a double-free. So, return 0 to avoid + // an infinite loop of destructors. We don't want to report a double-free here + // though, so print a warning just in case. + // CHECK_EQ(sval, kAsanHeapFreeMagic); + if (sval == kAsanHeapFreeMagic) { + Report("AddressSanitizer: loaded array cookie from free-d memory; " + "expect a double-free report\n"); + return 0; + } + // The cookie may remain unpoisoned if e.g. it comes from a custom + // operator new defined inside a class. + return *p; +} + +// This is a simplified version of __asan_(un)poison_memory_region, which +// assumes that the left border of the region to be poisoned is properly +// aligned. +static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) { + if (size == 0) return; + uptr aligned_size = size & ~(SHADOW_GRANULARITY - 1); + PoisonShadow(addr, aligned_size, + do_poison ? kAsanStackUseAfterScopeMagic : 0); + if (size == aligned_size) + return; + s8 end_offset = (s8)(size - aligned_size); + s8* shadow_end = (s8*)MemToShadow(addr + aligned_size); + s8 end_value = *shadow_end; + if (do_poison) { + // If possible, mark all the bytes mapping to the last shadow byte as + // unaddressable. + if (end_value > 0 && end_value <= end_offset) + *shadow_end = (s8)kAsanStackUseAfterScopeMagic; + } else { + // If necessary, mark the first few bytes mapping to the last shadow byte + // as addressable. + if (end_value != 0) + *shadow_end = Max(end_value, end_offset); + } +} + +void __asan_set_shadow_00(uptr addr, uptr size) { + REAL(memset)((void *)addr, 0, size); +} + +void __asan_set_shadow_f1(uptr addr, uptr size) { + REAL(memset)((void *)addr, 0xf1, size); +} + +void __asan_set_shadow_f2(uptr addr, uptr size) { + REAL(memset)((void *)addr, 0xf2, size); +} + +void __asan_set_shadow_f3(uptr addr, uptr size) { + REAL(memset)((void *)addr, 0xf3, size); +} + +void __asan_set_shadow_f5(uptr addr, uptr size) { + REAL(memset)((void *)addr, 0xf5, size); +} + +void __asan_set_shadow_f8(uptr addr, uptr size) { + REAL(memset)((void *)addr, 0xf8, size); +} + +void __asan_poison_stack_memory(uptr addr, uptr size) { + VReport(1, "poisoning: %p %zx\n", (void *)addr, size); + PoisonAlignedStackMemory(addr, size, true); +} + +void __asan_unpoison_stack_memory(uptr addr, uptr size) { + VReport(1, "unpoisoning: %p %zx\n", (void *)addr, size); + PoisonAlignedStackMemory(addr, size, false); +} + +void __sanitizer_annotate_contiguous_container(const void *beg_p, + const void *end_p, + const void *old_mid_p, + const void *new_mid_p) { + if (!flags()->detect_container_overflow) return; + VPrintf(2, "contiguous_container: %p %p %p %p\n", beg_p, end_p, old_mid_p, + new_mid_p); + uptr beg = reinterpret_cast<uptr>(beg_p); + uptr end = reinterpret_cast<uptr>(end_p); + uptr old_mid = reinterpret_cast<uptr>(old_mid_p); + uptr new_mid = reinterpret_cast<uptr>(new_mid_p); + uptr granularity = SHADOW_GRANULARITY; + if (!(beg <= old_mid && beg <= new_mid && old_mid <= end && new_mid <= end && + IsAligned(beg, granularity))) { + GET_STACK_TRACE_FATAL_HERE; + ReportBadParamsToAnnotateContiguousContainer(beg, end, old_mid, new_mid, + &stack); + } + CHECK_LE(end - beg, + FIRST_32_SECOND_64(1UL << 30, 1ULL << 34)); // Sanity check.
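// (Editorial worked example, not in the original file.) With granularity 8,
// beg == 0x1000, old_mid == 0x100d and new_mid == 0x1015: a == d1 == 0x1008,
// c == 0x1018, b1 == 0x1010 and b2 == 0x1018. The code below unpoisons
// [0x1008, 0x1010), poisons nothing at [b2, c) (it is empty here), and
// stores new_mid - b1 == 5 into the shadow byte of the partial granule at
// 0x1010, i.e. its first 5 bytes stay addressable.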
+ + uptr a = RoundDownTo(Min(old_mid, new_mid), granularity); + uptr c = RoundUpTo(Max(old_mid, new_mid), granularity); + uptr d1 = RoundDownTo(old_mid, granularity); + // uptr d2 = RoundUpTo(old_mid, granularity); + // Currently we should be in this state: + // [a, d1) is good, [d2, c) is bad, [d1, d2) is partially good. + // Make a quick sanity check that we are indeed in this state. + // + // FIXME: Two of these three checks are disabled until we fix + // https://github.com/google/sanitizers/issues/258. + // if (d1 != d2) + // CHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1); + if (a + granularity <= d1) + CHECK_EQ(*(u8*)MemToShadow(a), 0); + // if (d2 + granularity <= c && c <= end) + // CHECK_EQ(*(u8 *)MemToShadow(c - granularity), + // kAsanContiguousContainerOOBMagic); + + uptr b1 = RoundDownTo(new_mid, granularity); + uptr b2 = RoundUpTo(new_mid, granularity); + // New state: + // [a, b1) is good, [b2, c) is bad, [b1, b2) is partially good. + PoisonShadow(a, b1 - a, 0); + PoisonShadow(b2, c - b2, kAsanContiguousContainerOOBMagic); + if (b1 != b2) { + CHECK_EQ(b2 - b1, granularity); + *(u8*)MemToShadow(b1) = static_cast<u8>(new_mid - b1); + } +} + +const void *__sanitizer_contiguous_container_find_bad_address( + const void *beg_p, const void *mid_p, const void *end_p) { + if (!flags()->detect_container_overflow) + return nullptr; + uptr beg = reinterpret_cast<uptr>(beg_p); + uptr end = reinterpret_cast<uptr>(end_p); + uptr mid = reinterpret_cast<uptr>(mid_p); + CHECK_LE(beg, mid); + CHECK_LE(mid, end); + // Check some bytes starting from beg, some bytes around mid, and some bytes + // ending with end. + uptr kMaxRangeToCheck = 32; + uptr r1_beg = beg; + uptr r1_end = Min(beg + kMaxRangeToCheck, mid); + uptr r2_beg = Max(beg, mid - kMaxRangeToCheck); + uptr r2_end = Min(end, mid + kMaxRangeToCheck); + uptr r3_beg = Max(end - kMaxRangeToCheck, mid); + uptr r3_end = end; + for (uptr i = r1_beg; i < r1_end; i++) + if (AddressIsPoisoned(i)) + return reinterpret_cast<const void *>(i); + for (uptr i = r2_beg; i < mid; i++) + if (AddressIsPoisoned(i)) + return reinterpret_cast<const void *>(i); + for (uptr i = mid; i < r2_end; i++) + if (!AddressIsPoisoned(i)) + return reinterpret_cast<const void *>(i); + for (uptr i = r3_beg; i < r3_end; i++) + if (!AddressIsPoisoned(i)) + return reinterpret_cast<const void *>(i); + return nullptr; +} + +int __sanitizer_verify_contiguous_container(const void *beg_p, + const void *mid_p, + const void *end_p) { + return __sanitizer_contiguous_container_find_bad_address(beg_p, mid_p, + end_p) == nullptr; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +void __asan_poison_intra_object_redzone(uptr ptr, uptr size) { + AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, true); +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +void __asan_unpoison_intra_object_redzone(uptr ptr, uptr size) { + AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, false); +} + +// --- Implementation of LSan-specific functions --- {{{1 +namespace __lsan { +bool WordIsPoisoned(uptr addr) { + return (__asan_region_is_poisoned(addr, sizeof(uptr)) != 0); +} +} diff --git a/contrib/compiler-rt/lib/asan/asan_poisoning.h b/contrib/compiler-rt/lib/asan/asan_poisoning.h new file mode 100644 index 000000000000..c94794cead89 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_poisoning.h @@ -0,0 +1,99 @@ +//===-- asan_poisoning.h ----------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source 
+// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Shadow memory poisoning by ASan RTL and by user application. +//===----------------------------------------------------------------------===// + +#include "asan_interceptors.h" +#include "asan_internal.h" +#include "asan_mapping.h" +#include "sanitizer_common/sanitizer_flags.h" + +namespace __asan { + +// Enable/disable memory poisoning. +void SetCanPoisonMemory(bool value); +bool CanPoisonMemory(); + +// Poisons the shadow memory for "size" bytes starting from "addr". +void PoisonShadow(uptr addr, uptr size, u8 value); + +// Poisons the shadow memory for "redzone_size" bytes starting from +// "addr + size". +void PoisonShadowPartialRightRedzone(uptr addr, + uptr size, + uptr redzone_size, + u8 value); + +// Fast versions of PoisonShadow and PoisonShadowPartialRightRedzone that +// assume that memory addresses are properly aligned. Use in +// performance-critical code with care. +ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size, + u8 value) { + DCHECK(!value || CanPoisonMemory()); + uptr shadow_beg = MEM_TO_SHADOW(aligned_beg); + uptr shadow_end = MEM_TO_SHADOW( + aligned_beg + aligned_size - SHADOW_GRANULARITY) + 1; + // FIXME: Page states are different on Windows, so using the same interface + // for mapping shadow and zeroing out pages doesn't "just work", so we should + // probably provide a higher-level interface for these operations. + // For now, just memset on Windows. + if (value || SANITIZER_WINDOWS == 1 || + // TODO(mcgrathr): Fuchsia doesn't allow the shadow mapping to be + // changed at all. It doesn't currently have an efficient means + // to zero a bunch of pages, but maybe we should add one. + SANITIZER_FUCHSIA == 1 || + // RTEMS doesn't have pages, let alone a fast way to zero + // them, so default to memset. + SANITIZER_RTEMS == 1 || + shadow_end - shadow_beg < common_flags()->clear_shadow_mmap_threshold) { + REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg); + } else { + uptr page_size = GetPageSizeCached(); + uptr page_beg = RoundUpTo(shadow_beg, page_size); + uptr page_end = RoundDownTo(shadow_end, page_size); + + if (page_beg >= page_end) { + REAL(memset)((void *)shadow_beg, 0, shadow_end - shadow_beg); + } else { + if (page_beg != shadow_beg) { + REAL(memset)((void *)shadow_beg, 0, page_beg - shadow_beg); + } + if (page_end != shadow_end) { + REAL(memset)((void *)page_end, 0, shadow_end - page_end); + } + ReserveShadowMemoryRange(page_beg, page_end - 1, nullptr); + } + } +} + +ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone( + uptr aligned_addr, uptr size, uptr redzone_size, u8 value) { + DCHECK(CanPoisonMemory()); + bool poison_partial = flags()->poison_partial; + u8 *shadow = (u8*)MEM_TO_SHADOW(aligned_addr); + for (uptr i = 0; i < redzone_size; i += SHADOW_GRANULARITY, shadow++) { + if (i + SHADOW_GRANULARITY <= size) { + *shadow = 0; // fully addressable + } else if (i >= size) { + *shadow = (SHADOW_GRANULARITY == 128) ? 0xff : value; // unaddressable + } else { + // first size-i bytes are addressable + *shadow = poison_partial ? static_cast<u8>(size - i) : 0; + } + } +} + +// Calls __sanitizer::ReleaseMemoryPagesToOS() on +// [MemToShadow(p), MemToShadow(p+size)].
+void FlushUnneededASanShadowMemory(uptr p, uptr size); + +} // namespace __asan diff --git a/contrib/compiler-rt/lib/asan/asan_posix.cc b/contrib/compiler-rt/lib/asan/asan_posix.cc new file mode 100644 index 000000000000..ca99c04b3ad8 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_posix.cc @@ -0,0 +1,118 @@ +//===-- asan_posix.cc -----------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Posix-specific details. +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_POSIX + +#include "asan_internal.h" +#include "asan_interceptors.h" +#include "asan_mapping.h" +#include "asan_report.h" +#include "asan_stack.h" +#include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_posix.h" +#include "sanitizer_common/sanitizer_procmaps.h" + +#include <pthread.h> +#include <stdlib.h> +#include <sys/time.h> +#include <sys/resource.h> +#include <unistd.h> + +namespace __asan { + +void AsanOnDeadlySignal(int signo, void *siginfo, void *context) { + StartReportDeadlySignal(); + SignalContext sig(siginfo, context); + ReportDeadlySignal(sig); +} + +// ---------------------- TSD ---------------- {{{1 + +#if SANITIZER_NETBSD || SANITIZER_FREEBSD +// Thread Static Data cannot be used in early init on NetBSD and FreeBSD. +// Reuse the Asan TSD API for compatibility with existing code +// with an alternative implementation. + +static void (*tsd_destructor)(void *tsd) = nullptr; + +struct tsd_key { + tsd_key() : key(nullptr) {} + ~tsd_key() { + CHECK(tsd_destructor); + if (key) + (*tsd_destructor)(key); + } + void *key; +}; + +static thread_local struct tsd_key key; + +void AsanTSDInit(void (*destructor)(void *tsd)) { + CHECK(!tsd_destructor); + tsd_destructor = destructor; +} + +void *AsanTSDGet() { + CHECK(tsd_destructor); + return key.key; +} + +void AsanTSDSet(void *tsd) { + CHECK(tsd_destructor); + CHECK(tsd); + CHECK(!key.key); + key.key = tsd; +} + +void PlatformTSDDtor(void *tsd) { + CHECK(tsd_destructor); + CHECK_EQ(key.key, tsd); + key.key = nullptr; + // Make sure that signal handler can not see a stale current thread pointer. 
+ atomic_signal_fence(memory_order_seq_cst); + AsanThread::TSDDtor(tsd); +} +#else +static pthread_key_t tsd_key; +static bool tsd_key_inited = false; +void AsanTSDInit(void (*destructor)(void *tsd)) { + CHECK(!tsd_key_inited); + tsd_key_inited = true; + CHECK_EQ(0, pthread_key_create(&tsd_key, destructor)); +} + +void *AsanTSDGet() { + CHECK(tsd_key_inited); + return pthread_getspecific(tsd_key); +} + +void AsanTSDSet(void *tsd) { + CHECK(tsd_key_inited); + pthread_setspecific(tsd_key, tsd); +} + +void PlatformTSDDtor(void *tsd) { + AsanThreadContext *context = (AsanThreadContext*)tsd; + if (context->destructor_iterations > 1) { + context->destructor_iterations--; + CHECK_EQ(0, pthread_setspecific(tsd_key, tsd)); + return; + } + AsanThread::TSDDtor(tsd); +} +#endif +} // namespace __asan + +#endif // SANITIZER_POSIX diff --git a/contrib/compiler-rt/lib/asan/asan_preinit.cc b/contrib/compiler-rt/lib/asan/asan_preinit.cc new file mode 100644 index 000000000000..a3986d24f9c3 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_preinit.cc @@ -0,0 +1,25 @@ +//===-- asan_preinit.cc ---------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Call __asan_init at the very early stage of process startup. +//===----------------------------------------------------------------------===// +#include "asan_internal.h" + +using namespace __asan; + +#if SANITIZER_CAN_USE_PREINIT_ARRAY + // The symbol is called __local_asan_preinit, because it's not intended to be + // exported. + // This code is linked into the main executable when -fsanitize=address is in + // the link flags. It can only use exported interface functions. + __attribute__((section(".preinit_array"), used)) + void (*__local_asan_preinit)(void) = __asan_init; +#endif diff --git a/contrib/compiler-rt/lib/asan/asan_premap_shadow.cc b/contrib/compiler-rt/lib/asan/asan_premap_shadow.cc new file mode 100644 index 000000000000..229eba99fe0e --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_premap_shadow.cc @@ -0,0 +1,79 @@ +//===-- asan_premap_shadow.cc ---------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Reserve shadow memory with an ifunc resolver. +//===----------------------------------------------------------------------===// + +#include "asan_mapping.h" + +#if ASAN_PREMAP_SHADOW + +#include "asan_premap_shadow.h" +#include "sanitizer_common/sanitizer_posix.h" + +namespace __asan { + +// The code in this file needs to run in an unrelocated binary. It may not +// access any external symbol, including its own non-hidden globals. + +// Conservative upper limit. +uptr PremapShadowSize() { + uptr granularity = GetMmapGranularity(); + return RoundUpTo(GetMaxVirtualAddress() >> SHADOW_SCALE, granularity); +} + +// Returns an address aligned to 8 pages, such that one page on the left and +// PremapShadowSize() bytes on the right of it are mapped r/o.
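// (Editorial sketch, not in the original file; addresses are illustrative.)
// The reservation made below is trimmed as follows:
//
//   map_start   shadow_start - page   shadow_start          shadow_end
//      |---- unmapped ----|- left padding -|- premapped shadow -|---- unmapped ----|
//
// MmapNoAccess reserves shadow_size + left_padding + alignment bytes, and the
// two slack regions are munmap'ed again, keeping one no-access padding page
// to the left of the aligned shadow.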
+uptr PremapShadow() { + uptr granularity = GetMmapGranularity(); + uptr alignment = granularity * 8; + uptr left_padding = granularity; + uptr shadow_size = PremapShadowSize(); + uptr map_size = shadow_size + left_padding + alignment; + + uptr map_start = (uptr)MmapNoAccess(map_size); + CHECK_NE(map_start, ~(uptr)0); + + uptr shadow_start = RoundUpTo(map_start + left_padding, alignment); + uptr shadow_end = shadow_start + shadow_size; + internal_munmap(reinterpret_cast<void *>(map_start), + shadow_start - left_padding - map_start); + internal_munmap(reinterpret_cast<void *>(shadow_end), + map_start + map_size - shadow_end); + return shadow_start; +} + +bool PremapShadowFailed() { + uptr shadow = reinterpret_cast<uptr>(&__asan_shadow); + uptr resolver = reinterpret_cast<uptr>(&__asan_premap_shadow); + // shadow == resolver is how Android KitKat and older handles ifunc. + // shadow == 0 just in case. + if (shadow == 0 || shadow == resolver) + return true; + return false; +} +} // namespace __asan + +extern "C" { +decltype(__asan_shadow)* __asan_premap_shadow() { + // The resolver may be called multiple times. Map the shadow just once. + static uptr premapped_shadow = 0; + if (!premapped_shadow) premapped_shadow = __asan::PremapShadow(); + return reinterpret_cast<decltype(__asan_shadow)*>(premapped_shadow); +} + +// __asan_shadow is a "function" that has the same address as the first byte of +// the shadow mapping. +INTERFACE_ATTRIBUTE __attribute__((ifunc("__asan_premap_shadow"))) void +__asan_shadow(); +} + +#endif // ASAN_PREMAP_SHADOW diff --git a/contrib/compiler-rt/lib/asan/asan_premap_shadow.h b/contrib/compiler-rt/lib/asan/asan_premap_shadow.h new file mode 100644 index 000000000000..41acbdbbb69f --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_premap_shadow.h @@ -0,0 +1,30 @@ +//===-- asan_premap_shadow.h ------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Premap shadow range with an ifunc resolver. +//===----------------------------------------------------------------------===// + + +#ifndef ASAN_PREMAP_SHADOW_H +#define ASAN_PREMAP_SHADOW_H + +#if ASAN_PREMAP_SHADOW +namespace __asan { +// Conservative upper limit. +uptr PremapShadowSize(); +bool PremapShadowFailed(); +} +#endif + +extern "C" INTERFACE_ATTRIBUTE void __asan_shadow(); +extern "C" decltype(__asan_shadow)* __asan_premap_shadow(); + +#endif // ASAN_PREMAP_SHADOW_H diff --git a/contrib/compiler-rt/lib/asan/asan_report.cc b/contrib/compiler-rt/lib/asan/asan_report.cc new file mode 100644 index 000000000000..439712350e46 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_report.cc @@ -0,0 +1,552 @@ +//===-- asan_report.cc ----------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// This file contains error reporting code.
+//===----------------------------------------------------------------------===// + +#include "asan_errors.h" +#include "asan_flags.h" +#include "asan_descriptions.h" +#include "asan_internal.h" +#include "asan_mapping.h" +#include "asan_report.h" +#include "asan_scariness_score.h" +#include "asan_stack.h" +#include "asan_thread.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_report_decorator.h" +#include "sanitizer_common/sanitizer_stackdepot.h" +#include "sanitizer_common/sanitizer_symbolizer.h" + +namespace __asan { + +// -------------------- User-specified callbacks ----------------- {{{1 +static void (*error_report_callback)(const char*); +static char *error_message_buffer = nullptr; +static uptr error_message_buffer_pos = 0; +static BlockingMutex error_message_buf_mutex(LINKER_INITIALIZED); +static const unsigned kAsanBuggyPcPoolSize = 25; +static __sanitizer::atomic_uintptr_t AsanBuggyPcPool[kAsanBuggyPcPoolSize]; + +void AppendToErrorMessageBuffer(const char *buffer) { + BlockingMutexLock l(&error_message_buf_mutex); + if (!error_message_buffer) { + error_message_buffer = + (char*)MmapOrDieQuietly(kErrorMessageBufferSize, __func__); + error_message_buffer_pos = 0; + } + uptr length = internal_strlen(buffer); + RAW_CHECK(kErrorMessageBufferSize >= error_message_buffer_pos); + uptr remaining = kErrorMessageBufferSize - error_message_buffer_pos; + internal_strncpy(error_message_buffer + error_message_buffer_pos, + buffer, remaining); + error_message_buffer[kErrorMessageBufferSize - 1] = '\0'; + // FIXME: reallocate the buffer instead of truncating the message. + error_message_buffer_pos += Min(remaining, length); +} + +// ---------------------- Helper functions ----------------------- {{{1 + +void PrintMemoryByte(InternalScopedString *str, const char *before, u8 byte, + bool in_shadow, const char *after) { + Decorator d; + str->append("%s%s%x%x%s%s", before, + in_shadow ? d.ShadowByte(byte) : d.MemoryByte(), byte >> 4, + byte & 15, d.Default(), after); +} + +static void PrintZoneForPointer(uptr ptr, uptr zone_ptr, + const char *zone_name) { + if (zone_ptr) { + if (zone_name) { + Printf("malloc_zone_from_ptr(%p) = %p, which is %s\n", + ptr, zone_ptr, zone_name); + } else { + Printf("malloc_zone_from_ptr(%p) = %p, which doesn't have a name\n", + ptr, zone_ptr); + } + } else { + Printf("malloc_zone_from_ptr(%p) = 0\n", ptr); + } +} + +// ---------------------- Address Descriptions ------------------- {{{1 + +bool ParseFrameDescription(const char *frame_descr, + InternalMmapVector<StackVarDescr> *vars) { + CHECK(frame_descr); + const char *p; + // This string is created by the compiler and has the following form: + // "n alloc_1 alloc_2 ... alloc_n" + // where alloc_i looks like "offset size len ObjectName" + // or "offset size len ObjectName:line". 
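// (Editorial example, not in the original file; values are hypothetical.)
// The descriptor "2 16 4 3 buf 48 8 5 arr:7" describes two variables:
// "buf", 4 bytes at frame offset 16, and "arr" (declared on line 7),
// 8 bytes at frame offset 48. Each len field counts the whole name token,
// ":line" suffix included.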
+ uptr n_objects = (uptr)internal_simple_strtoll(frame_descr, &p, 10); + if (n_objects == 0) + return false; + + for (uptr i = 0; i < n_objects; i++) { + uptr beg = (uptr)internal_simple_strtoll(p, &p, 10); + uptr size = (uptr)internal_simple_strtoll(p, &p, 10); + uptr len = (uptr)internal_simple_strtoll(p, &p, 10); + if (beg == 0 || size == 0 || *p != ' ') { + return false; + } + p++; + char *colon_pos = internal_strchr(p, ':'); + uptr line = 0; + uptr name_len = len; + if (colon_pos != nullptr && colon_pos < p + len) { + name_len = colon_pos - p; + line = (uptr)internal_simple_strtoll(colon_pos + 1, nullptr, 10); + } + StackVarDescr var = {beg, size, p, name_len, line}; + vars->push_back(var); + p += len; + } + + return true; +} + +// -------------------- Different kinds of reports ----------------- {{{1 + +// Use ScopedInErrorReport to run common actions just before and +// immediately after printing an error report. +class ScopedInErrorReport { + public: + explicit ScopedInErrorReport(bool fatal = false) + : halt_on_error_(fatal || flags()->halt_on_error) { + // Make sure the registry and sanitizer report mutexes are locked while + // we're printing an error report. + // We can lock them only here to avoid self-deadlock in case of + // recursive reports. + asanThreadRegistry().Lock(); + Printf( + "=================================================================\n"); + } + + ~ScopedInErrorReport() { + if (halt_on_error_ && !__sanitizer_acquire_crash_state()) { + asanThreadRegistry().Unlock(); + return; + } + ASAN_ON_ERROR(); + if (current_error_.IsValid()) current_error_.Print(); + + // Make sure the current thread is announced. + DescribeThread(GetCurrentThread()); + // We may want to grab this lock again when printing stats. + asanThreadRegistry().Unlock(); + // Print memory stats. + if (flags()->print_stats) + __asan_print_accumulated_stats(); + + if (common_flags()->print_cmdline) + PrintCmdline(); + + if (common_flags()->print_module_map == 2) PrintModuleMap(); + + // Copy the message buffer so that we can start logging without holding a + // lock that gets acquired during printing. + InternalMmapVector<char> buffer_copy(kErrorMessageBufferSize); + { + BlockingMutexLock l(&error_message_buf_mutex); + internal_memcpy(buffer_copy.data(), + error_message_buffer, kErrorMessageBufferSize); + } + + LogFullErrorReport(buffer_copy.data()); + + if (error_report_callback) { + error_report_callback(buffer_copy.data()); + } + + if (halt_on_error_ && common_flags()->abort_on_error) { + // On Android the message is truncated to 512 characters. + // FIXME: implement "compact" error format, possibly without, or with + // highly compressed stack traces? + // FIXME: or just use the summary line as abort message? + SetAbortMessage(buffer_copy.data()); + } + + // In halt_on_error = false mode, reset the current error object (before + // unlocking). + if (!halt_on_error_) + internal_memset(&current_error_, 0, sizeof(current_error_)); + + if (halt_on_error_) { + Report("ABORTING\n"); + Die(); + } + } + + void ReportError(const ErrorDescription &description) { + // Can only report one error per ScopedInErrorReport. + CHECK_EQ(current_error_.kind, kErrorKindInvalid); + current_error_ = description; + } + + static ErrorDescription &CurrentError() { + return current_error_; + } + + private: + ScopedErrorReportLock error_report_lock_; + // Error currently being reported. This enables the destructor to interact + // with the debugger and point it to an error description.
+ static ErrorDescription current_error_; + bool halt_on_error_; +}; + +ErrorDescription ScopedInErrorReport::current_error_(LINKER_INITIALIZED); + +void ReportDeadlySignal(const SignalContext &sig) { + ScopedInErrorReport in_report(/*fatal*/ true); + ErrorDeadlySignal error(GetCurrentTidOrInvalid(), sig); + in_report.ReportError(error); +} + +void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack) { + ScopedInErrorReport in_report; + ErrorDoubleFree error(GetCurrentTidOrInvalid(), free_stack, addr); + in_report.ReportError(error); +} + +void ReportNewDeleteTypeMismatch(uptr addr, uptr delete_size, + uptr delete_alignment, + BufferedStackTrace *free_stack) { + ScopedInErrorReport in_report; + ErrorNewDeleteTypeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr, + delete_size, delete_alignment); + in_report.ReportError(error); +} + +void ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack) { + ScopedInErrorReport in_report; + ErrorFreeNotMalloced error(GetCurrentTidOrInvalid(), free_stack, addr); + in_report.ReportError(error); +} + +void ReportAllocTypeMismatch(uptr addr, BufferedStackTrace *free_stack, + AllocType alloc_type, + AllocType dealloc_type) { + ScopedInErrorReport in_report; + ErrorAllocTypeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr, + alloc_type, dealloc_type); + in_report.ReportError(error); +} + +void ReportMallocUsableSizeNotOwned(uptr addr, BufferedStackTrace *stack) { + ScopedInErrorReport in_report; + ErrorMallocUsableSizeNotOwned error(GetCurrentTidOrInvalid(), stack, addr); + in_report.ReportError(error); +} + +void ReportSanitizerGetAllocatedSizeNotOwned(uptr addr, + BufferedStackTrace *stack) { + ScopedInErrorReport in_report; + ErrorSanitizerGetAllocatedSizeNotOwned error(GetCurrentTidOrInvalid(), stack, + addr); + in_report.ReportError(error); +} + +void ReportCallocOverflow(uptr count, uptr size, BufferedStackTrace *stack) { + ScopedInErrorReport in_report(/*fatal*/ true); + ErrorCallocOverflow error(GetCurrentTidOrInvalid(), stack, count, size); + in_report.ReportError(error); +} + +void ReportPvallocOverflow(uptr size, BufferedStackTrace *stack) { + ScopedInErrorReport in_report(/*fatal*/ true); + ErrorPvallocOverflow error(GetCurrentTidOrInvalid(), stack, size); + in_report.ReportError(error); +} + +void ReportInvalidAllocationAlignment(uptr alignment, + BufferedStackTrace *stack) { + ScopedInErrorReport in_report(/*fatal*/ true); + ErrorInvalidAllocationAlignment error(GetCurrentTidOrInvalid(), stack, + alignment); + in_report.ReportError(error); +} + +void ReportInvalidAlignedAllocAlignment(uptr size, uptr alignment, + BufferedStackTrace *stack) { + ScopedInErrorReport in_report(/*fatal*/ true); + ErrorInvalidAlignedAllocAlignment error(GetCurrentTidOrInvalid(), stack, + size, alignment); + in_report.ReportError(error); +} + +void ReportInvalidPosixMemalignAlignment(uptr alignment, + BufferedStackTrace *stack) { + ScopedInErrorReport in_report(/*fatal*/ true); + ErrorInvalidPosixMemalignAlignment error(GetCurrentTidOrInvalid(), stack, + alignment); + in_report.ReportError(error); +} + +void ReportAllocationSizeTooBig(uptr user_size, uptr total_size, uptr max_size, + BufferedStackTrace *stack) { + ScopedInErrorReport in_report(/*fatal*/ true); + ErrorAllocationSizeTooBig error(GetCurrentTidOrInvalid(), stack, user_size, + total_size, max_size); + in_report.ReportError(error); +} + +void ReportRssLimitExceeded(BufferedStackTrace *stack) { + ScopedInErrorReport in_report(/*fatal*/ true); + ErrorRssLimitExceeded 
error(GetCurrentTidOrInvalid(), stack); + in_report.ReportError(error); +} + +void ReportOutOfMemory(uptr requested_size, BufferedStackTrace *stack) { + ScopedInErrorReport in_report(/*fatal*/ true); + ErrorOutOfMemory error(GetCurrentTidOrInvalid(), stack, requested_size); + in_report.ReportError(error); +} + +void ReportStringFunctionMemoryRangesOverlap(const char *function, + const char *offset1, uptr length1, + const char *offset2, uptr length2, + BufferedStackTrace *stack) { + ScopedInErrorReport in_report; + ErrorStringFunctionMemoryRangesOverlap error( + GetCurrentTidOrInvalid(), stack, (uptr)offset1, length1, (uptr)offset2, + length2, function); + in_report.ReportError(error); +} + +void ReportStringFunctionSizeOverflow(uptr offset, uptr size, + BufferedStackTrace *stack) { + ScopedInErrorReport in_report; + ErrorStringFunctionSizeOverflow error(GetCurrentTidOrInvalid(), stack, offset, + size); + in_report.ReportError(error); +} + +void ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end, + uptr old_mid, uptr new_mid, + BufferedStackTrace *stack) { + ScopedInErrorReport in_report; + ErrorBadParamsToAnnotateContiguousContainer error( + GetCurrentTidOrInvalid(), stack, beg, end, old_mid, new_mid); + in_report.ReportError(error); +} + +void ReportODRViolation(const __asan_global *g1, u32 stack_id1, + const __asan_global *g2, u32 stack_id2) { + ScopedInErrorReport in_report; + ErrorODRViolation error(GetCurrentTidOrInvalid(), g1, stack_id1, g2, + stack_id2); + in_report.ReportError(error); +} + +// ----------------------- CheckForInvalidPointerPair ----------- {{{1 +static NOINLINE void ReportInvalidPointerPair(uptr pc, uptr bp, uptr sp, + uptr a1, uptr a2) { + ScopedInErrorReport in_report; + ErrorInvalidPointerPair error(GetCurrentTidOrInvalid(), pc, bp, sp, a1, a2); + in_report.ReportError(error); +} + +static bool IsInvalidPointerPair(uptr a1, uptr a2) { + if (a1 == a2) + return false; + + // 256B in shadow memory can be iterated quite fast + static const uptr kMaxOffset = 2048; + + uptr left = a1 < a2 ? a1 : a2; + uptr right = a1 < a2 ? a2 : a1; + uptr offset = right - left; + if (offset <= kMaxOffset) + return __asan_region_is_poisoned(left, offset); + + AsanThread *t = GetCurrentThread(); + + // check whether left is a stack memory pointer + if (uptr shadow_offset1 = t->GetStackVariableShadowStart(left)) { + uptr shadow_offset2 = t->GetStackVariableShadowStart(right); + return shadow_offset2 == 0 || shadow_offset1 != shadow_offset2; + } + + // check whether left is a heap memory address + HeapAddressDescription hdesc1, hdesc2; + if (GetHeapAddressInformation(left, 0, &hdesc1) && + hdesc1.chunk_access.access_type == kAccessTypeInside) + return !GetHeapAddressInformation(right, 0, &hdesc2) || + hdesc2.chunk_access.access_type != kAccessTypeInside || + hdesc1.chunk_access.chunk_begin != hdesc2.chunk_access.chunk_begin; + + // check whether left is an address of a global variable + GlobalAddressDescription gdesc1, gdesc2; + if (GetGlobalAddressInformation(left, 0, &gdesc1)) + return !GetGlobalAddressInformation(right - 1, 0, &gdesc2) || + !gdesc1.PointsInsideTheSameVariable(gdesc2); + + if (t->GetStackVariableShadowStart(right) || + GetHeapAddressInformation(right, 0, &hdesc2) || + GetGlobalAddressInformation(right - 1, 0, &gdesc2)) + return true; + + // At this point we know nothing about both a1 and a2 addresses. 
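+ // (neither maps to the stack, the heap, nor globals). For illustration,
+ // with detect_invalid_pointer_pairs=1 the checks above catch, e.g.,
+ // comparisons of pointers into two unrelated heap chunks:
+ //   char *p = new char[8], *q = new char[8];
+ //   if (p < q) ...  // instrumented as __sanitizer_ptr_cmp(p, q)
+ // whereas a pair of completely unknown addresses is conservatively
+ // treated as valid: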
+ return false;
+}
+
+static INLINE void CheckForInvalidPointerPair(void *p1, void *p2) {
+ switch (flags()->detect_invalid_pointer_pairs) {
+ case 0 : return;
+ case 1 : if (p1 == nullptr || p2 == nullptr) return; break;
+ }
+
+ uptr a1 = reinterpret_cast<uptr>(p1);
+ uptr a2 = reinterpret_cast<uptr>(p2);
+
+ if (IsInvalidPointerPair(a1, a2)) {
+ GET_CALLER_PC_BP_SP;
+ ReportInvalidPointerPair(pc, bp, sp, a1, a2);
+ }
+}
+// ----------------------- Mac-specific reports ----------------- {{{1
+
+void ReportMacMzReallocUnknown(uptr addr, uptr zone_ptr, const char *zone_name,
+ BufferedStackTrace *stack) {
+ ScopedInErrorReport in_report;
+ Printf("mz_realloc(%p) -- attempting to realloc unallocated memory.\n"
+ "This is an unrecoverable problem, exiting now.\n",
+ addr);
+ PrintZoneForPointer(addr, zone_ptr, zone_name);
+ stack->Print();
+ DescribeAddressIfHeap(addr);
+}
+
+// -------------- SuppressErrorReport -------------- {{{1
+// Avoid duplicate error reports when running in ASan recovery mode.
+static bool SuppressErrorReport(uptr pc) {
+ if (!common_flags()->suppress_equal_pcs) return false;
+ for (unsigned i = 0; i < kAsanBuggyPcPoolSize; i++) {
+ uptr cmp = atomic_load_relaxed(&AsanBuggyPcPool[i]);
+ if (cmp == 0 && atomic_compare_exchange_strong(&AsanBuggyPcPool[i], &cmp,
+ pc, memory_order_relaxed))
+ return false;
+ if (cmp == pc) return true;
+ }
+ Die();
+}
+
+void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
+ uptr access_size, u32 exp, bool fatal) {
+ if (!fatal && SuppressErrorReport(pc)) return;
+ ENABLE_FRAME_POINTER;
+
+ // Optimization experiments.
+ // The experiments can be used to evaluate potential optimizations that remove
+ // instrumentation (assess false negatives). Instead of completely removing
+ // some instrumentation, the compiler can emit special calls into the runtime
+ // (e.g. __asan_report_exp_load1 instead of __asan_report_load1) and pass
+ // a mask of experiments (exp).
+ // The reaction to a non-zero value of exp is to be defined.
+ (void)exp;
+
+ ScopedInErrorReport in_report(fatal);
+ ErrorGeneric error(GetCurrentTidOrInvalid(), pc, bp, sp, addr, is_write,
+ access_size);
+ in_report.ReportError(error);
+}
+
+} // namespace __asan
+
+// --------------------------- Interface --------------------- {{{1
+using namespace __asan; // NOLINT
+
+void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write,
+ uptr access_size, u32 exp) {
+ ENABLE_FRAME_POINTER;
+ bool fatal = flags()->halt_on_error;
+ ReportGenericError(pc, bp, sp, addr, is_write, access_size, exp, fatal);
+}
+
+void NOINLINE __asan_set_error_report_callback(void (*callback)(const char*)) {
+ BlockingMutexLock l(&error_message_buf_mutex);
+ error_report_callback = callback;
+}
+
+void __asan_describe_address(uptr addr) {
+ // Thread registry must be locked while we're describing an address.
+ asanThreadRegistry().Lock();
+ PrintAddressDescription(addr, 1, "");
+ asanThreadRegistry().Unlock();
+}
+
+int __asan_report_present() {
+ return ScopedInErrorReport::CurrentError().kind != kErrorKindInvalid;
+}
+
+uptr __asan_get_report_pc() {
+ if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
+ return ScopedInErrorReport::CurrentError().Generic.pc;
+ return 0;
+}
+
+uptr __asan_get_report_bp() {
+ if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
+ return ScopedInErrorReport::CurrentError().Generic.bp;
+ return 0;
+}
+
+uptr __asan_get_report_sp() {
+ if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
+ return ScopedInErrorReport::CurrentError().Generic.sp;
+ return 0;
+}
+
+uptr __asan_get_report_address() {
+ ErrorDescription &err = ScopedInErrorReport::CurrentError();
+ if (err.kind == kErrorKindGeneric)
+ return err.Generic.addr_description.Address();
+ else if (err.kind == kErrorKindDoubleFree)
+ return err.DoubleFree.addr_description.addr;
+ return 0;
+}
+
+int __asan_get_report_access_type() {
+ if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
+ return ScopedInErrorReport::CurrentError().Generic.is_write;
+ return 0;
+}
+
+uptr __asan_get_report_access_size() {
+ if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
+ return ScopedInErrorReport::CurrentError().Generic.access_size;
+ return 0;
+}
+
+const char *__asan_get_report_description() {
+ if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
+ return ScopedInErrorReport::CurrentError().Generic.bug_descr;
+ return ScopedInErrorReport::CurrentError().Base.scariness.GetDescription();
+}
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_ptr_sub(void *a, void *b) {
+ CheckForInvalidPointerPair(a, b);
+}
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_ptr_cmp(void *a, void *b) {
+ CheckForInvalidPointerPair(a, b);
+}
+} // extern "C"
+
+// Provide a default implementation of __asan_on_error that does nothing
+// and may be overridden by the user.
+SANITIZER_INTERFACE_WEAK_DEF(void, __asan_on_error, void) {} diff --git a/contrib/compiler-rt/lib/asan/asan_report.h b/contrib/compiler-rt/lib/asan/asan_report.h new file mode 100644 index 000000000000..b0c167dda756 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_report.h @@ -0,0 +1,98 @@ +//===-- asan_report.h -------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan-private header for error reporting functions.
+//===----------------------------------------------------------------------===//
+
+#ifndef ASAN_REPORT_H
+#define ASAN_REPORT_H
+
+#include "asan_allocator.h"
+#include "asan_internal.h"
+#include "asan_thread.h"
+
+namespace __asan {
+
+struct StackVarDescr {
+ uptr beg;
+ uptr size;
+ const char *name_pos;
+ uptr name_len;
+ uptr line;
+};
+
+// Returns the number of globals close to the provided address and copies
+// them to the "globals" array.
+int GetGlobalsForAddress(uptr addr, __asan_global *globals, u32 *reg_sites,
+ int max_globals);
+
+const char *MaybeDemangleGlobalName(const char *name);
+void PrintGlobalNameIfASCII(InternalScopedString *str, const __asan_global &g);
+void PrintGlobalLocation(InternalScopedString *str, const __asan_global &g);
+
+void PrintMemoryByte(InternalScopedString *str, const char *before, u8 byte,
+ bool in_shadow, const char *after = "\n");
+
+// The following functions print an address description depending
+// on the memory type (shadow/heap/stack/global).
+bool ParseFrameDescription(const char *frame_descr,
+ InternalMmapVector<StackVarDescr> *vars);
+
+// Different kinds of error reports.
+void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
+ uptr access_size, u32 exp, bool fatal);
+void ReportDeadlySignal(const SignalContext &sig);
+void ReportNewDeleteTypeMismatch(uptr addr, uptr delete_size,
+ uptr delete_alignment,
+ BufferedStackTrace *free_stack);
+void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack);
+void ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack);
+void ReportAllocTypeMismatch(uptr addr, BufferedStackTrace *free_stack,
+ AllocType alloc_type,
+ AllocType dealloc_type);
+void ReportMallocUsableSizeNotOwned(uptr addr, BufferedStackTrace *stack);
+void ReportSanitizerGetAllocatedSizeNotOwned(uptr addr,
+ BufferedStackTrace *stack);
+void ReportCallocOverflow(uptr count, uptr size, BufferedStackTrace *stack);
+void ReportPvallocOverflow(uptr size, BufferedStackTrace *stack);
+void ReportInvalidAllocationAlignment(uptr alignment,
+ BufferedStackTrace *stack);
+void ReportInvalidAlignedAllocAlignment(uptr size, uptr alignment,
+ BufferedStackTrace *stack);
+void ReportInvalidPosixMemalignAlignment(uptr alignment,
+ BufferedStackTrace *stack);
+void ReportAllocationSizeTooBig(uptr user_size, uptr total_size, uptr max_size,
+ BufferedStackTrace *stack);
+void ReportRssLimitExceeded(BufferedStackTrace *stack);
+void ReportOutOfMemory(uptr requested_size, BufferedStackTrace *stack);
+void ReportStringFunctionMemoryRangesOverlap(const char *function,
+ const char *offset1, uptr length1,
+ const char *offset2, uptr length2,
+ BufferedStackTrace *stack);
+void ReportStringFunctionSizeOverflow(uptr offset, uptr size,
+ BufferedStackTrace *stack);
+void ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end,
+ uptr old_mid, uptr new_mid,
+ BufferedStackTrace *stack);
+
+void ReportODRViolation(const __asan_global *g1, u32 stack_id1,
+ const __asan_global *g2, u32 stack_id2);
+
+// Mac-specific errors and warnings.
+void ReportMacMzReallocUnknown(uptr addr, uptr zone_ptr,
+ const char *zone_name,
+ BufferedStackTrace *stack);
+void ReportMacCfReallocUnknown(uptr addr, uptr zone_ptr,
+ const char *zone_name,
+ BufferedStackTrace *stack);
+
+} // namespace __asan
+#endif // ASAN_REPORT_H diff --git a/contrib/compiler-rt/lib/asan/asan_rtems.cc b/contrib/compiler-rt/lib/asan/asan_rtems.cc new file mode 100644 index 000000000000..b48cc6a75d78 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_rtems.cc @@ -0,0 +1,259 @@ +//===-- asan_rtems.cc -----------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// RTEMS-specific details.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_rtems.h"
+#if SANITIZER_RTEMS
+
+#include "asan_internal.h"
+#include "asan_interceptors.h"
+#include "asan_mapping.h"
+#include "asan_poisoning.h"
+#include "asan_report.h"
+#include "asan_stack.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
+
+#include <pthread.h>
+#include <stdlib.h>
+
+namespace __asan {
+
+static void ResetShadowMemory() {
+ uptr shadow_start = SHADOW_OFFSET;
+ uptr shadow_end = MEM_TO_SHADOW(kMyriadMemoryEnd32);
+ uptr gap_start = MEM_TO_SHADOW(shadow_start);
+ uptr gap_end = MEM_TO_SHADOW(shadow_end);
+
+ REAL(memset)((void *)shadow_start, 0, shadow_end - shadow_start);
+ REAL(memset)((void *)gap_start, kAsanShadowGap, gap_end - gap_start);
+}
+
+void InitializeShadowMemory() {
+ kHighMemEnd = 0;
+ kMidMemBeg = 0;
+ kMidMemEnd = 0;
+
+ ResetShadowMemory();
+}
+
+void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
+ UNIMPLEMENTED();
+}
+
+void AsanCheckDynamicRTPrereqs() {}
+void AsanCheckIncompatibleRT() {}
+void InitializeAsanInterceptors() {}
+void InitializePlatformInterceptors() {}
+void InitializePlatformExceptionHandlers() {}
+
+// RTEMS only supports static linking; it suffices to return with no
+// error.
+void *AsanDoesNotSupportStaticLinkage() { return nullptr; }
+
+void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
+ UNIMPLEMENTED();
+}
+
+void EarlyInit() {
+ // Provide early initialization of shadow memory so that
+ // instrumented code running before full initialization will not
+ // report spurious errors.
+ ResetShadowMemory();
+}
+
+// We can use a plain thread_local variable for TSD.
+static thread_local void *per_thread;
+
+void *AsanTSDGet() { return per_thread; }
+
+void AsanTSDSet(void *tsd) { per_thread = tsd; }
+
+// There's no initialization needed, and the passed-in destructor
+// will never be called. Instead, our own thread destruction hook
+// (below) will call AsanThread::TSDDtor directly.
+void AsanTSDInit(void (*destructor)(void *tsd)) {
+ DCHECK(destructor == &PlatformTSDDtor);
+}
+
+void PlatformTSDDtor(void *tsd) { UNREACHABLE(__func__); }
+
+//
+// Thread registration. We provide an API similar to the Fuchsia port.
+//
+
+struct AsanThread::InitOptions {
+ uptr stack_bottom, stack_size, tls_bottom, tls_size;
+};
+
+// Shared setup between thread creation and startup for the initial thread.
+static AsanThread *CreateAsanThread(StackTrace *stack, u32 parent_tid,
+ uptr user_id, bool detached,
+ uptr stack_bottom, uptr stack_size,
+ uptr tls_bottom, uptr tls_size) {
+ // In lieu of AsanThread::Create.
+ AsanThread *thread = (AsanThread *)MmapOrDie(sizeof(AsanThread), __func__);
+ AsanThreadContext::CreateThreadContextArgs args = {thread, stack};
+ asanThreadRegistry().CreateThread(user_id, detached, parent_tid, &args);
+
+ // On other systems, AsanThread::Init() is called from the new
+ // thread itself. But on RTEMS we already know the stack address
+ // range beforehand, so we can do most of the setup right now.
+ const AsanThread::InitOptions options = {stack_bottom, stack_size,
+ tls_bottom, tls_size};
+ thread->Init(&options);
+ return thread;
+}
+
+// This gets the same arguments passed to Init by CreateAsanThread, above.
+// We're in the creator thread before the new thread is actually started, but
+// its stack and tls address range are already known.
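+// For orientation, the hook sequence wired up at the end of this file is,
+// roughly:
+//   creator thread:  BeforeThreadCreateHook()  -> OS creates the thread
+//                     -> ThreadCreateHook(hook, aborted)
+//   new thread:      ThreadStartHook(hook, os_id) ... ThreadExitHook(hook, os_id)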
+void AsanThread::SetThreadStackAndTls(const AsanThread::InitOptions *options) {
+ DCHECK_NE(GetCurrentThread(), this);
+ DCHECK_NE(GetCurrentThread(), nullptr);
+ CHECK_NE(options->stack_bottom, 0);
+ CHECK_NE(options->stack_size, 0);
+ stack_bottom_ = options->stack_bottom;
+ stack_top_ = options->stack_bottom + options->stack_size;
+ tls_begin_ = options->tls_bottom;
+ tls_end_ = options->tls_bottom + options->tls_size;
+}
+
+// Called by __asan::AsanInitInternal (asan_rtl.cc). Unlike other ports, the
+// main thread on RTEMS does not require special treatment; its AsanThread is
+// already created by the provided hooks. This function simply looks up and
+// returns the created thread.
+AsanThread *CreateMainThread() {
+ return GetThreadContextByTidLocked(0)->thread;
+}
+
+// This is called before each thread creation is attempted. So, in
+// its first call, the calling thread is the initial and sole thread.
+static void *BeforeThreadCreateHook(uptr user_id, bool detached,
+ uptr stack_bottom, uptr stack_size,
+ uptr tls_bottom, uptr tls_size) {
+ EnsureMainThreadIDIsCorrect();
+ // Strict init-order checking is thread-hostile.
+ if (flags()->strict_init_order) StopInitOrderChecking();
+
+ GET_STACK_TRACE_THREAD;
+ u32 parent_tid = GetCurrentTidOrInvalid();
+
+ return CreateAsanThread(&stack, parent_tid, user_id, detached,
+ stack_bottom, stack_size, tls_bottom, tls_size);
+}
+
+// This is called after creating a new thread (in the creating thread),
+// with the pointer returned by BeforeThreadCreateHook (above).
+static void ThreadCreateHook(void *hook, bool aborted) {
+ AsanThread *thread = static_cast<AsanThread *>(hook);
+ if (!aborted) {
+ // The thread was created successfully.
+ // ThreadStartHook is already running in the new thread.
+ } else {
+ // The thread wasn't created after all.
+ // Clean up everything we set up in BeforeThreadCreateHook.
+ asanThreadRegistry().FinishThread(thread->tid());
+ UnmapOrDie(thread, sizeof(AsanThread));
+ }
+}
+
+// This is called (1) in the newly-created thread before it runs anything
+// else, with the pointer returned by BeforeThreadCreateHook (above), and
+// (2) before a thread restart.
+static void ThreadStartHook(void *hook, uptr os_id) {
+ if (!hook)
+ return;
+
+ AsanThread *thread = static_cast<AsanThread *>(hook);
+ SetCurrentThread(thread);
+
+ ThreadStatus status =
+ asanThreadRegistry().GetThreadLocked(thread->tid())->status;
+ DCHECK(status == ThreadStatusCreated || status == ThreadStatusRunning);
+ // Determine whether we are starting or restarting the thread.
+ if (status == ThreadStatusCreated)
+ // In lieu of AsanThread::ThreadStart.
+ asanThreadRegistry().StartThread(thread->tid(), os_id,
+ /*workerthread*/ false, nullptr);
+ else {
+ // In a thread restart, a thread may resume execution at an
+ // arbitrary function entry point, with its stack and TLS state
+ // reset. We unpoison the stack in that case.
+ PoisonShadow(thread->stack_bottom(), thread->stack_size(), 0);
+ }
+}
+
+// Each thread runs this just before it exits,
+// with the pointer returned by BeforeThreadCreateHook (above).
+// All per-thread destructors have already been called.
+static void ThreadExitHook(void *hook, uptr os_id) {
+ AsanThread *thread = static_cast<AsanThread *>(hook);
+ if (thread)
+ AsanThread::TSDDtor(thread->context());
+}
+
+static void HandleExit() {
+ // Disable ASan by setting it to uninitialized. Also reset the
+ // shadow memory to avoid reporting errors after the run-time has
+ // been destroyed.
+ if (asan_inited) { + asan_inited = false; + ResetShadowMemory(); + } +} + +bool HandleDlopenInit() { + // Not supported on this platform. + static_assert(!SANITIZER_SUPPORTS_INIT_FOR_DLOPEN, + "Expected SANITIZER_SUPPORTS_INIT_FOR_DLOPEN to be false"); + return false; +} +} // namespace __asan + +// These are declared (in extern "C") by <some_path/sanitizer.h>. +// The system runtime will call our definitions directly. + +extern "C" { +void __sanitizer_early_init() { + __asan::EarlyInit(); +} + +void *__sanitizer_before_thread_create_hook(uptr thread, bool detached, + const char *name, + void *stack_base, size_t stack_size, + void *tls_base, size_t tls_size) { + return __asan::BeforeThreadCreateHook( + thread, detached, + reinterpret_cast<uptr>(stack_base), stack_size, + reinterpret_cast<uptr>(tls_base), tls_size); +} + +void __sanitizer_thread_create_hook(void *handle, uptr thread, int status) { + __asan::ThreadCreateHook(handle, status != 0); +} + +void __sanitizer_thread_start_hook(void *handle, uptr self) { + __asan::ThreadStartHook(handle, self); +} + +void __sanitizer_thread_exit_hook(void *handle, uptr self) { + __asan::ThreadExitHook(handle, self); +} + +void __sanitizer_exit() { + __asan::HandleExit(); +} +} // "C" + +#endif // SANITIZER_RTEMS diff --git a/contrib/compiler-rt/lib/asan/asan_rtl.cc b/contrib/compiler-rt/lib/asan/asan_rtl.cc new file mode 100644 index 000000000000..13344f3b8887 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_rtl.cc @@ -0,0 +1,614 @@ +//===-- asan_rtl.cc -------------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Main file of the ASan run-time library. +//===----------------------------------------------------------------------===// + +#include "asan_activation.h" +#include "asan_allocator.h" +#include "asan_interceptors.h" +#include "asan_interface_internal.h" +#include "asan_internal.h" +#include "asan_mapping.h" +#include "asan_poisoning.h" +#include "asan_report.h" +#include "asan_stack.h" +#include "asan_stats.h" +#include "asan_suppressions.h" +#include "asan_thread.h" +#include "sanitizer_common/sanitizer_atomic.h" +#include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_symbolizer.h" +#include "lsan/lsan_common.h" +#include "ubsan/ubsan_init.h" +#include "ubsan/ubsan_platform.h" + +uptr __asan_shadow_memory_dynamic_address; // Global interface symbol. +int __asan_option_detect_stack_use_after_return; // Global interface symbol. +uptr *__asan_test_only_reported_buggy_pointer; // Used only for testing asan. + +namespace __asan { + +uptr AsanMappingProfile[kAsanMappingProfileSize]; + +static void AsanDie() { + static atomic_uint32_t num_calls; + if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) != 0) { + // Don't die twice - run a busy loop. 
+ while (1) { }
+ }
+ if (common_flags()->print_module_map >= 1) PrintModuleMap();
+ if (flags()->sleep_before_dying) {
+ Report("Sleeping for %d second(s)\n", flags()->sleep_before_dying);
+ SleepForSeconds(flags()->sleep_before_dying);
+ }
+ if (flags()->unmap_shadow_on_exit) {
+ if (kMidMemBeg) {
+ UnmapOrDie((void*)kLowShadowBeg, kMidMemBeg - kLowShadowBeg);
+ UnmapOrDie((void*)kMidMemEnd, kHighShadowEnd - kMidMemEnd);
+ } else {
+ if (kHighShadowEnd)
+ UnmapOrDie((void*)kLowShadowBeg, kHighShadowEnd - kLowShadowBeg);
+ }
+ }
+}
+
+static void AsanCheckFailed(const char *file, int line, const char *cond,
+ u64 v1, u64 v2) {
+ Report("AddressSanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n", file,
+ line, cond, (uptr)v1, (uptr)v2);
+
+ // Print a stack trace the first time we come here. Otherwise, we probably
+ // failed a CHECK during symbolization.
+ static atomic_uint32_t num_calls;
+ if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) == 0) {
+ PRINT_CURRENT_STACK_CHECK();
+ }
+
+ Die();
+}
+
+// -------------------------- Globals --------------------- {{{1
+int asan_inited;
+bool asan_init_is_running;
+
+#if !ASAN_FIXED_MAPPING
+uptr kHighMemEnd, kMidMemBeg, kMidMemEnd;
+#endif
+
+// -------------------------- Misc ---------------- {{{1
+void ShowStatsAndAbort() {
+ __asan_print_accumulated_stats();
+ Die();
+}
+
+// --------------- LowLevelAllocateCallback ---------- {{{1
+static void OnLowLevelAllocate(uptr ptr, uptr size) {
+ PoisonShadow(ptr, size, kAsanInternalHeapMagic);
+}
+
+// -------------------------- Run-time entry ------------------- {{{1
+// exported functions
+#define ASAN_REPORT_ERROR(type, is_write, size) \
+extern "C" NOINLINE INTERFACE_ATTRIBUTE \
+void __asan_report_ ## type ## size(uptr addr) { \
+ GET_CALLER_PC_BP_SP; \
+ ReportGenericError(pc, bp, sp, addr, is_write, size, 0, true); \
+} \
+extern "C" NOINLINE INTERFACE_ATTRIBUTE \
+void __asan_report_exp_ ## type ## size(uptr addr, u32 exp) { \
+ GET_CALLER_PC_BP_SP; \
+ ReportGenericError(pc, bp, sp, addr, is_write, size, exp, true); \
+} \
+extern "C" NOINLINE INTERFACE_ATTRIBUTE \
+void __asan_report_ ## type ## size ## _noabort(uptr addr) { \
+ GET_CALLER_PC_BP_SP; \
+ ReportGenericError(pc, bp, sp, addr, is_write, size, 0, false); \
+} \
+
+ASAN_REPORT_ERROR(load, false, 1)
+ASAN_REPORT_ERROR(load, false, 2)
+ASAN_REPORT_ERROR(load, false, 4)
+ASAN_REPORT_ERROR(load, false, 8)
+ASAN_REPORT_ERROR(load, false, 16)
+ASAN_REPORT_ERROR(store, true, 1)
+ASAN_REPORT_ERROR(store, true, 2)
+ASAN_REPORT_ERROR(store, true, 4)
+ASAN_REPORT_ERROR(store, true, 8)
+ASAN_REPORT_ERROR(store, true, 16)
+
+#define ASAN_REPORT_ERROR_N(type, is_write) \
+extern "C" NOINLINE INTERFACE_ATTRIBUTE \
+void __asan_report_ ## type ## _n(uptr addr, uptr size) { \
+ GET_CALLER_PC_BP_SP; \
+ ReportGenericError(pc, bp, sp, addr, is_write, size, 0, true); \
+} \
+extern "C" NOINLINE INTERFACE_ATTRIBUTE \
+void __asan_report_exp_ ## type ## _n(uptr addr, uptr size, u32 exp) { \
+ GET_CALLER_PC_BP_SP; \
+ ReportGenericError(pc, bp, sp, addr, is_write, size, exp, true); \
+} \
+extern "C" NOINLINE INTERFACE_ATTRIBUTE \
+void __asan_report_ ## type ## _n_noabort(uptr addr, uptr size) { \
+ GET_CALLER_PC_BP_SP; \
+ ReportGenericError(pc, bp, sp, addr, is_write, size, 0, false); \
+} \
+
+ASAN_REPORT_ERROR_N(load, false)
+ASAN_REPORT_ERROR_N(store, true)
+
+#define ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, exp_arg, fatal) \
+ if (SANITIZER_MYRIAD2 && !AddrIsInMem(addr) && !AddrIsInShadow(addr)) \
+ return; \
+ uptr sp
= MEM_TO_SHADOW(addr); \ + uptr s = size <= SHADOW_GRANULARITY ? *reinterpret_cast<u8 *>(sp) \ + : *reinterpret_cast<u16 *>(sp); \ + if (UNLIKELY(s)) { \ + if (UNLIKELY(size >= SHADOW_GRANULARITY || \ + ((s8)((addr & (SHADOW_GRANULARITY - 1)) + size - 1)) >= \ + (s8)s)) { \ + if (__asan_test_only_reported_buggy_pointer) { \ + *__asan_test_only_reported_buggy_pointer = addr; \ + } else { \ + GET_CALLER_PC_BP_SP; \ + ReportGenericError(pc, bp, sp, addr, is_write, size, exp_arg, \ + fatal); \ + } \ + } \ + } + +#define ASAN_MEMORY_ACCESS_CALLBACK(type, is_write, size) \ + extern "C" NOINLINE INTERFACE_ATTRIBUTE \ + void __asan_##type##size(uptr addr) { \ + ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, 0, true) \ + } \ + extern "C" NOINLINE INTERFACE_ATTRIBUTE \ + void __asan_exp_##type##size(uptr addr, u32 exp) { \ + ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, exp, true) \ + } \ + extern "C" NOINLINE INTERFACE_ATTRIBUTE \ + void __asan_##type##size ## _noabort(uptr addr) { \ + ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, 0, false) \ + } \ + +ASAN_MEMORY_ACCESS_CALLBACK(load, false, 1) +ASAN_MEMORY_ACCESS_CALLBACK(load, false, 2) +ASAN_MEMORY_ACCESS_CALLBACK(load, false, 4) +ASAN_MEMORY_ACCESS_CALLBACK(load, false, 8) +ASAN_MEMORY_ACCESS_CALLBACK(load, false, 16) +ASAN_MEMORY_ACCESS_CALLBACK(store, true, 1) +ASAN_MEMORY_ACCESS_CALLBACK(store, true, 2) +ASAN_MEMORY_ACCESS_CALLBACK(store, true, 4) +ASAN_MEMORY_ACCESS_CALLBACK(store, true, 8) +ASAN_MEMORY_ACCESS_CALLBACK(store, true, 16) + +extern "C" +NOINLINE INTERFACE_ATTRIBUTE +void __asan_loadN(uptr addr, uptr size) { + if (__asan_region_is_poisoned(addr, size)) { + GET_CALLER_PC_BP_SP; + ReportGenericError(pc, bp, sp, addr, false, size, 0, true); + } +} + +extern "C" +NOINLINE INTERFACE_ATTRIBUTE +void __asan_exp_loadN(uptr addr, uptr size, u32 exp) { + if (__asan_region_is_poisoned(addr, size)) { + GET_CALLER_PC_BP_SP; + ReportGenericError(pc, bp, sp, addr, false, size, exp, true); + } +} + +extern "C" +NOINLINE INTERFACE_ATTRIBUTE +void __asan_loadN_noabort(uptr addr, uptr size) { + if (__asan_region_is_poisoned(addr, size)) { + GET_CALLER_PC_BP_SP; + ReportGenericError(pc, bp, sp, addr, false, size, 0, false); + } +} + +extern "C" +NOINLINE INTERFACE_ATTRIBUTE +void __asan_storeN(uptr addr, uptr size) { + if (__asan_region_is_poisoned(addr, size)) { + GET_CALLER_PC_BP_SP; + ReportGenericError(pc, bp, sp, addr, true, size, 0, true); + } +} + +extern "C" +NOINLINE INTERFACE_ATTRIBUTE +void __asan_exp_storeN(uptr addr, uptr size, u32 exp) { + if (__asan_region_is_poisoned(addr, size)) { + GET_CALLER_PC_BP_SP; + ReportGenericError(pc, bp, sp, addr, true, size, exp, true); + } +} + +extern "C" +NOINLINE INTERFACE_ATTRIBUTE +void __asan_storeN_noabort(uptr addr, uptr size) { + if (__asan_region_is_poisoned(addr, size)) { + GET_CALLER_PC_BP_SP; + ReportGenericError(pc, bp, sp, addr, true, size, 0, false); + } +} + +// Force the linker to keep the symbols for various ASan interface functions. +// We want to keep those in the executable in order to let the instrumented +// dynamic libraries access the symbol even if it is not used by the executable +// itself. This should help if the build system is removing dead code at link +// time. +static NOINLINE void force_interface_symbols() { + volatile int fake_condition = 0; // prevent dead condition elimination. + // __asan_report_* functions are noreturn, so we need a switch to prevent + // the compiler from removing any of them. 
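+ // Since fake_condition is volatile and always zero, none of the cases below
+ // can actually execute, yet the compiler must keep every referenced symbol.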
+ // clang-format off + switch (fake_condition) { + case 1: __asan_report_load1(0); break; + case 2: __asan_report_load2(0); break; + case 3: __asan_report_load4(0); break; + case 4: __asan_report_load8(0); break; + case 5: __asan_report_load16(0); break; + case 6: __asan_report_load_n(0, 0); break; + case 7: __asan_report_store1(0); break; + case 8: __asan_report_store2(0); break; + case 9: __asan_report_store4(0); break; + case 10: __asan_report_store8(0); break; + case 11: __asan_report_store16(0); break; + case 12: __asan_report_store_n(0, 0); break; + case 13: __asan_report_exp_load1(0, 0); break; + case 14: __asan_report_exp_load2(0, 0); break; + case 15: __asan_report_exp_load4(0, 0); break; + case 16: __asan_report_exp_load8(0, 0); break; + case 17: __asan_report_exp_load16(0, 0); break; + case 18: __asan_report_exp_load_n(0, 0, 0); break; + case 19: __asan_report_exp_store1(0, 0); break; + case 20: __asan_report_exp_store2(0, 0); break; + case 21: __asan_report_exp_store4(0, 0); break; + case 22: __asan_report_exp_store8(0, 0); break; + case 23: __asan_report_exp_store16(0, 0); break; + case 24: __asan_report_exp_store_n(0, 0, 0); break; + case 25: __asan_register_globals(nullptr, 0); break; + case 26: __asan_unregister_globals(nullptr, 0); break; + case 27: __asan_set_death_callback(nullptr); break; + case 28: __asan_set_error_report_callback(nullptr); break; + case 29: __asan_handle_no_return(); break; + case 30: __asan_address_is_poisoned(nullptr); break; + case 31: __asan_poison_memory_region(nullptr, 0); break; + case 32: __asan_unpoison_memory_region(nullptr, 0); break; + case 34: __asan_before_dynamic_init(nullptr); break; + case 35: __asan_after_dynamic_init(); break; + case 36: __asan_poison_stack_memory(0, 0); break; + case 37: __asan_unpoison_stack_memory(0, 0); break; + case 38: __asan_region_is_poisoned(0, 0); break; + case 39: __asan_describe_address(0); break; + case 40: __asan_set_shadow_00(0, 0); break; + case 41: __asan_set_shadow_f1(0, 0); break; + case 42: __asan_set_shadow_f2(0, 0); break; + case 43: __asan_set_shadow_f3(0, 0); break; + case 44: __asan_set_shadow_f5(0, 0); break; + case 45: __asan_set_shadow_f8(0, 0); break; + } + // clang-format on +} + +static void asan_atexit() { + Printf("AddressSanitizer exit stats:\n"); + __asan_print_accumulated_stats(); + // Print AsanMappingProfile. 
+ for (uptr i = 0; i < kAsanMappingProfileSize; i++) { + if (AsanMappingProfile[i] == 0) continue; + Printf("asan_mapping.h:%zd -- %zd\n", i, AsanMappingProfile[i]); + } +} + +static void InitializeHighMemEnd() { +#if !SANITIZER_MYRIAD2 +#if !ASAN_FIXED_MAPPING + kHighMemEnd = GetMaxUserVirtualAddress(); + // Increase kHighMemEnd to make sure it's properly + // aligned together with kHighMemBeg: + kHighMemEnd |= SHADOW_GRANULARITY * GetMmapGranularity() - 1; +#endif // !ASAN_FIXED_MAPPING + CHECK_EQ((kHighMemBeg % GetMmapGranularity()), 0); +#endif // !SANITIZER_MYRIAD2 +} + +void PrintAddressSpaceLayout() { + if (kHighMemBeg) { + Printf("|| `[%p, %p]` || HighMem ||\n", + (void*)kHighMemBeg, (void*)kHighMemEnd); + Printf("|| `[%p, %p]` || HighShadow ||\n", + (void*)kHighShadowBeg, (void*)kHighShadowEnd); + } + if (kMidMemBeg) { + Printf("|| `[%p, %p]` || ShadowGap3 ||\n", + (void*)kShadowGap3Beg, (void*)kShadowGap3End); + Printf("|| `[%p, %p]` || MidMem ||\n", + (void*)kMidMemBeg, (void*)kMidMemEnd); + Printf("|| `[%p, %p]` || ShadowGap2 ||\n", + (void*)kShadowGap2Beg, (void*)kShadowGap2End); + Printf("|| `[%p, %p]` || MidShadow ||\n", + (void*)kMidShadowBeg, (void*)kMidShadowEnd); + } + Printf("|| `[%p, %p]` || ShadowGap ||\n", + (void*)kShadowGapBeg, (void*)kShadowGapEnd); + if (kLowShadowBeg) { + Printf("|| `[%p, %p]` || LowShadow ||\n", + (void*)kLowShadowBeg, (void*)kLowShadowEnd); + Printf("|| `[%p, %p]` || LowMem ||\n", + (void*)kLowMemBeg, (void*)kLowMemEnd); + } + Printf("MemToShadow(shadow): %p %p", + (void*)MEM_TO_SHADOW(kLowShadowBeg), + (void*)MEM_TO_SHADOW(kLowShadowEnd)); + if (kHighMemBeg) { + Printf(" %p %p", + (void*)MEM_TO_SHADOW(kHighShadowBeg), + (void*)MEM_TO_SHADOW(kHighShadowEnd)); + } + if (kMidMemBeg) { + Printf(" %p %p", + (void*)MEM_TO_SHADOW(kMidShadowBeg), + (void*)MEM_TO_SHADOW(kMidShadowEnd)); + } + Printf("\n"); + Printf("redzone=%zu\n", (uptr)flags()->redzone); + Printf("max_redzone=%zu\n", (uptr)flags()->max_redzone); + Printf("quarantine_size_mb=%zuM\n", (uptr)flags()->quarantine_size_mb); + Printf("thread_local_quarantine_size_kb=%zuK\n", + (uptr)flags()->thread_local_quarantine_size_kb); + Printf("malloc_context_size=%zu\n", + (uptr)common_flags()->malloc_context_size); + + Printf("SHADOW_SCALE: %d\n", (int)SHADOW_SCALE); + Printf("SHADOW_GRANULARITY: %d\n", (int)SHADOW_GRANULARITY); + Printf("SHADOW_OFFSET: 0x%zx\n", (uptr)SHADOW_OFFSET); + CHECK(SHADOW_SCALE >= 3 && SHADOW_SCALE <= 7); + if (kMidMemBeg) + CHECK(kMidShadowBeg > kLowShadowEnd && + kMidMemBeg > kMidShadowEnd && + kHighShadowBeg > kMidMemEnd); +} + +#if defined(__thumb__) && defined(__linux__) +#define START_BACKGROUND_THREAD_IN_ASAN_INTERNAL +#endif + +#ifndef START_BACKGROUND_THREAD_IN_ASAN_INTERNAL +static bool UNUSED __local_asan_dyninit = [] { + MaybeStartBackgroudThread(); + SetSoftRssLimitExceededCallback(AsanSoftRssLimitExceededCallback); + + return false; +}(); +#endif + +static void AsanInitInternal() { + if (LIKELY(asan_inited)) return; + SanitizerToolName = "AddressSanitizer"; + CHECK(!asan_init_is_running && "ASan init calls itself!"); + asan_init_is_running = true; + + CacheBinaryName(); + CheckASLR(); + + // Initialize flags. This must be done early, because most of the + // initialization steps look at flags(). + InitializeFlags(); + + // Stop performing init at this point if we are being loaded via + // dlopen() and the platform supports it. 
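+ // (HandleDlopenInit() is platform-specific; the RTEMS port above, for
+ // example, hard-codes it to false, so the early-out below never fires
+ // there.)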
+ if (SANITIZER_SUPPORTS_INIT_FOR_DLOPEN && UNLIKELY(HandleDlopenInit())) { + asan_init_is_running = false; + VReport(1, "AddressSanitizer init is being performed for dlopen().\n"); + return; + } + + AsanCheckIncompatibleRT(); + AsanCheckDynamicRTPrereqs(); + AvoidCVE_2016_2143(); + + SetCanPoisonMemory(flags()->poison_heap); + SetMallocContextSize(common_flags()->malloc_context_size); + + InitializePlatformExceptionHandlers(); + + InitializeHighMemEnd(); + + // Make sure we are not statically linked. + AsanDoesNotSupportStaticLinkage(); + + // Install tool-specific callbacks in sanitizer_common. + AddDieCallback(AsanDie); + SetCheckFailedCallback(AsanCheckFailed); + SetPrintfAndReportCallback(AppendToErrorMessageBuffer); + + __sanitizer_set_report_path(common_flags()->log_path); + + __asan_option_detect_stack_use_after_return = + flags()->detect_stack_use_after_return; + + __sanitizer::InitializePlatformEarly(); + + // Re-exec ourselves if we need to set additional env or command line args. + MaybeReexec(); + + // Setup internal allocator callback. + SetLowLevelAllocateMinAlignment(SHADOW_GRANULARITY); + SetLowLevelAllocateCallback(OnLowLevelAllocate); + + InitializeAsanInterceptors(); + + // Enable system log ("adb logcat") on Android. + // Doing this before interceptors are initialized crashes in: + // AsanInitInternal -> android_log_write -> __interceptor_strcmp + AndroidLogInit(); + + ReplaceSystemMalloc(); + + DisableCoreDumperIfNecessary(); + + InitializeShadowMemory(); + + AsanTSDInit(PlatformTSDDtor); + InstallDeadlySignalHandlers(AsanOnDeadlySignal); + + AllocatorOptions allocator_options; + allocator_options.SetFrom(flags(), common_flags()); + InitializeAllocator(allocator_options); + +#ifdef START_BACKGROUND_THREAD_IN_ASAN_INTERNAL + MaybeStartBackgroudThread(); + SetSoftRssLimitExceededCallback(AsanSoftRssLimitExceededCallback); +#endif + + // On Linux AsanThread::ThreadStart() calls malloc() that's why asan_inited + // should be set to 1 prior to initializing the threads. + asan_inited = 1; + asan_init_is_running = false; + + if (flags()->atexit) + Atexit(asan_atexit); + + InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir); + + // Now that ASan runtime is (mostly) initialized, deactivate it if + // necessary, so that it can be re-activated when requested. + if (flags()->start_deactivated) + AsanDeactivate(); + + // interceptors + InitTlsSize(); + + // Create main thread. + AsanThread *main_thread = CreateMainThread(); + CHECK_EQ(0, main_thread->tid()); + force_interface_symbols(); // no-op. + SanitizerInitializeUnwinder(); + + if (CAN_SANITIZE_LEAKS) { + __lsan::InitCommonLsan(); + if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) { + if (flags()->halt_on_error) + Atexit(__lsan::DoLeakCheck); + else + Atexit(__lsan::DoRecoverableLeakCheckVoid); + } + } + +#if CAN_SANITIZE_UB + __ubsan::InitAsPlugin(); +#endif + + InitializeSuppressions(); + + if (CAN_SANITIZE_LEAKS) { + // LateInitialize() calls dlsym, which can allocate an error string buffer + // in the TLS. Let's ignore the allocation to avoid reporting a leak. 
+ __lsan::ScopedInterceptorDisabler disabler; + Symbolizer::LateInitialize(); + } else { + Symbolizer::LateInitialize(); + } + + VReport(1, "AddressSanitizer Init done\n"); + + if (flags()->sleep_after_init) { + Report("Sleeping for %d second(s)\n", flags()->sleep_after_init); + SleepForSeconds(flags()->sleep_after_init); + } +} + +// Initialize as requested from some part of ASan runtime library (interceptors, +// allocator, etc). +void AsanInitFromRtl() { + AsanInitInternal(); +} + +#if ASAN_DYNAMIC +// Initialize runtime in case it's LD_PRELOAD-ed into unsanitized executable +// (and thus normal initializers from .preinit_array or modules haven't run). + +class AsanInitializer { +public: // NOLINT + AsanInitializer() { + AsanInitFromRtl(); + } +}; + +static AsanInitializer asan_initializer; +#endif // ASAN_DYNAMIC + +} // namespace __asan + +// ---------------------- Interface ---------------- {{{1 +using namespace __asan; // NOLINT + +void NOINLINE __asan_handle_no_return() { + if (asan_init_is_running) + return; + + int local_stack; + AsanThread *curr_thread = GetCurrentThread(); + uptr PageSize = GetPageSizeCached(); + uptr top, bottom; + if (curr_thread) { + top = curr_thread->stack_top(); + bottom = ((uptr)&local_stack - PageSize) & ~(PageSize - 1); + } else if (SANITIZER_RTEMS) { + // Give up On RTEMS. + return; + } else { + CHECK(!SANITIZER_FUCHSIA); + // If we haven't seen this thread, try asking the OS for stack bounds. + uptr tls_addr, tls_size, stack_size; + GetThreadStackAndTls(/*main=*/false, &bottom, &stack_size, &tls_addr, + &tls_size); + top = bottom + stack_size; + } + static const uptr kMaxExpectedCleanupSize = 64 << 20; // 64M + if (top - bottom > kMaxExpectedCleanupSize) { + static bool reported_warning = false; + if (reported_warning) + return; + reported_warning = true; + Report("WARNING: ASan is ignoring requested __asan_handle_no_return: " + "stack top: %p; bottom %p; size: %p (%zd)\n" + "False positive error reports may follow\n" + "For details see " + "https://github.com/google/sanitizers/issues/189\n", + top, bottom, top - bottom, top - bottom); + return; + } + PoisonShadow(bottom, top - bottom, 0); + if (curr_thread && curr_thread->has_fake_stack()) + curr_thread->fake_stack()->HandleNoReturn(); +} + +void NOINLINE __asan_set_death_callback(void (*callback)(void)) { + SetUserDieCallback(callback); +} + +// Initialize as requested from instrumented application code. +// We use this call as a trigger to wake up ASan from deactivated state. +void __asan_init() { + AsanActivate(); + AsanInitInternal(); +} + +void __asan_version_mismatch_check() { + // Do nothing. +} diff --git a/contrib/compiler-rt/lib/asan/asan_scariness_score.h b/contrib/compiler-rt/lib/asan/asan_scariness_score.h new file mode 100644 index 000000000000..7f095dd29f29 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_scariness_score.h @@ -0,0 +1,74 @@ +//===-- asan_scariness_score.h ----------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Compute the level of scariness of the error message. +// Don't expect any deep science here, just a set of heuristics that suggest +// that e.g. 1-byte-read-global-buffer-overflow is less scary than +// 8-byte-write-stack-use-after-return. 
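+//
+// As a usage sketch of the API defined below (the individual scores here are
+// made up purely for illustration):
+//   ScarinessScore ss;      // constructor clears score and description
+//   ss.Scare(4, "4-byte");
+//   ss.Scare(10, "write");
+//   ss.Scare(30, "heap-buffer-overflow");
+//   ss.Print();  // with print_scariness=1:
+//                //   SCARINESS: 44 (4-byte-write-heap-buffer-overflow)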
+// +// Every error report has one or more features, such as memory access size, +// type (read or write), type of accessed memory (e.g. free-d heap, or a global +// redzone), etc. Every such feature has an int score and a string description. +// The overall score is the sum of all feature scores and the description +// is a concatenation of feature descriptions. +// Examples: +// 17 (4-byte-read-heap-buffer-overflow) +// 65 (multi-byte-write-stack-use-after-return) +// 10 (null-deref) +// +//===----------------------------------------------------------------------===// + +#ifndef ASAN_SCARINESS_SCORE_H +#define ASAN_SCARINESS_SCORE_H + +#include "asan_flags.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_libc.h" + +namespace __asan { +struct ScarinessScoreBase { + void Clear() { + descr[0] = 0; + score = 0; + } + void Scare(int add_to_score, const char *reason) { + if (descr[0]) + internal_strlcat(descr, "-", sizeof(descr)); + internal_strlcat(descr, reason, sizeof(descr)); + score += add_to_score; + }; + int GetScore() const { return score; } + const char *GetDescription() const { return descr; } + void Print() const { + if (score && flags()->print_scariness) + Printf("SCARINESS: %d (%s)\n", score, descr); + } + static void PrintSimple(int score, const char *descr) { + ScarinessScoreBase SSB; + SSB.Clear(); + SSB.Scare(score, descr); + SSB.Print(); + } + + private: + int score; + char descr[1024]; +}; + +struct ScarinessScore : ScarinessScoreBase { + ScarinessScore() { + Clear(); + } +}; + +} // namespace __asan + +#endif // ASAN_SCARINESS_SCORE_H diff --git a/contrib/compiler-rt/lib/asan/asan_shadow_setup.cc b/contrib/compiler-rt/lib/asan/asan_shadow_setup.cc new file mode 100644 index 000000000000..083926e70aa2 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_shadow_setup.cc @@ -0,0 +1,165 @@ +//===-- asan_shadow_setup.cc ----------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Set up the shadow memory. +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" + +// asan_fuchsia.cc and asan_rtems.cc have their own +// InitializeShadowMemory implementation. +#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS + +#include "asan_internal.h" +#include "asan_mapping.h" + +namespace __asan { + +// ---------------------- mmap -------------------- {{{1 +// Reserve memory range [beg, end]. +// We need to use inclusive range because end+1 may not be representable. +void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) { + CHECK_EQ((beg % GetMmapGranularity()), 0); + CHECK_EQ(((end + 1) % GetMmapGranularity()), 0); + uptr size = end - beg + 1; + DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb. + if (!MmapFixedNoReserve(beg, size, name)) { + Report( + "ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. 
" + "Perhaps you're using ulimit -v\n", + size); + Abort(); + } + if (common_flags()->no_huge_pages_for_shadow) NoHugePagesInRegion(beg, size); + if (common_flags()->use_madv_dontdump) DontDumpShadowMemory(beg, size); +} + +static void ProtectGap(uptr addr, uptr size) { + if (!flags()->protect_shadow_gap) { + // The shadow gap is unprotected, so there is a chance that someone + // is actually using this memory. Which means it needs a shadow... + uptr GapShadowBeg = RoundDownTo(MEM_TO_SHADOW(addr), GetPageSizeCached()); + uptr GapShadowEnd = + RoundUpTo(MEM_TO_SHADOW(addr + size), GetPageSizeCached()) - 1; + if (Verbosity()) + Printf( + "protect_shadow_gap=0:" + " not protecting shadow gap, allocating gap's shadow\n" + "|| `[%p, %p]` || ShadowGap's shadow ||\n", + GapShadowBeg, GapShadowEnd); + ReserveShadowMemoryRange(GapShadowBeg, GapShadowEnd, + "unprotected gap shadow"); + return; + } + void *res = MmapFixedNoAccess(addr, size, "shadow gap"); + if (addr == (uptr)res) return; + // A few pages at the start of the address space can not be protected. + // But we really want to protect as much as possible, to prevent this memory + // being returned as a result of a non-FIXED mmap(). + if (addr == kZeroBaseShadowStart) { + uptr step = GetMmapGranularity(); + while (size > step && addr < kZeroBaseMaxShadowStart) { + addr += step; + size -= step; + void *res = MmapFixedNoAccess(addr, size, "shadow gap"); + if (addr == (uptr)res) return; + } + } + + Report( + "ERROR: Failed to protect the shadow gap. " + "ASan cannot proceed correctly. ABORTING.\n"); + DumpProcessMap(); + Die(); +} + +static void MaybeReportLinuxPIEBug() { +#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__aarch64__)) + Report("This might be related to ELF_ET_DYN_BASE change in Linux 4.12.\n"); + Report( + "See https://github.com/google/sanitizers/issues/856 for possible " + "workarounds.\n"); +#endif +} + +void InitializeShadowMemory() { + // Set the shadow memory address to uninitialized. + __asan_shadow_memory_dynamic_address = kDefaultShadowSentinel; + + uptr shadow_start = kLowShadowBeg; + // Detect if a dynamic shadow address must used and find a available location + // when necessary. When dynamic address is used, the macro |kLowShadowBeg| + // expands to |__asan_shadow_memory_dynamic_address| which is + // |kDefaultShadowSentinel|. + bool full_shadow_is_available = false; + if (shadow_start == kDefaultShadowSentinel) { + __asan_shadow_memory_dynamic_address = 0; + CHECK_EQ(0, kLowShadowBeg); + shadow_start = FindDynamicShadowStart(); + if (SANITIZER_LINUX) full_shadow_is_available = true; + } + // Update the shadow memory address (potentially) used by instrumentation. + __asan_shadow_memory_dynamic_address = shadow_start; + + if (kLowShadowBeg) shadow_start -= GetMmapGranularity(); + + if (!full_shadow_is_available) + full_shadow_is_available = + MemoryRangeIsAvailable(shadow_start, kHighShadowEnd); + +#if SANITIZER_LINUX && defined(__x86_64__) && defined(_LP64) && \ + !ASAN_FIXED_MAPPING + if (!full_shadow_is_available) { + kMidMemBeg = kLowMemEnd < 0x3000000000ULL ? 0x3000000000ULL : 0; + kMidMemEnd = kLowMemEnd < 0x3000000000ULL ? 0x4fffffffffULL : 0; + } +#endif + + if (Verbosity()) PrintAddressSpaceLayout(); + + if (full_shadow_is_available) { + // mmap the low shadow plus at least one page at the left. + if (kLowShadowBeg) + ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow"); + // mmap the high shadow. 
+ ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow"); + // protect the gap. + ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1); + CHECK_EQ(kShadowGapEnd, kHighShadowBeg - 1); + } else if (kMidMemBeg && + MemoryRangeIsAvailable(shadow_start, kMidMemBeg - 1) && + MemoryRangeIsAvailable(kMidMemEnd + 1, kHighShadowEnd)) { + CHECK(kLowShadowBeg != kLowShadowEnd); + // mmap the low shadow plus at least one page at the left. + ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow"); + // mmap the mid shadow. + ReserveShadowMemoryRange(kMidShadowBeg, kMidShadowEnd, "mid shadow"); + // mmap the high shadow. + ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow"); + // protect the gaps. + ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1); + ProtectGap(kShadowGap2Beg, kShadowGap2End - kShadowGap2Beg + 1); + ProtectGap(kShadowGap3Beg, kShadowGap3End - kShadowGap3Beg + 1); + } else { + Report( + "Shadow memory range interleaves with an existing memory mapping. " + "ASan cannot proceed correctly. ABORTING.\n"); + Report("ASan shadow was supposed to be located in the [%p-%p] range.\n", + shadow_start, kHighShadowEnd); + MaybeReportLinuxPIEBug(); + DumpProcessMap(); + Die(); + } +} + +} // namespace __asan + +#endif // !SANITIZER_FUCHSIA && !SANITIZER_RTEMS diff --git a/contrib/compiler-rt/lib/asan/asan_stack.cc b/contrib/compiler-rt/lib/asan/asan_stack.cc new file mode 100644 index 000000000000..cf7a587fa65a --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_stack.cc @@ -0,0 +1,40 @@ +//===-- asan_stack.cc -----------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Code for ASan stack trace. +//===----------------------------------------------------------------------===// +#include "asan_internal.h" +#include "asan_stack.h" +#include "sanitizer_common/sanitizer_atomic.h" + +namespace __asan { + +static atomic_uint32_t malloc_context_size; + +void SetMallocContextSize(u32 size) { + atomic_store(&malloc_context_size, size, memory_order_release); +} + +u32 GetMallocContextSize() { + return atomic_load(&malloc_context_size, memory_order_acquire); +} + +} // namespace __asan + +// ------------------ Interface -------------- {{{1 + +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_print_stack_trace() { + using namespace __asan; + PRINT_CURRENT_STACK(); +} +} // extern "C" diff --git a/contrib/compiler-rt/lib/asan/asan_stack.h b/contrib/compiler-rt/lib/asan/asan_stack.h new file mode 100644 index 000000000000..8e9df888d798 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_stack.h @@ -0,0 +1,114 @@ +//===-- asan_stack.h --------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// ASan-private header for asan_stack.cc. 
+//===----------------------------------------------------------------------===// + +#ifndef ASAN_STACK_H +#define ASAN_STACK_H + +#include "asan_flags.h" +#include "asan_thread.h" +#include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_stacktrace.h" + +namespace __asan { + +static const u32 kDefaultMallocContextSize = 30; + +void SetMallocContextSize(u32 size); +u32 GetMallocContextSize(); + +// Get the stack trace with the given pc and bp. +// The pc will be in the position 0 of the resulting stack trace. +// The bp may refer to the current frame or to the caller's frame. +ALWAYS_INLINE +void GetStackTrace(BufferedStackTrace *stack, uptr max_depth, uptr pc, uptr bp, + void *context, bool fast) { +#if SANITIZER_WINDOWS + stack->Unwind(max_depth, pc, bp, context, 0, 0, fast); +#else + AsanThread *t; + stack->size = 0; + if (LIKELY(asan_inited)) { + if ((t = GetCurrentThread()) && !t->isUnwinding()) { + uptr stack_top = t->stack_top(); + uptr stack_bottom = t->stack_bottom(); + ScopedUnwinding unwind_scope(t); + if (!SANITIZER_MIPS || IsValidFrame(bp, stack_top, stack_bottom)) { + stack->Unwind(max_depth, pc, bp, context, stack_top, stack_bottom, + fast); + } + } else if (!t && !fast) { + /* If GetCurrentThread() has failed, try to do slow unwind anyways. */ + stack->Unwind(max_depth, pc, bp, context, 0, 0, false); + } + } +#endif // SANITIZER_WINDOWS +} + +} // namespace __asan + +// NOTE: A Rule of thumb is to retrieve stack trace in the interceptors +// as early as possible (in functions exposed to the user), as we generally +// don't want stack trace to contain functions from ASan internals. + +#define GET_STACK_TRACE(max_size, fast) \ + BufferedStackTrace stack; \ + if (max_size <= 2) { \ + stack.size = max_size; \ + if (max_size > 0) { \ + stack.top_frame_bp = GET_CURRENT_FRAME(); \ + stack.trace_buffer[0] = StackTrace::GetCurrentPc(); \ + if (max_size > 1) stack.trace_buffer[1] = GET_CALLER_PC(); \ + } \ + } else { \ + GetStackTrace(&stack, max_size, StackTrace::GetCurrentPc(), \ + GET_CURRENT_FRAME(), 0, fast); \ + } + +#define GET_STACK_TRACE_FATAL(pc, bp) \ + BufferedStackTrace stack; \ + GetStackTrace(&stack, kStackTraceMax, pc, bp, 0, \ + common_flags()->fast_unwind_on_fatal) + +#define GET_STACK_TRACE_SIGNAL(sig) \ + BufferedStackTrace stack; \ + GetStackTrace(&stack, kStackTraceMax, (sig).pc, (sig).bp, (sig).context, \ + common_flags()->fast_unwind_on_fatal) + +#define GET_STACK_TRACE_FATAL_HERE \ + GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal) + +#define GET_STACK_TRACE_CHECK_HERE \ + GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_check) + +#define GET_STACK_TRACE_THREAD \ + GET_STACK_TRACE(kStackTraceMax, true) + +#define GET_STACK_TRACE_MALLOC \ + GET_STACK_TRACE(GetMallocContextSize(), common_flags()->fast_unwind_on_malloc) + +#define GET_STACK_TRACE_FREE GET_STACK_TRACE_MALLOC + +#define PRINT_CURRENT_STACK() \ + { \ + GET_STACK_TRACE_FATAL_HERE; \ + stack.Print(); \ + } + +#define PRINT_CURRENT_STACK_CHECK() \ + { \ + GET_STACK_TRACE_CHECK_HERE; \ + stack.Print(); \ + } + +#endif // ASAN_STACK_H diff --git a/contrib/compiler-rt/lib/asan/asan_stats.cc b/contrib/compiler-rt/lib/asan/asan_stats.cc new file mode 100644 index 000000000000..b8c68c32dfbf --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_stats.cc @@ -0,0 +1,174 @@ +//===-- asan_stats.cc -----------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of 
Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Code related to statistics collected by AddressSanitizer. +//===----------------------------------------------------------------------===// +#include "asan_interceptors.h" +#include "asan_internal.h" +#include "asan_stats.h" +#include "asan_thread.h" +#include "sanitizer_common/sanitizer_allocator_interface.h" +#include "sanitizer_common/sanitizer_mutex.h" +#include "sanitizer_common/sanitizer_stackdepot.h" + +namespace __asan { + +AsanStats::AsanStats() { + Clear(); +} + +void AsanStats::Clear() { + CHECK(REAL(memset)); + REAL(memset)(this, 0, sizeof(AsanStats)); +} + +static void PrintMallocStatsArray(const char *prefix, + uptr (&array)[kNumberOfSizeClasses]) { + Printf("%s", prefix); + for (uptr i = 0; i < kNumberOfSizeClasses; i++) { + if (!array[i]) continue; + Printf("%zu:%zu; ", i, array[i]); + } + Printf("\n"); +} + +void AsanStats::Print() { + Printf("Stats: %zuM malloced (%zuM for red zones) by %zu calls\n", + malloced>>20, malloced_redzones>>20, mallocs); + Printf("Stats: %zuM realloced by %zu calls\n", realloced>>20, reallocs); + Printf("Stats: %zuM freed by %zu calls\n", freed>>20, frees); + Printf("Stats: %zuM really freed by %zu calls\n", + really_freed>>20, real_frees); + Printf("Stats: %zuM (%zuM-%zuM) mmaped; %zu maps, %zu unmaps\n", + (mmaped-munmaped)>>20, mmaped>>20, munmaped>>20, + mmaps, munmaps); + + PrintMallocStatsArray(" mallocs by size class: ", malloced_by_size); + Printf("Stats: malloc large: %zu\n", malloc_large); +} + +void AsanStats::MergeFrom(const AsanStats *stats) { + uptr *dst_ptr = reinterpret_cast<uptr*>(this); + const uptr *src_ptr = reinterpret_cast<const uptr*>(stats); + uptr num_fields = sizeof(*this) / sizeof(uptr); + for (uptr i = 0; i < num_fields; i++) + dst_ptr[i] += src_ptr[i]; +} + +static BlockingMutex print_lock(LINKER_INITIALIZED); + +static AsanStats unknown_thread_stats(LINKER_INITIALIZED); +static AsanStats dead_threads_stats(LINKER_INITIALIZED); +static BlockingMutex dead_threads_stats_lock(LINKER_INITIALIZED); +// Required for malloc_zone_statistics() on OS X. This can't be stored in +// per-thread AsanStats. +static uptr max_malloced_memory; + +static void MergeThreadStats(ThreadContextBase *tctx_base, void *arg) { + AsanStats *accumulated_stats = reinterpret_cast<AsanStats*>(arg); + AsanThreadContext *tctx = static_cast<AsanThreadContext*>(tctx_base); + if (AsanThread *t = tctx->thread) + accumulated_stats->MergeFrom(&t->stats()); +} + +static void GetAccumulatedStats(AsanStats *stats) { + stats->Clear(); + { + ThreadRegistryLock l(&asanThreadRegistry()); + asanThreadRegistry() + .RunCallbackForEachThreadLocked(MergeThreadStats, stats); + } + stats->MergeFrom(&unknown_thread_stats); + { + BlockingMutexLock lock(&dead_threads_stats_lock); + stats->MergeFrom(&dead_threads_stats); + } + // This is not very accurate: we may miss allocation peaks that happen + // between two updates of accumulated_stats_. For more accurate bookkeeping + // the maximum should be updated on every malloc(), which is unacceptable. 
+ if (max_malloced_memory < stats->malloced) { + max_malloced_memory = stats->malloced; + } +} + +void FlushToDeadThreadStats(AsanStats *stats) { + BlockingMutexLock lock(&dead_threads_stats_lock); + dead_threads_stats.MergeFrom(stats); + stats->Clear(); +} + +void FillMallocStatistics(AsanMallocStats *malloc_stats) { + AsanStats stats; + GetAccumulatedStats(&stats); + malloc_stats->blocks_in_use = stats.mallocs; + malloc_stats->size_in_use = stats.malloced; + malloc_stats->max_size_in_use = max_malloced_memory; + malloc_stats->size_allocated = stats.mmaped; +} + +AsanStats &GetCurrentThreadStats() { + AsanThread *t = GetCurrentThread(); + return (t) ? t->stats() : unknown_thread_stats; +} + +static void PrintAccumulatedStats() { + AsanStats stats; + GetAccumulatedStats(&stats); + // Use lock to keep reports from mixing up. + BlockingMutexLock lock(&print_lock); + stats.Print(); + StackDepotStats *stack_depot_stats = StackDepotGetStats(); + Printf("Stats: StackDepot: %zd ids; %zdM allocated\n", + stack_depot_stats->n_uniq_ids, stack_depot_stats->allocated >> 20); + PrintInternalAllocatorStats(); +} + +} // namespace __asan + +// ---------------------- Interface ---------------- {{{1 +using namespace __asan; // NOLINT + +uptr __sanitizer_get_current_allocated_bytes() { + AsanStats stats; + GetAccumulatedStats(&stats); + uptr malloced = stats.malloced; + uptr freed = stats.freed; + // Return sane value if malloced < freed due to racy + // way we update accumulated stats. + return (malloced > freed) ? malloced - freed : 1; +} + +uptr __sanitizer_get_heap_size() { + AsanStats stats; + GetAccumulatedStats(&stats); + return stats.mmaped - stats.munmaped; +} + +uptr __sanitizer_get_free_bytes() { + AsanStats stats; + GetAccumulatedStats(&stats); + uptr total_free = stats.mmaped + - stats.munmaped + + stats.really_freed; + uptr total_used = stats.malloced + + stats.malloced_redzones; + // Return sane value if total_free < total_used due to racy + // way we update accumulated stats. + return (total_free > total_used) ? total_free - total_used : 1; +} + +uptr __sanitizer_get_unmapped_bytes() { + return 0; +} + +void __asan_print_accumulated_stats() { + PrintAccumulatedStats(); +} diff --git a/contrib/compiler-rt/lib/asan/asan_stats.h b/contrib/compiler-rt/lib/asan/asan_stats.h new file mode 100644 index 000000000000..4605135e166f --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_stats.h @@ -0,0 +1,72 @@ +//===-- asan_stats.h --------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// ASan-private header for statistics. +//===----------------------------------------------------------------------===// +#ifndef ASAN_STATS_H +#define ASAN_STATS_H + +#include "asan_allocator.h" +#include "asan_internal.h" + +namespace __asan { + +// AsanStats struct is NOT thread-safe. +// Each AsanThread has its own AsanStats, which are sometimes flushed +// to the accumulated AsanStats. +struct AsanStats { + // AsanStats must be a struct consisting of uptr fields only. + // When merging two AsanStats structs, we treat them as arrays of uptr. 
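That comment is the contract MergeFrom depends on. A standalone sketch of the flattened-array merge, with an invented struct standing in for AsanStats:

#include <cstddef>
#include <cstdint>

struct Counters {  // like AsanStats: word-sized fields only
  uintptr_t mallocs;
  uintptr_t frees;
  uintptr_t bytes;
};
static_assert(sizeof(Counters) % sizeof(uintptr_t) == 0,
              "merge below assumes an array of word-sized fields");

// Field-by-field accumulation without naming any field, mirroring
// AsanStats::MergeFrom.
void Merge(Counters *dst, const Counters *src) {
  uintptr_t *d = reinterpret_cast<uintptr_t *>(dst);
  const uintptr_t *s = reinterpret_cast<const uintptr_t *>(src);
  for (std::size_t i = 0; i < sizeof(Counters) / sizeof(uintptr_t); i++)
    d[i] += s[i];
}

int main() {
  Counters total = {0, 0, 0}, thread = {3, 1, 4096};
  Merge(&total, &thread);
  return static_cast<int>(total.mallocs);  // 3
}

Adding a non-uptr field would silently corrupt such a merge, which is why the runtime's field list, continued below, is uptr throughout.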
+ uptr mallocs; + uptr malloced; + uptr malloced_redzones; + uptr frees; + uptr freed; + uptr real_frees; + uptr really_freed; + uptr reallocs; + uptr realloced; + uptr mmaps; + uptr mmaped; + uptr munmaps; + uptr munmaped; + uptr malloc_large; + uptr malloced_by_size[kNumberOfSizeClasses]; + + // Ctor for global AsanStats (accumulated stats for dead threads). + explicit AsanStats(LinkerInitialized) { } + // Creates empty stats. + AsanStats(); + + void Print(); // Prints formatted stats to stderr. + void Clear(); + void MergeFrom(const AsanStats *stats); +}; + +// Returns stats for GetCurrentThread(), or stats for fake "unknown thread" +// if GetCurrentThread() returns 0. +AsanStats &GetCurrentThreadStats(); +// Flushes a given stats into accumulated stats of dead threads. +void FlushToDeadThreadStats(AsanStats *stats); + +// A cross-platform equivalent of malloc_statistics_t on Mac OS. +struct AsanMallocStats { + uptr blocks_in_use; + uptr size_in_use; + uptr max_size_in_use; + uptr size_allocated; +}; + +void FillMallocStatistics(AsanMallocStats *malloc_stats); + +} // namespace __asan + +#endif // ASAN_STATS_H diff --git a/contrib/compiler-rt/lib/asan/asan_suppressions.cc b/contrib/compiler-rt/lib/asan/asan_suppressions.cc new file mode 100644 index 000000000000..ac8aa023f6ba --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_suppressions.cc @@ -0,0 +1,105 @@ +//===-- asan_suppressions.cc ----------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Issue suppression and suppression-related functions. +//===----------------------------------------------------------------------===// + +#include "asan_suppressions.h" + +#include "asan_stack.h" +#include "sanitizer_common/sanitizer_placement_new.h" +#include "sanitizer_common/sanitizer_suppressions.h" +#include "sanitizer_common/sanitizer_symbolizer.h" + +namespace __asan { + +ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)]; +static SuppressionContext *suppression_ctx = nullptr; +static const char kInterceptorName[] = "interceptor_name"; +static const char kInterceptorViaFunction[] = "interceptor_via_fun"; +static const char kInterceptorViaLibrary[] = "interceptor_via_lib"; +static const char kODRViolation[] = "odr_violation"; +static const char *kSuppressionTypes[] = { + kInterceptorName, kInterceptorViaFunction, kInterceptorViaLibrary, + kODRViolation}; + +SANITIZER_INTERFACE_WEAK_DEF(const char *, __asan_default_suppressions, void) { + return ""; +} + +void InitializeSuppressions() { + CHECK_EQ(nullptr, suppression_ctx); + suppression_ctx = new (suppression_placeholder) // NOLINT + SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes)); + suppression_ctx->ParseFromFile(flags()->suppressions); + if (&__asan_default_suppressions) + suppression_ctx->Parse(__asan_default_suppressions()); +} + +bool IsInterceptorSuppressed(const char *interceptor_name) { + CHECK(suppression_ctx); + Suppression *s; + // Match "interceptor_name" suppressions. 
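Suppressions reach this code from two places: the file named by the suppressions= flag, and the weak __asan_default_suppressions hook defined above, which lets a project compile its suppressions straight into the binary. A sketch of the user-side override; the hook and the suppression type names are real, the specific entries are made up:

// Linked into the instrumented program, not into the runtime; ASan's
// InitializeSuppressions() calls this and parses the returned string.
extern "C" const char *__asan_default_suppressions() {
  return "interceptor_via_fun:crypto_hash\n"
         "interceptor_via_lib:libvendor.so\n"
         "odr_violation:g_legacy_counter\n";
}

int main() { return 0; }

With suppressions loaded from either source, checking an interceptor name reduces to the single Match call below.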
+ return suppression_ctx->Match(interceptor_name, kInterceptorName, &s); +} + +bool HaveStackTraceBasedSuppressions() { + CHECK(suppression_ctx); + return suppression_ctx->HasSuppressionType(kInterceptorViaFunction) || + suppression_ctx->HasSuppressionType(kInterceptorViaLibrary); +} + +bool IsODRViolationSuppressed(const char *global_var_name) { + CHECK(suppression_ctx); + Suppression *s; + // Match "odr_violation" suppressions. + return suppression_ctx->Match(global_var_name, kODRViolation, &s); +} + +bool IsStackTraceSuppressed(const StackTrace *stack) { + if (!HaveStackTraceBasedSuppressions()) + return false; + + CHECK(suppression_ctx); + Symbolizer *symbolizer = Symbolizer::GetOrInit(); + Suppression *s; + for (uptr i = 0; i < stack->size && stack->trace[i]; i++) { + uptr addr = stack->trace[i]; + + if (suppression_ctx->HasSuppressionType(kInterceptorViaLibrary)) { + // Match "interceptor_via_lib" suppressions. + if (const char *module_name = symbolizer->GetModuleNameForPc(addr)) + if (suppression_ctx->Match(module_name, kInterceptorViaLibrary, &s)) + return true; + } + + if (suppression_ctx->HasSuppressionType(kInterceptorViaFunction)) { + SymbolizedStack *frames = symbolizer->SymbolizePC(addr); + CHECK(frames); + for (SymbolizedStack *cur = frames; cur; cur = cur->next) { + const char *function_name = cur->info.function; + if (!function_name) { + continue; + } + // Match "interceptor_via_fun" suppressions. + if (suppression_ctx->Match(function_name, kInterceptorViaFunction, + &s)) { + frames->ClearAll(); + return true; + } + } + frames->ClearAll(); + } + } + return false; +} + +} // namespace __asan diff --git a/contrib/compiler-rt/lib/asan/asan_suppressions.h b/contrib/compiler-rt/lib/asan/asan_suppressions.h new file mode 100644 index 000000000000..5246b4b30334 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_suppressions.h @@ -0,0 +1,30 @@ +//===-- asan_suppressions.h -------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// ASan-private header for asan_suppressions.cc. +//===----------------------------------------------------------------------===// +#ifndef ASAN_SUPPRESSIONS_H +#define ASAN_SUPPRESSIONS_H + +#include "asan_internal.h" +#include "sanitizer_common/sanitizer_stacktrace.h" + +namespace __asan { + +void InitializeSuppressions(); +bool IsInterceptorSuppressed(const char *interceptor_name); +bool HaveStackTraceBasedSuppressions(); +bool IsStackTraceSuppressed(const StackTrace *stack); +bool IsODRViolationSuppressed(const char *global_var_name); + +} // namespace __asan + +#endif // ASAN_SUPPRESSIONS_H diff --git a/contrib/compiler-rt/lib/asan/asan_thread.cc b/contrib/compiler-rt/lib/asan/asan_thread.cc new file mode 100644 index 000000000000..0895e4ce0d93 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_thread.cc @@ -0,0 +1,537 @@ +//===-- asan_thread.cc ----------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. 
+// +// Thread-related code. +//===----------------------------------------------------------------------===// +#include "asan_allocator.h" +#include "asan_interceptors.h" +#include "asan_poisoning.h" +#include "asan_stack.h" +#include "asan_thread.h" +#include "asan_mapping.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_placement_new.h" +#include "sanitizer_common/sanitizer_stackdepot.h" +#include "sanitizer_common/sanitizer_tls_get_addr.h" +#include "lsan/lsan_common.h" + +namespace __asan { + +// AsanThreadContext implementation. + +void AsanThreadContext::OnCreated(void *arg) { + CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs*>(arg); + if (args->stack) + stack_id = StackDepotPut(*args->stack); + thread = args->thread; + thread->set_context(this); +} + +void AsanThreadContext::OnFinished() { + // Drop the link to the AsanThread object. + thread = nullptr; +} + +// MIPS requires aligned address +static ALIGNED(16) char thread_registry_placeholder[sizeof(ThreadRegistry)]; +static ThreadRegistry *asan_thread_registry; + +static BlockingMutex mu_for_thread_context(LINKER_INITIALIZED); +static LowLevelAllocator allocator_for_thread_context; + +static ThreadContextBase *GetAsanThreadContext(u32 tid) { + BlockingMutexLock lock(&mu_for_thread_context); + return new(allocator_for_thread_context) AsanThreadContext(tid); +} + +ThreadRegistry &asanThreadRegistry() { + static bool initialized; + // Don't worry about thread_safety - this should be called when there is + // a single thread. + if (!initialized) { + // Never reuse ASan threads: we store pointer to AsanThreadContext + // in TSD and can't reliably tell when no more TSD destructors will + // be called. It would be wrong to reuse AsanThreadContext for another + // thread before all TSD destructors will be called for it. + asan_thread_registry = new(thread_registry_placeholder) ThreadRegistry( + GetAsanThreadContext, kMaxNumberOfThreads, kMaxNumberOfThreads); + initialized = true; + } + return *asan_thread_registry; +} + +AsanThreadContext *GetThreadContextByTidLocked(u32 tid) { + return static_cast<AsanThreadContext *>( + asanThreadRegistry().GetThreadLocked(tid)); +} + +// AsanThread implementation. + +AsanThread *AsanThread::Create(thread_callback_t start_routine, void *arg, + u32 parent_tid, StackTrace *stack, + bool detached) { + uptr PageSize = GetPageSizeCached(); + uptr size = RoundUpTo(sizeof(AsanThread), PageSize); + AsanThread *thread = (AsanThread*)MmapOrDie(size, __func__); + thread->start_routine_ = start_routine; + thread->arg_ = arg; + AsanThreadContext::CreateThreadContextArgs args = {thread, stack}; + asanThreadRegistry().CreateThread(*reinterpret_cast<uptr *>(thread), detached, + parent_tid, &args); + + return thread; +} + +void AsanThread::TSDDtor(void *tsd) { + AsanThreadContext *context = (AsanThreadContext*)tsd; + VReport(1, "T%d TSDDtor\n", context->tid); + if (context->thread) + context->thread->Destroy(); +} + +void AsanThread::Destroy() { + int tid = this->tid(); + VReport(1, "T%d exited\n", tid); + + malloc_storage().CommitBack(); + if (common_flags()->use_sigaltstack) UnsetAlternateSignalStack(); + asanThreadRegistry().FinishThread(tid); + FlushToDeadThreadStats(&stats_); + // We also clear the shadow on thread destruction because + // some code may still be executing in later TSD destructors + // and we don't want it to have any poisoned stack. 
+  ClearShadowForThreadStackAndTLS();
+  DeleteFakeStack(tid);
+  uptr size = RoundUpTo(sizeof(AsanThread), GetPageSizeCached());
+  UnmapOrDie(this, size);
+  DTLS_Destroy();
+}
+
+void AsanThread::StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom,
+                                  uptr size) {
+  if (atomic_load(&stack_switching_, memory_order_relaxed)) {
+    Report("ERROR: starting fiber switch while in fiber switch\n");
+    Die();
+  }
+
+  next_stack_bottom_ = bottom;
+  next_stack_top_ = bottom + size;
+  atomic_store(&stack_switching_, 1, memory_order_release);
+
+  FakeStack *current_fake_stack = fake_stack_;
+  if (fake_stack_save)
+    *fake_stack_save = fake_stack_;
+  fake_stack_ = nullptr;
+  SetTLSFakeStack(nullptr);
+  // If fake_stack_save is null, the fiber is dying, so delete its fake stack.
+  if (!fake_stack_save && current_fake_stack)
+    current_fake_stack->Destroy(this->tid());
+}
+
+void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save,
+                                   uptr *bottom_old,
+                                   uptr *size_old) {
+  if (!atomic_load(&stack_switching_, memory_order_relaxed)) {
+    Report("ERROR: finishing a fiber switch that has not started\n");
+    Die();
+  }
+
+  if (fake_stack_save) {
+    SetTLSFakeStack(fake_stack_save);
+    fake_stack_ = fake_stack_save;
+  }
+
+  if (bottom_old)
+    *bottom_old = stack_bottom_;
+  if (size_old)
+    *size_old = stack_top_ - stack_bottom_;
+  stack_bottom_ = next_stack_bottom_;
+  stack_top_ = next_stack_top_;
+  atomic_store(&stack_switching_, 0, memory_order_release);
+  next_stack_top_ = 0;
+  next_stack_bottom_ = 0;
+}
+
+inline AsanThread::StackBounds AsanThread::GetStackBounds() const {
+  if (!atomic_load(&stack_switching_, memory_order_acquire)) {
+    // Make sure the stack bounds are fully initialized.
+    if (stack_bottom_ >= stack_top_) return {0, 0};
+    return {stack_bottom_, stack_top_};
+  }
+  char local;
+  const uptr cur_stack = (uptr)&local;
+  // Note: we need to check the next stack first, because FinishSwitchFiber
+  // may be in the process of overwriting stack_top_/bottom_. But in that case
+  // we are already on the next stack.
+  if (cur_stack >= next_stack_bottom_ && cur_stack < next_stack_top_)
+    return {next_stack_bottom_, next_stack_top_};
+  return {stack_bottom_, stack_top_};
+}
+
+uptr AsanThread::stack_top() {
+  return GetStackBounds().top;
+}
+
+uptr AsanThread::stack_bottom() {
+  return GetStackBounds().bottom;
+}
+
+uptr AsanThread::stack_size() {
+  const auto bounds = GetStackBounds();
+  return bounds.top - bounds.bottom;
+}
+
+// We want to create the FakeStack lazily on the first use, but not earlier
+// than the stack size is known; also, the procedure has to be
+// async-signal safe.
+FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
+  uptr stack_size = this->stack_size();
+  if (stack_size == 0)  // stack_size is not yet available, don't use FakeStack.
+    return nullptr;
+  uptr old_val = 0;
+  // fake_stack_ has 3 states:
+  // 0   -- not initialized
+  // 1   -- being initialized
+  // ptr -- initialized
+  // This CAS checks whether the state was 0 and, if so, changes it to state 1;
+  // if that succeeded, it initializes the pointer.
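The same 0/1/pointer protocol, reduced to a standalone sketch with std::atomic standing in for the runtime's own atomics (Resource and CreateResource are invented names):

#include <atomic>
#include <cstdint>

struct Resource {};
static Resource *CreateResource() { return new Resource; }  // runs at most once

static std::atomic<uintptr_t> g_state{0};  // 0, 1, or a Resource*

// Returns the resource if this caller won the initialization race,
// null otherwise (someone else is initializing or already finished).
static Resource *LazyInit() {
  uintptr_t expected = 0;
  if (g_state.compare_exchange_strong(expected, 1,
                                      std::memory_order_relaxed)) {
    Resource *r = CreateResource();
    g_state.store(reinterpret_cast<uintptr_t>(r), std::memory_order_release);
    return r;
  }
  return nullptr;
}

int main() { return LazyInit() ? 0 : 1; }

The runtime's real CAS on fake_stack_ follows.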
+ if (atomic_compare_exchange_strong( + reinterpret_cast<atomic_uintptr_t *>(&fake_stack_), &old_val, 1UL, + memory_order_relaxed)) { + uptr stack_size_log = Log2(RoundUpToPowerOfTwo(stack_size)); + CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log); + stack_size_log = + Min(stack_size_log, static_cast<uptr>(flags()->max_uar_stack_size_log)); + stack_size_log = + Max(stack_size_log, static_cast<uptr>(flags()->min_uar_stack_size_log)); + fake_stack_ = FakeStack::Create(stack_size_log); + SetTLSFakeStack(fake_stack_); + return fake_stack_; + } + return nullptr; +} + +void AsanThread::Init(const InitOptions *options) { + next_stack_top_ = next_stack_bottom_ = 0; + atomic_store(&stack_switching_, false, memory_order_release); + CHECK_EQ(this->stack_size(), 0U); + SetThreadStackAndTls(options); + if (stack_top_ != stack_bottom_) { + CHECK_GT(this->stack_size(), 0U); + CHECK(AddrIsInMem(stack_bottom_)); + CHECK(AddrIsInMem(stack_top_ - 1)); + } + ClearShadowForThreadStackAndTLS(); + fake_stack_ = nullptr; + if (__asan_option_detect_stack_use_after_return) + AsyncSignalSafeLazyInitFakeStack(); + int local = 0; + VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(), + (void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_, + &local); +} + +// Fuchsia and RTEMS don't use ThreadStart. +// asan_fuchsia.c/asan_rtems.c define CreateMainThread and +// SetThreadStackAndTls. +#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS + +thread_return_t AsanThread::ThreadStart( + tid_t os_id, atomic_uintptr_t *signal_thread_is_registered) { + Init(); + asanThreadRegistry().StartThread(tid(), os_id, /*workerthread*/ false, + nullptr); + if (signal_thread_is_registered) + atomic_store(signal_thread_is_registered, 1, memory_order_release); + + if (common_flags()->use_sigaltstack) SetAlternateSignalStack(); + + if (!start_routine_) { + // start_routine_ == 0 if we're on the main thread or on one of the + // OS X libdispatch worker threads. But nobody is supposed to call + // ThreadStart() for the worker threads. + CHECK_EQ(tid(), 0); + return 0; + } + + thread_return_t res = start_routine_(arg_); + + // On POSIX systems we defer this to the TSD destructor. LSan will consider + // the thread's memory as non-live from the moment we call Destroy(), even + // though that memory might contain pointers to heap objects which will be + // cleaned up by a user-defined TSD destructor. Thus, calling Destroy() before + // the TSD destructors have run might cause false positives in LSan. + if (!SANITIZER_POSIX) + this->Destroy(); + + return res; +} + +AsanThread *CreateMainThread() { + AsanThread *main_thread = AsanThread::Create( + /* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ 0, + /* stack */ nullptr, /* detached */ true); + SetCurrentThread(main_thread); + main_thread->ThreadStart(internal_getpid(), + /* signal_thread_is_registered */ nullptr); + return main_thread; +} + +// This implementation doesn't use the argument, which is just passed down +// from the caller of Init (which see, above). It's only there to support +// OS-specific implementations that need more information passed through. 
+void AsanThread::SetThreadStackAndTls(const InitOptions *options) { + DCHECK_EQ(options, nullptr); + uptr tls_size = 0; + uptr stack_size = 0; + GetThreadStackAndTls(tid() == 0, &stack_bottom_, &stack_size, &tls_begin_, + &tls_size); + stack_top_ = stack_bottom_ + stack_size; + tls_end_ = tls_begin_ + tls_size; + dtls_ = DTLS_Get(); + + if (stack_top_ != stack_bottom_) { + int local; + CHECK(AddrIsInStack((uptr)&local)); + } +} + +#endif // !SANITIZER_FUCHSIA && !SANITIZER_RTEMS + +void AsanThread::ClearShadowForThreadStackAndTLS() { + if (stack_top_ != stack_bottom_) + PoisonShadow(stack_bottom_, stack_top_ - stack_bottom_, 0); + if (tls_begin_ != tls_end_) { + uptr tls_begin_aligned = RoundDownTo(tls_begin_, SHADOW_GRANULARITY); + uptr tls_end_aligned = RoundUpTo(tls_end_, SHADOW_GRANULARITY); + FastPoisonShadowPartialRightRedzone(tls_begin_aligned, + tls_end_ - tls_begin_aligned, + tls_end_aligned - tls_end_, 0); + } +} + +bool AsanThread::GetStackFrameAccessByAddr(uptr addr, + StackFrameAccess *access) { + if (stack_top_ == stack_bottom_) + return false; + + uptr bottom = 0; + if (AddrIsInStack(addr)) { + bottom = stack_bottom(); + } else if (has_fake_stack()) { + bottom = fake_stack()->AddrIsInFakeStack(addr); + CHECK(bottom); + access->offset = addr - bottom; + access->frame_pc = ((uptr*)bottom)[2]; + access->frame_descr = (const char *)((uptr*)bottom)[1]; + return true; + } + uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8); // align addr. + uptr mem_ptr = RoundDownTo(aligned_addr, SHADOW_GRANULARITY); + u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr); + u8 *shadow_bottom = (u8*)MemToShadow(bottom); + + while (shadow_ptr >= shadow_bottom && + *shadow_ptr != kAsanStackLeftRedzoneMagic) { + shadow_ptr--; + mem_ptr -= SHADOW_GRANULARITY; + } + + while (shadow_ptr >= shadow_bottom && + *shadow_ptr == kAsanStackLeftRedzoneMagic) { + shadow_ptr--; + mem_ptr -= SHADOW_GRANULARITY; + } + + if (shadow_ptr < shadow_bottom) { + return false; + } + + uptr* ptr = (uptr*)(mem_ptr + SHADOW_GRANULARITY); + CHECK(ptr[0] == kCurrentStackFrameMagic); + access->offset = addr - (uptr)ptr; + access->frame_pc = ptr[2]; + access->frame_descr = (const char*)ptr[1]; + return true; +} + +uptr AsanThread::GetStackVariableShadowStart(uptr addr) { + uptr bottom = 0; + if (AddrIsInStack(addr)) { + bottom = stack_bottom(); + } else if (has_fake_stack()) { + bottom = fake_stack()->AddrIsInFakeStack(addr); + CHECK(bottom); + } else + return 0; + + uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8); // align addr. 
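MemToShadow, used on the next line, is the classic ASan direct mapping: each shadow byte describes SHADOW_GRANULARITY (eight, in the usual configurations) application bytes, so Shadow = (Mem >> 3) + Offset. A self-contained sketch; the offset shown is the conventional Linux/x86-64 value and is otherwise illustrative:

#include <cstdint>
#include <cstdio>

constexpr unsigned kShadowScale = 3;             // log2(SHADOW_GRANULARITY)
constexpr uintptr_t kShadowOffset = 0x7fff8000;  // platform-specific constant

constexpr uintptr_t MemToShadowSketch(uintptr_t a) {
  return (a >> kShadowScale) + kShadowOffset;
}

int main() {
  uintptr_t addr = 0x602000000010;  // arbitrary 64-bit example address
  std::printf("shadow byte for %#llx is at %#llx\n",
              (unsigned long long)addr,
              (unsigned long long)MemToShadowSketch(addr));
}

With that mapping in mind, the loop below simply walks shadow bytes downward until it reaches a redzone marker.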
+ u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr); + u8 *shadow_bottom = (u8*)MemToShadow(bottom); + + while (shadow_ptr >= shadow_bottom && + (*shadow_ptr != kAsanStackLeftRedzoneMagic && + *shadow_ptr != kAsanStackMidRedzoneMagic && + *shadow_ptr != kAsanStackRightRedzoneMagic)) + shadow_ptr--; + + return (uptr)shadow_ptr + 1; +} + +bool AsanThread::AddrIsInStack(uptr addr) { + const auto bounds = GetStackBounds(); + return addr >= bounds.bottom && addr < bounds.top; +} + +static bool ThreadStackContainsAddress(ThreadContextBase *tctx_base, + void *addr) { + AsanThreadContext *tctx = static_cast<AsanThreadContext*>(tctx_base); + AsanThread *t = tctx->thread; + if (!t) return false; + if (t->AddrIsInStack((uptr)addr)) return true; + if (t->has_fake_stack() && t->fake_stack()->AddrIsInFakeStack((uptr)addr)) + return true; + return false; +} + +AsanThread *GetCurrentThread() { + if (SANITIZER_RTEMS && !asan_inited) + return nullptr; + + AsanThreadContext *context = + reinterpret_cast<AsanThreadContext *>(AsanTSDGet()); + if (!context) { + if (SANITIZER_ANDROID) { + // On Android, libc constructor is called _after_ asan_init, and cleans up + // TSD. Try to figure out if this is still the main thread by the stack + // address. We are not entirely sure that we have correct main thread + // limits, so only do this magic on Android, and only if the found thread + // is the main thread. + AsanThreadContext *tctx = GetThreadContextByTidLocked(0); + if (tctx && ThreadStackContainsAddress(tctx, &context)) { + SetCurrentThread(tctx->thread); + return tctx->thread; + } + } + return nullptr; + } + return context->thread; +} + +void SetCurrentThread(AsanThread *t) { + CHECK(t->context()); + VReport(2, "SetCurrentThread: %p for thread %p\n", t->context(), + (void *)GetThreadSelf()); + // Make sure we do not reset the current AsanThread. + CHECK_EQ(0, AsanTSDGet()); + AsanTSDSet(t->context()); + CHECK_EQ(t->context(), AsanTSDGet()); +} + +u32 GetCurrentTidOrInvalid() { + AsanThread *t = GetCurrentThread(); + return t ? t->tid() : kInvalidTid; +} + +AsanThread *FindThreadByStackAddress(uptr addr) { + asanThreadRegistry().CheckLocked(); + AsanThreadContext *tctx = static_cast<AsanThreadContext *>( + asanThreadRegistry().FindThreadContextLocked(ThreadStackContainsAddress, + (void *)addr)); + return tctx ? tctx->thread : nullptr; +} + +void EnsureMainThreadIDIsCorrect() { + AsanThreadContext *context = + reinterpret_cast<AsanThreadContext *>(AsanTSDGet()); + if (context && (context->tid == 0)) + context->os_id = GetTid(); +} + +__asan::AsanThread *GetAsanThreadByOsIDLocked(tid_t os_id) { + __asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>( + __asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id)); + if (!context) return nullptr; + return context->thread; +} +} // namespace __asan + +// --- Implementation of LSan-specific functions --- {{{1 +namespace __lsan { +bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end, + uptr *tls_begin, uptr *tls_end, uptr *cache_begin, + uptr *cache_end, DTLS **dtls) { + __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id); + if (!t) return false; + *stack_begin = t->stack_bottom(); + *stack_end = t->stack_top(); + *tls_begin = t->tls_begin(); + *tls_end = t->tls_end(); + // ASan doesn't keep allocator caches in TLS, so these are unused. 
+ *cache_begin = 0; + *cache_end = 0; + *dtls = t->dtls(); + return true; +} + +void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback, + void *arg) { + __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id); + if (t && t->has_fake_stack()) + t->fake_stack()->ForEachFakeFrame(callback, arg); +} + +void LockThreadRegistry() { + __asan::asanThreadRegistry().Lock(); +} + +void UnlockThreadRegistry() { + __asan::asanThreadRegistry().Unlock(); +} + +ThreadRegistry *GetThreadRegistryLocked() { + __asan::asanThreadRegistry().CheckLocked(); + return &__asan::asanThreadRegistry(); +} + +void EnsureMainThreadIDIsCorrect() { + __asan::EnsureMainThreadIDIsCorrect(); +} +} // namespace __lsan + +// ---------------------- Interface ---------------- {{{1 +using namespace __asan; // NOLINT + +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_start_switch_fiber(void **fakestacksave, const void *bottom, + uptr size) { + AsanThread *t = GetCurrentThread(); + if (!t) { + VReport(1, "__asan_start_switch_fiber called from unknown thread\n"); + return; + } + t->StartSwitchFiber((FakeStack**)fakestacksave, (uptr)bottom, size); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_finish_switch_fiber(void* fakestack, + const void **bottom_old, + uptr *size_old) { + AsanThread *t = GetCurrentThread(); + if (!t) { + VReport(1, "__asan_finish_switch_fiber called from unknown thread\n"); + return; + } + t->FinishSwitchFiber((FakeStack*)fakestack, + (uptr*)bottom_old, + (uptr*)size_old); +} +} diff --git a/contrib/compiler-rt/lib/asan/asan_thread.h b/contrib/compiler-rt/lib/asan/asan_thread.h new file mode 100644 index 000000000000..66091921101f --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_thread.h @@ -0,0 +1,199 @@ +//===-- asan_thread.h -------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// ASan-private header for asan_thread.cc. +//===----------------------------------------------------------------------===// + +#ifndef ASAN_THREAD_H +#define ASAN_THREAD_H + +#include "asan_allocator.h" +#include "asan_internal.h" +#include "asan_fake_stack.h" +#include "asan_stats.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_thread_registry.h" + +namespace __sanitizer { +struct DTLS; +} // namespace __sanitizer + +namespace __asan { + +const u32 kInvalidTid = 0xffffff; // Must fit into 24 bits. +const u32 kMaxNumberOfThreads = (1 << 22); // 4M + +class AsanThread; + +// These objects are created for every thread and are never deleted, +// so we can find them by tid even if the thread is long dead. +class AsanThreadContext : public ThreadContextBase { + public: + explicit AsanThreadContext(int tid) + : ThreadContextBase(tid), announced(false), + destructor_iterations(GetPthreadDestructorIterations()), stack_id(0), + thread(nullptr) {} + bool announced; + u8 destructor_iterations; + u32 stack_id; + AsanThread *thread; + + void OnCreated(void *arg) override; + void OnFinished() override; + + struct CreateThreadContextArgs { + AsanThread *thread; + StackTrace *stack; + }; +}; + +// AsanThreadContext objects are never freed, so we need many of them. 
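The size check that follows keeps the worst case bounded precisely because contexts are never freed: with kMaxNumberOfThreads = 1 << 22 of them, capping each context at 256 bytes limits the registry's context memory to 2^22 * 2^8 = 2^30 bytes, i.e. 1 GiB. The same arithmetic as a compile-time sketch:

// Worst-case context memory implied by the check below (illustrative).
constexpr unsigned long long kThreads = 1ULL << 22;  // kMaxNumberOfThreads
constexpr unsigned long long kCtxCap  = 256;         // checked upper bound
static_assert(kThreads * kCtxCap == (1ULL << 30),
              "4M never-freed contexts at 256 bytes each = 1 GiB");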
+COMPILER_CHECK(sizeof(AsanThreadContext) <= 256);
+
+// AsanThread objects are stored in TSD and destroyed when the thread dies.
+class AsanThread {
+ public:
+  static AsanThread *Create(thread_callback_t start_routine, void *arg,
+                            u32 parent_tid, StackTrace *stack, bool detached);
+  static void TSDDtor(void *tsd);
+  void Destroy();
+
+  struct InitOptions;
+  void Init(const InitOptions *options = nullptr);
+
+  thread_return_t ThreadStart(tid_t os_id,
+                              atomic_uintptr_t *signal_thread_is_registered);
+
+  uptr stack_top();
+  uptr stack_bottom();
+  uptr stack_size();
+  uptr tls_begin() { return tls_begin_; }
+  uptr tls_end() { return tls_end_; }
+  DTLS *dtls() { return dtls_; }
+  u32 tid() { return context_->tid; }
+  AsanThreadContext *context() { return context_; }
+  void set_context(AsanThreadContext *context) { context_ = context; }
+
+  struct StackFrameAccess {
+    uptr offset;
+    uptr frame_pc;
+    const char *frame_descr;
+  };
+  bool GetStackFrameAccessByAddr(uptr addr, StackFrameAccess *access);
+
+  // Returns a pointer to the start of the stack variable's shadow memory.
+  uptr GetStackVariableShadowStart(uptr addr);
+
+  bool AddrIsInStack(uptr addr);
+
+  void DeleteFakeStack(int tid) {
+    if (!fake_stack_) return;
+    FakeStack *t = fake_stack_;
+    fake_stack_ = nullptr;
+    SetTLSFakeStack(nullptr);
+    t->Destroy(tid);
+  }
+
+  void StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom, uptr size);
+  void FinishSwitchFiber(FakeStack *fake_stack_save, uptr *bottom_old,
+                         uptr *size_old);
+
+  bool has_fake_stack() {
+    return !atomic_load(&stack_switching_, memory_order_relaxed) &&
+           (reinterpret_cast<uptr>(fake_stack_) > 1);
+  }
+
+  FakeStack *fake_stack() {
+    if (!__asan_option_detect_stack_use_after_return)
+      return nullptr;
+    if (atomic_load(&stack_switching_, memory_order_relaxed))
+      return nullptr;
+    if (!has_fake_stack())
+      return AsyncSignalSafeLazyInitFakeStack();
+    return fake_stack_;
+  }
+
+  // True if this thread is currently unwinding the stack (i.e. collecting a
+  // stack trace). Used to prevent deadlocks on platforms where the libc
+  // unwinder calls malloc internally. See PR17116 for more details.
+  bool isUnwinding() const { return unwinding_; }
+  void setUnwinding(bool b) { unwinding_ = b; }
+
+  AsanThreadLocalMallocStorage &malloc_storage() { return malloc_storage_; }
+  AsanStats &stats() { return stats_; }
+
+ private:
+  // NOTE: There is no AsanThread constructor. It is allocated
+  // via mmap() and *must* be valid in a zero-initialized state.
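The fiber-switching members declared above are driven from outside through __sanitizer_start_switch_fiber and __sanitizer_finish_switch_fiber, whose implementations appear at the end of asan_thread.cc. A hedged usage sketch under ucontext, built with -fsanitize=address; a real coroutine library would add error handling and the g_* names are invented:

#include <sanitizer/common_interface_defs.h>
#include <ucontext.h>
#include <cstddef>

static ucontext_t g_main_ctx, g_fiber_ctx;
static char g_fiber_stack[1 << 16];
static const void *g_main_bottom;  // main stack bounds, learned on first hop
static size_t g_main_size;

static void FiberBody() {
  // We just arrived on the fiber's stack: complete the pending switch.
  // nullptr: this context has no saved fake stack yet.
  __sanitizer_finish_switch_fiber(nullptr, &g_main_bottom, &g_main_size);
  // ... fiber work runs here, fully instrumented ...
  void *fake_stack = nullptr;  // keeps this fiber's fake stack alive
  __sanitizer_start_switch_fiber(&fake_stack, g_main_bottom, g_main_size);
  swapcontext(&g_fiber_ctx, &g_main_ctx);
}

int main() {
  getcontext(&g_fiber_ctx);
  g_fiber_ctx.uc_stack.ss_sp = g_fiber_stack;
  g_fiber_ctx.uc_stack.ss_size = sizeof(g_fiber_stack);
  g_fiber_ctx.uc_link = &g_main_ctx;
  makecontext(&g_fiber_ctx, FiberBody, 0);

  void *fake_stack = nullptr;
  __sanitizer_start_switch_fiber(&fake_stack, g_fiber_stack,
                                 sizeof(g_fiber_stack));
  swapcontext(&g_main_ctx, &g_fiber_ctx);
  __sanitizer_finish_switch_fiber(fake_stack, nullptr, nullptr);
  return 0;
}

Every switch is bracketed: start before leaving a stack, finish right after arriving, so the stack bounds and fake stack stay consistent. The rest of AsanThread's private state follows.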
+ + void SetThreadStackAndTls(const InitOptions *options); + + void ClearShadowForThreadStackAndTLS(); + FakeStack *AsyncSignalSafeLazyInitFakeStack(); + + struct StackBounds { + uptr bottom; + uptr top; + }; + StackBounds GetStackBounds() const; + + AsanThreadContext *context_; + thread_callback_t start_routine_; + void *arg_; + + uptr stack_top_; + uptr stack_bottom_; + // these variables are used when the thread is about to switch stack + uptr next_stack_top_; + uptr next_stack_bottom_; + // true if switching is in progress + atomic_uint8_t stack_switching_; + + uptr tls_begin_; + uptr tls_end_; + DTLS *dtls_; + + FakeStack *fake_stack_; + AsanThreadLocalMallocStorage malloc_storage_; + AsanStats stats_; + bool unwinding_; +}; + +// ScopedUnwinding is a scope for stacktracing member of a context +class ScopedUnwinding { + public: + explicit ScopedUnwinding(AsanThread *t) : thread(t) { + t->setUnwinding(true); + } + ~ScopedUnwinding() { thread->setUnwinding(false); } + + private: + AsanThread *thread; +}; + +// Returns a single instance of registry. +ThreadRegistry &asanThreadRegistry(); + +// Must be called under ThreadRegistryLock. +AsanThreadContext *GetThreadContextByTidLocked(u32 tid); + +// Get the current thread. May return 0. +AsanThread *GetCurrentThread(); +void SetCurrentThread(AsanThread *t); +u32 GetCurrentTidOrInvalid(); +AsanThread *FindThreadByStackAddress(uptr addr); + +// Used to handle fork(). +void EnsureMainThreadIDIsCorrect(); +} // namespace __asan + +#endif // ASAN_THREAD_H diff --git a/contrib/compiler-rt/lib/asan/asan_win.cc b/contrib/compiler-rt/lib/asan/asan_win.cc new file mode 100644 index 000000000000..068f4a5d247d --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_win.cc @@ -0,0 +1,363 @@ +//===-- asan_win.cc -------------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Windows-specific details. 
+//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_WINDOWS +#define WIN32_LEAN_AND_MEAN +#include <windows.h> + +#include <stdlib.h> + +#include "asan_interceptors.h" +#include "asan_internal.h" +#include "asan_report.h" +#include "asan_stack.h" +#include "asan_thread.h" +#include "asan_mapping.h" +#include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_mutex.h" +#include "sanitizer_common/sanitizer_win.h" +#include "sanitizer_common/sanitizer_win_defs.h" + +using namespace __asan; // NOLINT + +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE +int __asan_should_detect_stack_use_after_return() { + __asan_init(); + return __asan_option_detect_stack_use_after_return; +} + +SANITIZER_INTERFACE_ATTRIBUTE +uptr __asan_get_shadow_memory_dynamic_address() { + __asan_init(); + return __asan_shadow_memory_dynamic_address; +} +} // extern "C" + +// ---------------------- Windows-specific interceptors ---------------- {{{ +static LPTOP_LEVEL_EXCEPTION_FILTER default_seh_handler; +static LPTOP_LEVEL_EXCEPTION_FILTER user_seh_handler; + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +long __asan_unhandled_exception_filter(EXCEPTION_POINTERS *info) { + EXCEPTION_RECORD *exception_record = info->ExceptionRecord; + CONTEXT *context = info->ContextRecord; + + // FIXME: Handle EXCEPTION_STACK_OVERFLOW here. + + SignalContext sig(exception_record, context); + ReportDeadlySignal(sig); + UNREACHABLE("returned from reporting deadly signal"); +} + +// Wrapper SEH Handler. If the exception should be handled by asan, we call +// __asan_unhandled_exception_filter, otherwise, we execute the user provided +// exception handler or the default. +static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) { + DWORD exception_code = info->ExceptionRecord->ExceptionCode; + if (__sanitizer::IsHandledDeadlyException(exception_code)) + return __asan_unhandled_exception_filter(info); + if (user_seh_handler) + return user_seh_handler(info); + // Bubble out to the default exception filter. + if (default_seh_handler) + return default_seh_handler(info); + return EXCEPTION_CONTINUE_SEARCH; +} + +INTERCEPTOR_WINAPI(LPTOP_LEVEL_EXCEPTION_FILTER, SetUnhandledExceptionFilter, + LPTOP_LEVEL_EXCEPTION_FILTER ExceptionFilter) { + CHECK(REAL(SetUnhandledExceptionFilter)); + if (ExceptionFilter == &SEHHandler) + return REAL(SetUnhandledExceptionFilter)(ExceptionFilter); + // We record the user provided exception handler to be called for all the + // exceptions unhandled by asan. + Swap(ExceptionFilter, user_seh_handler); + return ExceptionFilter; +} + +INTERCEPTOR_WINAPI(void, RtlRaiseException, EXCEPTION_RECORD *ExceptionRecord) { + CHECK(REAL(RtlRaiseException)); + // This is a noreturn function, unless it's one of the exceptions raised to + // communicate with the debugger, such as the one from OutputDebugString. 
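The __asan_handle_no_return calls in these interceptors exist because exception dispatch, like longjmp, abandons stack frames without running the epilogues that would unpoison their redzones; the call wipes the current stack's shadow first. ASan's own longjmp interceptor does this automatically, so the explicit call in the sketch below is purely to show the mechanism (the declaration is the real one from <sanitizer/asan_interface.h>; link with -fsanitize=address):

#include <csetjmp>
#include <sanitizer/asan_interface.h>

static std::jmp_buf g_env;

static void Deep() {
  char locals[64];
  (void)locals;
  // The frames between here and setjmp are about to be abandoned,
  // not unwound; clear their possibly poisoned shadow before jumping.
  __asan_handle_no_return();
  std::longjmp(g_env, 1);
}

int main() {
  if (setjmp(g_env) == 0) Deep();
  return 0;
}

The interceptor resumes below, skipping the shadow wipe only for the debugger-service exception.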
+ if (ExceptionRecord->ExceptionCode != DBG_PRINTEXCEPTION_C) + __asan_handle_no_return(); + REAL(RtlRaiseException)(ExceptionRecord); +} + +INTERCEPTOR_WINAPI(void, RaiseException, void *a, void *b, void *c, void *d) { + CHECK(REAL(RaiseException)); + __asan_handle_no_return(); + REAL(RaiseException)(a, b, c, d); +} + +#ifdef _WIN64 + +INTERCEPTOR_WINAPI(int, __C_specific_handler, void *a, void *b, void *c, void *d) { // NOLINT + CHECK(REAL(__C_specific_handler)); + __asan_handle_no_return(); + return REAL(__C_specific_handler)(a, b, c, d); +} + +#else + +INTERCEPTOR(int, _except_handler3, void *a, void *b, void *c, void *d) { + CHECK(REAL(_except_handler3)); + __asan_handle_no_return(); + return REAL(_except_handler3)(a, b, c, d); +} + +#if ASAN_DYNAMIC +// This handler is named differently in -MT and -MD CRTs. +#define _except_handler4 _except_handler4_common +#endif +INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) { + CHECK(REAL(_except_handler4)); + __asan_handle_no_return(); + return REAL(_except_handler4)(a, b, c, d); +} +#endif + +static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) { + AsanThread *t = (AsanThread*)arg; + SetCurrentThread(t); + return t->ThreadStart(GetTid(), /* signal_thread_is_registered */ nullptr); +} + +INTERCEPTOR_WINAPI(DWORD, CreateThread, + void* security, uptr stack_size, + DWORD (__stdcall *start_routine)(void*), void* arg, + DWORD thr_flags, void* tid) { + // Strict init-order checking is thread-hostile. + if (flags()->strict_init_order) + StopInitOrderChecking(); + GET_STACK_TRACE_THREAD; + // FIXME: The CreateThread interceptor is not the same as a pthread_create + // one. This is a bandaid fix for PR22025. + bool detached = false; // FIXME: how can we determine it on Windows? + u32 current_tid = GetCurrentTidOrInvalid(); + AsanThread *t = + AsanThread::Create(start_routine, arg, current_tid, &stack, detached); + return REAL(CreateThread)(security, stack_size, + asan_thread_start, t, thr_flags, tid); +} + +// }}} + +namespace __asan { + +void InitializePlatformInterceptors() { + // The interceptors were not designed to be removable, so we have to keep this + // module alive for the life of the process. + HMODULE pinned; + CHECK(GetModuleHandleExW(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | + GET_MODULE_HANDLE_EX_FLAG_PIN, + (LPCWSTR)&InitializePlatformInterceptors, + &pinned)); + + ASAN_INTERCEPT_FUNC(CreateThread); + ASAN_INTERCEPT_FUNC(SetUnhandledExceptionFilter); + +#ifdef _WIN64 + ASAN_INTERCEPT_FUNC(__C_specific_handler); +#else + ASAN_INTERCEPT_FUNC(_except_handler3); + ASAN_INTERCEPT_FUNC(_except_handler4); +#endif + + // Try to intercept kernel32!RaiseException, and if that fails, intercept + // ntdll!RtlRaiseException instead. + if (!::__interception::OverrideFunction("RaiseException", + (uptr)WRAP(RaiseException), + (uptr *)&REAL(RaiseException))) { + CHECK(::__interception::OverrideFunction("RtlRaiseException", + (uptr)WRAP(RtlRaiseException), + (uptr *)&REAL(RtlRaiseException))); + } +} + +void AsanApplyToGlobals(globals_op_fptr op, const void *needle) { + UNIMPLEMENTED(); +} + +// ---------------------- TSD ---------------- {{{ +static bool tsd_key_inited = false; + +static __declspec(thread) void *fake_tsd = 0; + +void AsanTSDInit(void (*destructor)(void *tsd)) { + // FIXME: we're ignoring the destructor for now. 
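AsanTSDInit above emulates a pthread-style TSD key with a single __declspec(thread) slot, and the FIXME records that no destructor is registered with the OS; PlatformTSDDtor is instead reached from the thread-shutdown path. The same shape as a standalone sketch using portable thread_local (all names invented):

#include <cassert>

using TsdDtor = void (*)(void *);

static bool g_inited = false;
static TsdDtor g_dtor = nullptr;             // remembered, fired manually
static thread_local void *g_slot = nullptr;  // one value per thread

void TsdInit(TsdDtor dtor) { g_dtor = dtor; g_inited = true; }
void TsdSet(void *v) { assert(g_inited); g_slot = v; }
void *TsdGet() { assert(g_inited); return g_slot; }

// A DLL_THREAD_DETACH-style hook would call this for the dying thread.
void TsdOnThreadExit() {
  if (g_inited && g_dtor && g_slot) g_dtor(g_slot);
}

The runtime's minimal version of the same idea continues below.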
+  tsd_key_inited = true;
+}
+
+void *AsanTSDGet() {
+  CHECK(tsd_key_inited);
+  return fake_tsd;
+}
+
+void AsanTSDSet(void *tsd) {
+  CHECK(tsd_key_inited);
+  fake_tsd = tsd;
+}
+
+void PlatformTSDDtor(void *tsd) {
+  AsanThread::TSDDtor(tsd);
+}
+// }}}
+
+// ---------------------- Various stuff ---------------- {{{
+void *AsanDoesNotSupportStaticLinkage() {
+#if defined(_DEBUG)
+#error Please build the runtime with a non-debug CRT: /MD or /MT
+#endif
+  return 0;
+}
+
+uptr FindDynamicShadowStart() {
+  uptr granularity = GetMmapGranularity();
+  uptr alignment = 8 * granularity;
+  uptr left_padding = granularity;
+  uptr space_size = kHighShadowEnd + left_padding;
+  uptr shadow_start = FindAvailableMemoryRange(space_size, alignment,
+                                               granularity, nullptr, nullptr);
+  CHECK_NE((uptr)0, shadow_start);
+  CHECK(IsAligned(shadow_start, alignment));
+  return shadow_start;
+}
+
+void AsanCheckDynamicRTPrereqs() {}
+
+void AsanCheckIncompatibleRT() {}
+
+void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
+  UNIMPLEMENTED();
+}
+
+void AsanOnDeadlySignal(int, void *siginfo, void *context) {
+  UNIMPLEMENTED();
+}
+
+#if SANITIZER_WINDOWS64
+// Exception handler for dealing with shadow memory.
+static LONG CALLBACK
+ShadowExceptionHandler(PEXCEPTION_POINTERS exception_pointers) {
+  uptr page_size = GetPageSizeCached();
+  // Only handle access violations.
+  if (exception_pointers->ExceptionRecord->ExceptionCode !=
+      EXCEPTION_ACCESS_VIOLATION) {
+    return EXCEPTION_CONTINUE_SEARCH;
+  }
+
+  // Only handle access violations that land within the shadow memory.
+  uptr addr =
+      (uptr)(exception_pointers->ExceptionRecord->ExceptionInformation[1]);
+
+  // Check valid shadow range.
+  if (!AddrIsInShadow(addr)) return EXCEPTION_CONTINUE_SEARCH;
+
+  // This is an access violation while trying to read from the shadow. Commit
+  // the relevant page and let execution continue.
+
+  // Determine the address of the page that is being accessed.
+  uptr page = RoundDownTo(addr, page_size);
+
+  // Commit the page.
+  uptr result =
+      (uptr)::VirtualAlloc((LPVOID)page, page_size, MEM_COMMIT, PAGE_READWRITE);
+  if (result != page) return EXCEPTION_CONTINUE_SEARCH;
+
+  // The page mapping succeeded, so continue execution as usual.
+  return EXCEPTION_CONTINUE_EXECUTION;
+}
+
+#endif
+
+void InitializePlatformExceptionHandlers() {
+#if SANITIZER_WINDOWS64
+  // On Win64, we map memory on demand with an access violation handler.
+  // Install our exception handler.
+  CHECK(AddVectoredExceptionHandler(TRUE, &ShadowExceptionHandler));
+#endif
+}
+
+bool IsSystemHeapAddress(uptr addr) {
+  return ::HeapValidate(GetProcessHeap(), 0, (void*)addr) != FALSE;
+}
+
+// We want to install our own exception handler (EH) to print helpful reports
+// on access violations and whatnot. Unfortunately, the CRT initializers assume
+// they are run before any user code and drop any previously-installed EHs on
+// the floor, so we can't install our handler inside __asan_init.
+// (See crt0dat.c in the CRT sources for the details.)
+//
+// Things get even more complicated with the dynamic runtime, as it finishes its
+// initialization before the .exe module CRT begins to initialize.
+//
+// For the static runtime (-MT), it's enough to put a callback to
+// __asan_set_seh_filter in the last section for C initializers.
+//
+// For the dynamic runtime (-MD), we want to link the same
+// asan_dynamic_runtime_thunk.lib to all the modules, so __asan_set_seh_filter
+// will be called for each instrumented module. This ensures that at least one
+// __asan_set_seh_filter call happens after the .exe module CRT is initialized.
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+int __asan_set_seh_filter() {
+  // We should only store the previous handler if it's not our own handler in
+  // order to avoid loops in the EH chain.
+  auto prev_seh_handler = SetUnhandledExceptionFilter(SEHHandler);
+  if (prev_seh_handler != &SEHHandler)
+    default_seh_handler = prev_seh_handler;
+  return 0;
+}
+
+bool HandleDlopenInit() {
+  // Not supported on this platform.
+  static_assert(!SANITIZER_SUPPORTS_INIT_FOR_DLOPEN,
+                "Expected SANITIZER_SUPPORTS_INIT_FOR_DLOPEN to be false");
+  return false;
+}
+
+#if !ASAN_DYNAMIC
+// The CRT runs initializers in this order:
+// - C initializers, from XIA to XIZ
+// - C++ initializers, from XCA to XCZ
+// Prior to 2015, the CRT set the unhandled exception filter at priority XIY,
+// near the end of C initialization. Starting in 2015, it was moved to the
+// beginning of C++ initialization. We set our priority to XCAB to run
+// immediately after the CRT runs. This way, our exception filter is called
+// first and we can delegate to their filter if appropriate.
+#pragma section(".CRT$XCAB", long, read)  // NOLINT
+__declspec(allocate(".CRT$XCAB")) int (*__intercept_seh)() =
+    __asan_set_seh_filter;
+
+// Piggyback on the TLS initialization callback directory to initialize asan as
+// early as possible. Initializers in .CRT$XL* are called directly by ntdll,
+// which runs them before the CRT. Users also add code to .CRT$XLC, so it's
+// important to run our initializers first.
+static void NTAPI asan_thread_init(void *module, DWORD reason, void *reserved) {
+  if (reason == DLL_PROCESS_ATTACH) __asan_init();
+}
+
+#pragma section(".CRT$XLAB", long, read)  // NOLINT
+__declspec(allocate(".CRT$XLAB")) void (NTAPI *__asan_tls_init)(void *,
+    unsigned long, void *) = asan_thread_init;
+#endif
+
+WIN_FORCE_LINK(__asan_dso_reg_hook)
+
+// }}}
+}  // namespace __asan
+
+#endif  // SANITIZER_WINDOWS
diff --git a/contrib/compiler-rt/lib/asan/asan_win_dll_thunk.cc b/contrib/compiler-rt/lib/asan/asan_win_dll_thunk.cc
new file mode 100644
index 000000000000..df593ab92de6
--- /dev/null
+++ b/contrib/compiler-rt/lib/asan/asan_win_dll_thunk.cc
@@ -0,0 +1,153 @@
+//===-- asan_win_dll_thunk.cc ---------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// This file defines a family of thunks that should be statically linked into
+// the DLLs that have ASan instrumentation in order to delegate the calls to the
+// shared runtime that lives in the main binary.
+// See https://github.com/google/sanitizers/issues/209 for the details.
+//===----------------------------------------------------------------------===//
+
+#ifdef SANITIZER_DLL_THUNK
+#include "asan_init_version.h"
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_win_defs.h"
+#include "sanitizer_common/sanitizer_win_dll_thunk.h"
+#include "sanitizer_common/sanitizer_platform_interceptors.h"
+
+// ASan's own interface functions.
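Before the thunk pulls in the interface functions below, it is worth pinning down the .CRT$X* machinery that both asan_win.cc above and this file rely on: the MSVC CRT walks these sections alphabetically, calling function pointers planted in .CRT$XI* as C initializers and .CRT$XC* as C++ initializers. A minimal MSVC-only demonstration; the section choice and names are illustrative:

#include <cstdio>

static bool g_early_ran = false;

static int EarlyInit() {  // C initializer: runs before C++ constructors
  g_early_ran = true;
  return 0;               // a nonzero return would abort CRT startup
}

#pragma section(".CRT$XIB", long, read)
__declspec(allocate(".CRT$XIB")) static int (*early_init_ptr)() = EarlyInit;

struct Probe {
  bool saw_early;
  Probe() : saw_early(g_early_ran) {}  // C++ ctor: runs after .CRT$XI*
} g_probe;

int main() {
  std::printf("early init ran before ctor: %s\n",
              g_probe.saw_early ? "yes" : "no");
}

Back in the DLL thunk, the interface functions are pulled in wholesale just below.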
+#define INTERFACE_FUNCTION(Name) INTERCEPT_SANITIZER_FUNCTION(Name) +#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name) +#include "asan_interface.inc" + +// Memory allocation functions. +INTERCEPT_WRAP_V_W(free) +INTERCEPT_WRAP_V_W(_free_base) +INTERCEPT_WRAP_V_WW(_free_dbg) + +INTERCEPT_WRAP_W_W(malloc) +INTERCEPT_WRAP_W_W(_malloc_base) +INTERCEPT_WRAP_W_WWWW(_malloc_dbg) + +INTERCEPT_WRAP_W_WW(calloc) +INTERCEPT_WRAP_W_WW(_calloc_base) +INTERCEPT_WRAP_W_WWWWW(_calloc_dbg) +INTERCEPT_WRAP_W_WWW(_calloc_impl) + +INTERCEPT_WRAP_W_WW(realloc) +INTERCEPT_WRAP_W_WW(_realloc_base) +INTERCEPT_WRAP_W_WWW(_realloc_dbg) +INTERCEPT_WRAP_W_WWW(_recalloc) +INTERCEPT_WRAP_W_WWW(_recalloc_base) + +INTERCEPT_WRAP_W_W(_msize) +INTERCEPT_WRAP_W_W(_msize_base) +INTERCEPT_WRAP_W_W(_expand) +INTERCEPT_WRAP_W_W(_expand_dbg) + +// TODO(timurrrr): Might want to add support for _aligned_* allocation +// functions to detect a bit more bugs. Those functions seem to wrap malloc(). + +// TODO(timurrrr): Do we need to add _Crt* stuff here? (see asan_malloc_win.cc). + +INTERCEPT_LIBRARY_FUNCTION(atoi); +INTERCEPT_LIBRARY_FUNCTION(atol); +INTERCEPT_LIBRARY_FUNCTION(frexp); +INTERCEPT_LIBRARY_FUNCTION(longjmp); +#if SANITIZER_INTERCEPT_MEMCHR +INTERCEPT_LIBRARY_FUNCTION(memchr); +#endif +INTERCEPT_LIBRARY_FUNCTION(memcmp); +INTERCEPT_LIBRARY_FUNCTION(memcpy); +INTERCEPT_LIBRARY_FUNCTION(memmove); +INTERCEPT_LIBRARY_FUNCTION(memset); +INTERCEPT_LIBRARY_FUNCTION(strcat); // NOLINT +INTERCEPT_LIBRARY_FUNCTION(strchr); +INTERCEPT_LIBRARY_FUNCTION(strcmp); +INTERCEPT_LIBRARY_FUNCTION(strcpy); // NOLINT +INTERCEPT_LIBRARY_FUNCTION(strcspn); +INTERCEPT_LIBRARY_FUNCTION(strdup); +INTERCEPT_LIBRARY_FUNCTION(strlen); +INTERCEPT_LIBRARY_FUNCTION(strncat); +INTERCEPT_LIBRARY_FUNCTION(strncmp); +INTERCEPT_LIBRARY_FUNCTION(strncpy); +INTERCEPT_LIBRARY_FUNCTION(strnlen); +INTERCEPT_LIBRARY_FUNCTION(strpbrk); +INTERCEPT_LIBRARY_FUNCTION(strrchr); +INTERCEPT_LIBRARY_FUNCTION(strspn); +INTERCEPT_LIBRARY_FUNCTION(strstr); +INTERCEPT_LIBRARY_FUNCTION(strtok); +INTERCEPT_LIBRARY_FUNCTION(strtol); +INTERCEPT_LIBRARY_FUNCTION(wcslen); +INTERCEPT_LIBRARY_FUNCTION(wcsnlen); + +#ifdef _WIN64 +INTERCEPT_LIBRARY_FUNCTION(__C_specific_handler); +#else +INTERCEPT_LIBRARY_FUNCTION(_except_handler3); +// _except_handler4 checks -GS cookie which is different for each module, so we +// can't use INTERCEPT_LIBRARY_FUNCTION(_except_handler4). +INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) { + __asan_handle_no_return(); + return REAL(_except_handler4)(a, b, c, d); +} +#endif + +// Windows specific functions not included in asan_interface.inc. +INTERCEPT_WRAP_W_V(__asan_should_detect_stack_use_after_return) +INTERCEPT_WRAP_W_V(__asan_get_shadow_memory_dynamic_address) +INTERCEPT_WRAP_W_W(__asan_unhandled_exception_filter) + +using namespace __sanitizer; + +extern "C" { +int __asan_option_detect_stack_use_after_return; +uptr __asan_shadow_memory_dynamic_address; +} // extern "C" + +static int asan_dll_thunk_init() { + typedef void (*fntype)(); + static fntype fn = 0; + // asan_dll_thunk_init is expected to be called by only one thread. + if (fn) return 0; + + // Ensure all interception was executed. 
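That comment refers to __dll_thunk_init(), the next call below; the dllThunkGetRealAddrOrDie lookup that follows it amounts to a GetProcAddress query against the main executable, which is where the shared ASan runtime lives in this configuration. A hedged Win32 sketch (the helper name is invented; in a plain, uninstrumented program the lookup would simply fail):

#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <cstdio>
#include <cstdlib>

// Resolve an export from the main executable (module handle nullptr).
static void *RealAddrOrDie(const char *name) {
  FARPROC p = GetProcAddress(GetModuleHandleA(nullptr), name);
  if (!p) {
    std::fprintf(stderr, "can't find %s in the main module\n", name);
    std::abort();
  }
  return reinterpret_cast<void *>(p);
}

int main() {
  // In the thunk this would be "__asan_init", exported by an
  // instrumented executable linked against the shared runtime.
  using Fn = void (*)();
  Fn init = reinterpret_cast<Fn>(RealAddrOrDie("__asan_init"));
  (void)init;
}

asan_dll_thunk_init then continues below, flushing pending interceptions before making that call.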
+ __dll_thunk_init(); + + fn = (fntype) dllThunkGetRealAddrOrDie("__asan_init"); + fn(); + __asan_option_detect_stack_use_after_return = + (__asan_should_detect_stack_use_after_return() != 0); + __asan_shadow_memory_dynamic_address = + (uptr)__asan_get_shadow_memory_dynamic_address(); + +#ifndef _WIN64 + INTERCEPT_FUNCTION(_except_handler4); +#endif + // In DLLs, the callbacks are expected to return 0, + // otherwise CRT initialization fails. + return 0; +} + +#pragma section(".CRT$XIB", long, read) // NOLINT +__declspec(allocate(".CRT$XIB")) int (*__asan_preinit)() = asan_dll_thunk_init; + +static void WINAPI asan_thread_init(void *mod, unsigned long reason, + void *reserved) { + if (reason == /*DLL_PROCESS_ATTACH=*/1) asan_dll_thunk_init(); +} + +#pragma section(".CRT$XLAB", long, read) // NOLINT +__declspec(allocate(".CRT$XLAB")) void (WINAPI *__asan_tls_init)(void *, + unsigned long, void *) = asan_thread_init; + +WIN_FORCE_LINK(__asan_dso_reg_hook) + +#endif // SANITIZER_DLL_THUNK diff --git a/contrib/compiler-rt/lib/asan/asan_win_dynamic_runtime_thunk.cc b/contrib/compiler-rt/lib/asan/asan_win_dynamic_runtime_thunk.cc new file mode 100644 index 000000000000..416c73b23629 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_win_dynamic_runtime_thunk.cc @@ -0,0 +1,131 @@ +//===-- asan_win_dynamic_runtime_thunk.cc ---------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// This file defines things that need to be present in the application modules +// to interact with the ASan DLL runtime correctly and can't be implemented +// using the default "import library" generated when linking the DLL RTL. +// +// This includes: +// - creating weak aliases to default implementation imported from asan dll. +// - forwarding the detect_stack_use_after_return runtime option +// - working around deficiencies of the MD runtime +// - installing a custom SEH handler +// +//===----------------------------------------------------------------------===// + +#ifdef SANITIZER_DYNAMIC_RUNTIME_THUNK +#define SANITIZER_IMPORT_INTERFACE 1 +#include "sanitizer_common/sanitizer_win_defs.h" +#define WIN32_LEAN_AND_MEAN +#include <windows.h> + +// Define weak alias for all weak functions imported from asan dll. +#define INTERFACE_FUNCTION(Name) +#define INTERFACE_WEAK_FUNCTION(Name) WIN_WEAK_IMPORT_DEF(Name) +#include "asan_interface.inc" + +// First, declare CRT sections we'll be using in this file +#pragma section(".CRT$XIB", long, read) // NOLINT +#pragma section(".CRT$XID", long, read) // NOLINT +#pragma section(".CRT$XCAB", long, read) // NOLINT +#pragma section(".CRT$XTW", long, read) // NOLINT +#pragma section(".CRT$XTY", long, read) // NOLINT +#pragma section(".CRT$XLAB", long, read) // NOLINT + +//////////////////////////////////////////////////////////////////////////////// +// Define a copy of __asan_option_detect_stack_use_after_return that should be +// used when linking an MD runtime with a set of object files on Windows. +// +// The ASan MD runtime dllexports '__asan_option_detect_stack_use_after_return', +// so normally we would just dllimport it. Unfortunately, the dllimport +// attribute adds __imp_ prefix to the symbol name of a variable. 
+// Since in general we don't know if a given TU is going to be used
+// with an MT or MD runtime and we don't want to use ugly __imp_ names on
+// Windows just to work around this issue, let's clone the variable that is
+// constant after initialization anyway.
+extern "C" {
+__declspec(dllimport) int __asan_should_detect_stack_use_after_return();
+int __asan_option_detect_stack_use_after_return;
+
+__declspec(dllimport) void* __asan_get_shadow_memory_dynamic_address();
+void* __asan_shadow_memory_dynamic_address;
+}
+
+static int InitializeClonedVariables() {
+  __asan_option_detect_stack_use_after_return =
+      __asan_should_detect_stack_use_after_return();
+  __asan_shadow_memory_dynamic_address =
+      __asan_get_shadow_memory_dynamic_address();
+  return 0;
+}
+
+static void NTAPI asan_thread_init(void *mod, unsigned long reason,
+                                   void *reserved) {
+  if (reason == DLL_PROCESS_ATTACH) InitializeClonedVariables();
+}
+
+// Our cloned variables must be initialized before C/C++ constructors. If TLS
+// is used, our .CRT$XLAB initializer will run first. If not, our .CRT$XIB
+// initializer is needed as a backup.
+__declspec(allocate(".CRT$XIB")) int (*__asan_initialize_cloned_variables)() =
+    InitializeClonedVariables;
+__declspec(allocate(".CRT$XLAB")) void (NTAPI *__asan_tls_init)(void *,
+    unsigned long, void *) = asan_thread_init;
+
+////////////////////////////////////////////////////////////////////////////////
+// For some reason, the MD CRT doesn't call the C/C++ terminators on DLL
+// unload or on exit. ASan relies on LLVM global_dtors to call
+// __asan_unregister_globals on these events, which unfortunately doesn't work
+// with the MD runtime, see PR22545 for the details.
+// To work around this, for each DLL we schedule a call to UnregisterGlobals
+// using atexit(); it runs the small subset of C terminators
+// where LLVM global_dtors is placed. Fingers crossed, no other C terminators
+// are there.
+extern "C" int __cdecl atexit(void (__cdecl *f)(void));
+extern "C" void __cdecl _initterm(void *a, void *b);
+
+namespace {
+__declspec(allocate(".CRT$XTW")) void* before_global_dtors = 0;
+__declspec(allocate(".CRT$XTY")) void* after_global_dtors = 0;
+
+void UnregisterGlobals() {
+  _initterm(&before_global_dtors, &after_global_dtors);
+}
+
+int ScheduleUnregisterGlobals() {
+  return atexit(UnregisterGlobals);
+}
+}  // namespace
+
+// We need to call 'atexit(UnregisterGlobals);' as early as possible, but after
+// atexit() is initialized (.CRT$XIC). As this is executed before C++
+// initializers (think ctors for globals), UnregisterGlobals gets executed after
+// dtors for C++ globals.
+__declspec(allocate(".CRT$XID"))
+int (*__asan_schedule_unregister_globals)() = ScheduleUnregisterGlobals;
+
+////////////////////////////////////////////////////////////////////////////////
+// ASan SEH handling.
+// We need to set the ASan-specific SEH handler at the end of CRT initialization
+// of each module (see also asan_win.cc).
+extern "C" {
+__declspec(dllimport) int __asan_set_seh_filter();
+static int SetSEHFilter() { return __asan_set_seh_filter(); }
+
+// Unfortunately, putting a pointer to __asan_set_seh_filter into
+// __asan_intercept_seh gets optimized out, so we have to use an extra function.
+__declspec(allocate(".CRT$XCAB")) int (*__asan_seh_interceptor)() = + SetSEHFilter; +} + +WIN_FORCE_LINK(__asan_dso_reg_hook) + +#endif // SANITIZER_DYNAMIC_RUNTIME_THUNK diff --git a/contrib/compiler-rt/lib/asan/asan_win_weak_interception.cc b/contrib/compiler-rt/lib/asan/asan_win_weak_interception.cc new file mode 100644 index 000000000000..ca26f914cf5f --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_win_weak_interception.cc @@ -0,0 +1,23 @@ +//===-- asan_win_weak_interception.cc -------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// This module should be included in Address Sanitizer when it is implemented as +// a shared library on Windows (dll), in order to delegate the calls of weak +// functions to the implementation in the main executable when a strong +// definition is provided. +//===----------------------------------------------------------------------===// +#ifdef SANITIZER_DYNAMIC +#include "sanitizer_common/sanitizer_win_weak_interception.h" +#include "asan_interface_internal.h" +// Check if strong definitions for weak functions are present in the main +// executable. If that is the case, override dll functions to point to strong +// implementations. +#define INTERFACE_FUNCTION(Name) +#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name) +#include "asan_interface.inc" +#endif // SANITIZER_DYNAMIC diff --git a/contrib/compiler-rt/lib/asan/weak_symbols.txt b/contrib/compiler-rt/lib/asan/weak_symbols.txt new file mode 100644 index 000000000000..fe680f8a9a4f --- /dev/null +++ b/contrib/compiler-rt/lib/asan/weak_symbols.txt @@ -0,0 +1,12 @@ +___asan_default_options +___asan_default_suppressions +___asan_on_error +___asan_set_shadow_00 +___asan_set_shadow_f1 +___asan_set_shadow_f2 +___asan_set_shadow_f3 +___asan_set_shadow_f4 +___asan_set_shadow_f5 +___asan_set_shadow_f6 +___asan_set_shadow_f7 +___asan_set_shadow_f8 |
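weak_symbols.txt above is the list of hooks a user program may override; the extra leading underscore is Mach-O symbol decoration, so the C-level names have one underscore fewer. Defining any of them in the instrumented binary replaces the runtime's default. Two of the most common hooks, with an arbitrary example option string:

// Compiled into the instrumented program; ASan consults these at startup
// and on each reported error, respectively.
extern "C" const char *__asan_default_options() {
  return "verbosity=1:halt_on_error=0";  // example flags only
}

extern "C" void __asan_on_error() {
  // Called whenever ASan reports an error; a convenient breakpoint target.
}

int main() { return 0; }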