diff options
Diffstat (limited to 'contrib/compiler-rt/lib/asan')
64 files changed, 14498 insertions, 0 deletions
diff --git a/contrib/compiler-rt/lib/asan/README.txt b/contrib/compiler-rt/lib/asan/README.txt new file mode 100644 index 000000000000..b9c43acd5fe4 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/README.txt @@ -0,0 +1,28 @@ +AddressSanitizer RT +================================ +This directory contains sources of the AddressSanitizer (asan) runtime library. +We are in the process of integrating AddressSanitizer with LLVM, stay tuned. + +Directory structure: +README.txt : This file. +Makefile.mk : File for make-based build. +CMakeLists.txt : File for cmake-based build. +asan_*.{cc,h} : Sources of the asan runtime library. +scripts/* : Helper scripts. +tests/* : ASan unit tests. + +Also ASan runtime needs the following libraries: +lib/interception/ : Machinery used to intercept function calls. +lib/sanitizer_common/ : Code shared between ASan and TSan. + +Currently ASan runtime can be built by both make and cmake build systems. +(see compiler-rt/make and files Makefile.mk for make-based build and +files CMakeLists.txt for cmake-based build). + +ASan unit and output tests work only with cmake. 
You may run this +command from the root of your cmake build tree: + +make check-asan + +For more instructions see: +http://code.google.com/p/address-sanitizer/wiki/HowToBuild diff --git a/contrib/compiler-rt/lib/asan/asan.syms.extra b/contrib/compiler-rt/lib/asan/asan.syms.extra new file mode 100644 index 000000000000..007aafe380a8 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan.syms.extra @@ -0,0 +1,3 @@ +__asan_* +__lsan_* +__ubsan_* diff --git a/contrib/compiler-rt/lib/asan/asan_activation.cc b/contrib/compiler-rt/lib/asan/asan_activation.cc new file mode 100644 index 000000000000..eb4a6db0b85e --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_activation.cc @@ -0,0 +1,88 @@ +//===-- asan_activation.cc --------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// ASan activation/deactivation logic. +//===----------------------------------------------------------------------===// + +#include "asan_activation.h" +#include "asan_allocator.h" +#include "asan_flags.h" +#include "asan_internal.h" +#include "sanitizer_common/sanitizer_flags.h" + +namespace __asan { + +static struct AsanDeactivatedFlags { + int quarantine_size; + int max_redzone; + int malloc_context_size; + bool poison_heap; + bool alloc_dealloc_mismatch; + bool allocator_may_return_null; +} asan_deactivated_flags; + +static bool asan_is_deactivated; + +void AsanStartDeactivated() { + VReport(1, "Deactivating ASan\n"); + // Save flag values. 
+ asan_deactivated_flags.quarantine_size = flags()->quarantine_size; + asan_deactivated_flags.max_redzone = flags()->max_redzone; + asan_deactivated_flags.poison_heap = flags()->poison_heap; + asan_deactivated_flags.malloc_context_size = + common_flags()->malloc_context_size; + asan_deactivated_flags.alloc_dealloc_mismatch = + flags()->alloc_dealloc_mismatch; + asan_deactivated_flags.allocator_may_return_null = + common_flags()->allocator_may_return_null; + + flags()->quarantine_size = 0; + flags()->max_redzone = 16; + flags()->poison_heap = false; + common_flags()->malloc_context_size = 0; + flags()->alloc_dealloc_mismatch = false; + common_flags()->allocator_may_return_null = true; + + asan_is_deactivated = true; +} + +void AsanActivate() { + if (!asan_is_deactivated) return; + VReport(1, "Activating ASan\n"); + + // Restore flag values. + // FIXME: this is not atomic, and there may be other threads alive. + flags()->quarantine_size = asan_deactivated_flags.quarantine_size; + flags()->max_redzone = asan_deactivated_flags.max_redzone; + flags()->poison_heap = asan_deactivated_flags.poison_heap; + common_flags()->malloc_context_size = + asan_deactivated_flags.malloc_context_size; + flags()->alloc_dealloc_mismatch = + asan_deactivated_flags.alloc_dealloc_mismatch; + common_flags()->allocator_may_return_null = + asan_deactivated_flags.allocator_may_return_null; + + ParseExtraActivationFlags(); + + ReInitializeAllocator(); + + asan_is_deactivated = false; + VReport( + 1, + "quarantine_size %d, max_redzone %d, poison_heap %d, " + "malloc_context_size %d, alloc_dealloc_mismatch %d, " + "allocator_may_return_null %d\n", + flags()->quarantine_size, flags()->max_redzone, flags()->poison_heap, + common_flags()->malloc_context_size, flags()->alloc_dealloc_mismatch, + common_flags()->allocator_may_return_null); +} + +} // namespace __asan diff --git a/contrib/compiler-rt/lib/asan/asan_activation.h b/contrib/compiler-rt/lib/asan/asan_activation.h new file mode 100644 index 
000000000000..dafb840a6042 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_activation.h @@ -0,0 +1,23 @@ +//===-- asan_activation.h ---------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// ASan activation/deactivation logic. +//===----------------------------------------------------------------------===// + +#ifndef ASAN_ACTIVATION_H +#define ASAN_ACTIVATION_H + +namespace __asan { +void AsanStartDeactivated(); +void AsanActivate(); +} // namespace __asan + +#endif // ASAN_ACTIVATION_H diff --git a/contrib/compiler-rt/lib/asan/asan_allocator.h b/contrib/compiler-rt/lib/asan/asan_allocator.h new file mode 100644 index 000000000000..6d3a99282a4a --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_allocator.h @@ -0,0 +1,165 @@ +//===-- asan_allocator.h ----------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// ASan-private header for asan_allocator2.cc. +//===----------------------------------------------------------------------===// + +#ifndef ASAN_ALLOCATOR_H +#define ASAN_ALLOCATOR_H + +#include "asan_internal.h" +#include "asan_interceptors.h" +#include "sanitizer_common/sanitizer_allocator.h" +#include "sanitizer_common/sanitizer_list.h" + +namespace __asan { + +enum AllocType { + FROM_MALLOC = 1, // Memory block came from malloc, calloc, realloc, etc. + FROM_NEW = 2, // Memory block came from operator new. 
+ FROM_NEW_BR = 3 // Memory block came from operator new [ ] +}; + +static const uptr kNumberOfSizeClasses = 255; +struct AsanChunk; + +void InitializeAllocator(); +void ReInitializeAllocator(); + +class AsanChunkView { + public: + explicit AsanChunkView(AsanChunk *chunk) : chunk_(chunk) {} + bool IsValid(); // Checks if AsanChunkView points to a valid allocated + // or quarantined chunk. + uptr Beg(); // First byte of user memory. + uptr End(); // Last byte of user memory. + uptr UsedSize(); // Size requested by the user. + uptr AllocTid(); + uptr FreeTid(); + bool Eq(const AsanChunkView &c) const { return chunk_ == c.chunk_; } + StackTrace GetAllocStack(); + StackTrace GetFreeStack(); + bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) { + if (addr >= Beg() && (addr + access_size) <= End()) { + *offset = addr - Beg(); + return true; + } + return false; + } + bool AddrIsAtLeft(uptr addr, uptr access_size, sptr *offset) { + (void)access_size; + if (addr < Beg()) { + *offset = Beg() - addr; + return true; + } + return false; + } + bool AddrIsAtRight(uptr addr, uptr access_size, sptr *offset) { + if (addr + access_size > End()) { + *offset = addr - End(); + return true; + } + return false; + } + + private: + AsanChunk *const chunk_; +}; + +AsanChunkView FindHeapChunkByAddress(uptr address); + +// List of AsanChunks with total size. 
+class AsanChunkFifoList: public IntrusiveList<AsanChunk> { + public: + explicit AsanChunkFifoList(LinkerInitialized) { } + AsanChunkFifoList() { clear(); } + void Push(AsanChunk *n); + void PushList(AsanChunkFifoList *q); + AsanChunk *Pop(); + uptr size() { return size_; } + void clear() { + IntrusiveList<AsanChunk>::clear(); + size_ = 0; + } + private: + uptr size_; +}; + +struct AsanMapUnmapCallback { + void OnMap(uptr p, uptr size) const; + void OnUnmap(uptr p, uptr size) const; +}; + +#if SANITIZER_CAN_USE_ALLOCATOR64 +# if defined(__powerpc64__) +const uptr kAllocatorSpace = 0xa0000000000ULL; +const uptr kAllocatorSize = 0x20000000000ULL; // 2T. +# else +const uptr kAllocatorSpace = 0x600000000000ULL; +const uptr kAllocatorSize = 0x40000000000ULL; // 4T. +# endif +typedef DefaultSizeClassMap SizeClassMap; +typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/, + SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator; +#else // Fallback to SizeClassAllocator32. +static const uptr kRegionSizeLog = 20; +static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog; +# if SANITIZER_WORDSIZE == 32 +typedef FlatByteMap<kNumRegions> ByteMap; +# elif SANITIZER_WORDSIZE == 64 +typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap; +# endif +typedef CompactSizeClassMap SizeClassMap; +typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, 16, + SizeClassMap, kRegionSizeLog, + ByteMap, + AsanMapUnmapCallback> PrimaryAllocator; +#endif // SANITIZER_CAN_USE_ALLOCATOR64 + +typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache; +typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator; +typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, + SecondaryAllocator> Allocator; + + +struct AsanThreadLocalMallocStorage { + uptr quarantine_cache[16]; + AllocatorCache allocator2_cache; + void CommitBack(); + private: + // These objects are allocated via mmap() and are zero-initialized. 
+ AsanThreadLocalMallocStorage() {} +}; + +void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack, + AllocType alloc_type); +void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type); +void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack, + AllocType alloc_type); + +void *asan_malloc(uptr size, BufferedStackTrace *stack); +void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack); +void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack); +void *asan_valloc(uptr size, BufferedStackTrace *stack); +void *asan_pvalloc(uptr size, BufferedStackTrace *stack); + +int asan_posix_memalign(void **memptr, uptr alignment, uptr size, + BufferedStackTrace *stack); +uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp); + +uptr asan_mz_size(const void *ptr); +void asan_mz_force_lock(); +void asan_mz_force_unlock(); + +void PrintInternalAllocatorStats(); + +} // namespace __asan +#endif // ASAN_ALLOCATOR_H diff --git a/contrib/compiler-rt/lib/asan/asan_allocator2.cc b/contrib/compiler-rt/lib/asan/asan_allocator2.cc new file mode 100644 index 000000000000..52bdcf607f57 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_allocator2.cc @@ -0,0 +1,792 @@ +//===-- asan_allocator2.cc ------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Implementation of ASan's memory allocator, 2-nd version. +// This variant uses the allocator from sanitizer_common, i.e. the one shared +// with ThreadSanitizer and MemorySanitizer. 
+// +//===----------------------------------------------------------------------===// +#include "asan_allocator.h" + +#include "asan_mapping.h" +#include "asan_poisoning.h" +#include "asan_report.h" +#include "asan_stack.h" +#include "asan_thread.h" +#include "sanitizer_common/sanitizer_allocator_interface.h" +#include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_internal_defs.h" +#include "sanitizer_common/sanitizer_list.h" +#include "sanitizer_common/sanitizer_stackdepot.h" +#include "sanitizer_common/sanitizer_quarantine.h" +#include "lsan/lsan_common.h" + +namespace __asan { + +void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const { + PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic); + // Statistics. + AsanStats &thread_stats = GetCurrentThreadStats(); + thread_stats.mmaps++; + thread_stats.mmaped += size; +} +void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const { + PoisonShadow(p, size, 0); + // We are about to unmap a chunk of user memory. + // Mark the corresponding shadow memory as not needed. + FlushUnneededASanShadowMemory(p, size); + // Statistics. + AsanStats &thread_stats = GetCurrentThreadStats(); + thread_stats.munmaps++; + thread_stats.munmaped += size; +} + +// We can not use THREADLOCAL because it is not supported on some of the +// platforms we care about (OSX 10.6, Android). +// static THREADLOCAL AllocatorCache cache; +AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) { + CHECK(ms); + return &ms->allocator2_cache; +} + +static Allocator allocator; + +static const uptr kMaxAllowedMallocSize = + FIRST_32_SECOND_64(3UL << 30, 64UL << 30); + +static const uptr kMaxThreadLocalQuarantine = + FIRST_32_SECOND_64(1 << 18, 1 << 20); + +// Every chunk of memory allocated by this allocator can be in one of 3 states: +// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated. +// CHUNK_ALLOCATED: the chunk is allocated and not yet freed. 
+// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone. +enum { + CHUNK_AVAILABLE = 0, // 0 is the default value even if we didn't set it. + CHUNK_ALLOCATED = 2, + CHUNK_QUARANTINE = 3 +}; + +// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits. +// We use adaptive redzones: for larger allocation larger redzones are used. +static u32 RZLog2Size(u32 rz_log) { + CHECK_LT(rz_log, 8); + return 16 << rz_log; +} + +static u32 RZSize2Log(u32 rz_size) { + CHECK_GE(rz_size, 16); + CHECK_LE(rz_size, 2048); + CHECK(IsPowerOfTwo(rz_size)); + u32 res = Log2(rz_size) - 4; + CHECK_EQ(rz_size, RZLog2Size(res)); + return res; +} + +static uptr ComputeRZLog(uptr user_requested_size) { + u32 rz_log = + user_requested_size <= 64 - 16 ? 0 : + user_requested_size <= 128 - 32 ? 1 : + user_requested_size <= 512 - 64 ? 2 : + user_requested_size <= 4096 - 128 ? 3 : + user_requested_size <= (1 << 14) - 256 ? 4 : + user_requested_size <= (1 << 15) - 512 ? 5 : + user_requested_size <= (1 << 16) - 1024 ? 6 : 7; + return Min(Max(rz_log, RZSize2Log(flags()->redzone)), + RZSize2Log(flags()->max_redzone)); +} + +// The memory chunk allocated from the underlying allocator looks like this: +// L L L L L L H H U U U U U U R R +// L -- left redzone words (0 or more bytes) +// H -- ChunkHeader (16 bytes), which is also a part of the left redzone. +// U -- user memory. +// R -- right redzone (0 or more bytes) +// ChunkBase consists of ChunkHeader and other bytes that overlap with user +// memory. + +// If the left redzone is greater than the ChunkHeader size we store a magic +// value in the first uptr word of the memory block and store the address of +// ChunkBase in the next uptr. +// M B L L L L L L L L L H H U U U U U U +// | ^ +// ---------------------| +// M -- magic value kAllocBegMagic +// B -- address of ChunkHeader pointing to the first 'H' +static const uptr kAllocBegMagic = 0xCC6E96B9; + +struct ChunkHeader { + // 1-st 8 bytes. 
+ u32 chunk_state : 8; // Must be first. + u32 alloc_tid : 24; + + u32 free_tid : 24; + u32 from_memalign : 1; + u32 alloc_type : 2; + u32 rz_log : 3; + u32 lsan_tag : 2; + // 2-nd 8 bytes + // This field is used for small sizes. For large sizes it is equal to + // SizeClassMap::kMaxSize and the actual size is stored in the + // SecondaryAllocator's metadata. + u32 user_requested_size; + u32 alloc_context_id; +}; + +struct ChunkBase : ChunkHeader { + // Header2, intersects with user memory. + u32 free_context_id; +}; + +static const uptr kChunkHeaderSize = sizeof(ChunkHeader); +static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize; +COMPILER_CHECK(kChunkHeaderSize == 16); +COMPILER_CHECK(kChunkHeader2Size <= 16); + +struct AsanChunk: ChunkBase { + uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; } + uptr UsedSize(bool locked_version = false) { + if (user_requested_size != SizeClassMap::kMaxSize) + return user_requested_size; + return *reinterpret_cast<uptr *>( + allocator.GetMetaData(AllocBeg(locked_version))); + } + void *AllocBeg(bool locked_version = false) { + if (from_memalign) { + if (locked_version) + return allocator.GetBlockBeginFastLocked( + reinterpret_cast<void *>(this)); + return allocator.GetBlockBegin(reinterpret_cast<void *>(this)); + } + return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log)); + } + bool AddrIsInside(uptr addr, bool locked_version = false) { + return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version)); + } +}; + +bool AsanChunkView::IsValid() { + return chunk_ != 0 && chunk_->chunk_state != CHUNK_AVAILABLE; +} +uptr AsanChunkView::Beg() { return chunk_->Beg(); } +uptr AsanChunkView::End() { return Beg() + UsedSize(); } +uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); } +uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; } +uptr AsanChunkView::FreeTid() { return chunk_->free_tid; } + +static StackTrace GetStackTraceFromId(u32 id) { + CHECK(id); + StackTrace res 
= StackDepotGet(id); + CHECK(res.trace); + return res; +} + +StackTrace AsanChunkView::GetAllocStack() { + return GetStackTraceFromId(chunk_->alloc_context_id); +} + +StackTrace AsanChunkView::GetFreeStack() { + return GetStackTraceFromId(chunk_->free_context_id); +} + +struct QuarantineCallback; +typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine; +typedef AsanQuarantine::Cache QuarantineCache; +static AsanQuarantine quarantine(LINKER_INITIALIZED); +static QuarantineCache fallback_quarantine_cache(LINKER_INITIALIZED); +static AllocatorCache fallback_allocator_cache; +static SpinMutex fallback_mutex; + +QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) { + CHECK(ms); + CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache)); + return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache); +} + +struct QuarantineCallback { + explicit QuarantineCallback(AllocatorCache *cache) + : cache_(cache) { + } + + void Recycle(AsanChunk *m) { + CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE); + atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed); + CHECK_NE(m->alloc_tid, kInvalidTid); + CHECK_NE(m->free_tid, kInvalidTid); + PoisonShadow(m->Beg(), + RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY), + kAsanHeapLeftRedzoneMagic); + void *p = reinterpret_cast<void *>(m->AllocBeg()); + if (p != m) { + uptr *alloc_magic = reinterpret_cast<uptr *>(p); + CHECK_EQ(alloc_magic[0], kAllocBegMagic); + // Clear the magic value, as allocator internals may overwrite the + // contents of deallocated chunk, confusing GetAsanChunk lookup. + alloc_magic[0] = 0; + CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m)); + } + + // Statistics. 
+ AsanStats &thread_stats = GetCurrentThreadStats(); + thread_stats.real_frees++; + thread_stats.really_freed += m->UsedSize(); + + allocator.Deallocate(cache_, p); + } + + void *Allocate(uptr size) { + return allocator.Allocate(cache_, size, 1, false); + } + + void Deallocate(void *p) { + allocator.Deallocate(cache_, p); + } + + AllocatorCache *cache_; +}; + +void InitializeAllocator() { + allocator.Init(); + quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine); +} + +void ReInitializeAllocator() { + quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine); +} + +static void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack, + AllocType alloc_type, bool can_fill) { + if (UNLIKELY(!asan_inited)) + AsanInitFromRtl(); + Flags &fl = *flags(); + CHECK(stack); + const uptr min_alignment = SHADOW_GRANULARITY; + if (alignment < min_alignment) + alignment = min_alignment; + if (size == 0) { + // We'd be happy to avoid allocating memory for zero-size requests, but + // some programs/tests depend on this behavior and assume that malloc would + // not return NULL even for zero-size allocations. Moreover, it looks like + // operator new should never return NULL, and results of consecutive "new" + // calls must be different even if the allocated size is zero. + size = 1; + } + CHECK(IsPowerOfTwo(alignment)); + uptr rz_log = ComputeRZLog(size); + uptr rz_size = RZLog2Size(rz_log); + uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment); + uptr needed_size = rounded_size + rz_size; + if (alignment > min_alignment) + needed_size += alignment; + bool using_primary_allocator = true; + // If we are allocating from the secondary allocator, there will be no + // automatic right redzone, so add the right redzone manually. 
+ if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) { + needed_size += rz_size; + using_primary_allocator = false; + } + CHECK(IsAligned(needed_size, min_alignment)); + if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) { + Report("WARNING: AddressSanitizer failed to allocate %p bytes\n", + (void*)size); + return AllocatorReturnNull(); + } + + AsanThread *t = GetCurrentThread(); + void *allocated; + if (t) { + AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage()); + allocated = allocator.Allocate(cache, needed_size, 8, false); + } else { + SpinMutexLock l(&fallback_mutex); + AllocatorCache *cache = &fallback_allocator_cache; + allocated = allocator.Allocate(cache, needed_size, 8, false); + } + + if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && flags()->poison_heap) { + // Heap poisoning is enabled, but the allocator provides an unpoisoned + // chunk. This is possible if flags()->poison_heap was disabled for some + // time, for example, due to flags()->start_disabled. + // Anyway, poison the block before using it for anything else. + uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated); + PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic); + } + + uptr alloc_beg = reinterpret_cast<uptr>(allocated); + uptr alloc_end = alloc_beg + needed_size; + uptr beg_plus_redzone = alloc_beg + rz_size; + uptr user_beg = beg_plus_redzone; + if (!IsAligned(user_beg, alignment)) + user_beg = RoundUpTo(user_beg, alignment); + uptr user_end = user_beg + size; + CHECK_LE(user_end, alloc_end); + uptr chunk_beg = user_beg - kChunkHeaderSize; + AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg); + m->alloc_type = alloc_type; + m->rz_log = rz_log; + u32 alloc_tid = t ? t->tid() : 0; + m->alloc_tid = alloc_tid; + CHECK_EQ(alloc_tid, m->alloc_tid); // Does alloc_tid fit into the bitfield? 
+ m->free_tid = kInvalidTid; + m->from_memalign = user_beg != beg_plus_redzone; + if (alloc_beg != chunk_beg) { + CHECK_LE(alloc_beg+ 2 * sizeof(uptr), chunk_beg); + reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic; + reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg; + } + if (using_primary_allocator) { + CHECK(size); + m->user_requested_size = size; + CHECK(allocator.FromPrimary(allocated)); + } else { + CHECK(!allocator.FromPrimary(allocated)); + m->user_requested_size = SizeClassMap::kMaxSize; + uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated)); + meta[0] = size; + meta[1] = chunk_beg; + } + + m->alloc_context_id = StackDepotPut(*stack); + + uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY); + // Unpoison the bulk of the memory region. + if (size_rounded_down_to_granularity) + PoisonShadow(user_beg, size_rounded_down_to_granularity, 0); + // Deal with the end of the region if size is not aligned to granularity. + if (size != size_rounded_down_to_granularity && fl.poison_heap) { + u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity); + *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0; + } + + AsanStats &thread_stats = GetCurrentThreadStats(); + thread_stats.mallocs++; + thread_stats.malloced += size; + thread_stats.malloced_redzones += needed_size - size; + uptr class_id = Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size)); + thread_stats.malloced_by_size[class_id]++; + if (needed_size > SizeClassMap::kMaxSize) + thread_stats.malloc_large++; + + void *res = reinterpret_cast<void *>(user_beg); + if (can_fill && fl.max_malloc_fill_size) { + uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size); + REAL(memset)(res, fl.malloc_fill_byte, fill_size); + } +#if CAN_SANITIZE_LEAKS + m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored + : __lsan::kDirectlyLeaked; +#endif + // Must be the last mutation of metadata in this function. 
+ atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release); + ASAN_MALLOC_HOOK(res, size); + return res; +} + +static void ReportInvalidFree(void *ptr, u8 chunk_state, + BufferedStackTrace *stack) { + if (chunk_state == CHUNK_QUARANTINE) + ReportDoubleFree((uptr)ptr, stack); + else + ReportFreeNotMalloced((uptr)ptr, stack); +} + +static void AtomicallySetQuarantineFlag(AsanChunk *m, void *ptr, + BufferedStackTrace *stack) { + u8 old_chunk_state = CHUNK_ALLOCATED; + // Flip the chunk_state atomically to avoid race on double-free. + if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state, + CHUNK_QUARANTINE, memory_order_acquire)) + ReportInvalidFree(ptr, old_chunk_state, stack); + CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state); +} + +// Expects the chunk to already be marked as quarantined by using +// AtomicallySetQuarantineFlag. +static void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack, + AllocType alloc_type) { + CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE); + + if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch) + ReportAllocTypeMismatch((uptr)ptr, stack, + (AllocType)m->alloc_type, (AllocType)alloc_type); + + CHECK_GE(m->alloc_tid, 0); + if (SANITIZER_WORDSIZE == 64) // On 32-bits this resides in user area. + CHECK_EQ(m->free_tid, kInvalidTid); + AsanThread *t = GetCurrentThread(); + m->free_tid = t ? t->tid() : 0; + m->free_context_id = StackDepotPut(*stack); + // Poison the region. + PoisonShadow(m->Beg(), + RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY), + kAsanHeapFreeMagic); + + AsanStats &thread_stats = GetCurrentThreadStats(); + thread_stats.frees++; + thread_stats.freed += m->UsedSize(); + + // Push into quarantine. 
+ if (t) { + AsanThreadLocalMallocStorage *ms = &t->malloc_storage(); + AllocatorCache *ac = GetAllocatorCache(ms); + quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac), + m, m->UsedSize()); + } else { + SpinMutexLock l(&fallback_mutex); + AllocatorCache *ac = &fallback_allocator_cache; + quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac), + m, m->UsedSize()); + } +} + +static void Deallocate(void *ptr, uptr delete_size, BufferedStackTrace *stack, + AllocType alloc_type) { + uptr p = reinterpret_cast<uptr>(ptr); + if (p == 0) return; + + uptr chunk_beg = p - kChunkHeaderSize; + AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg); + if (delete_size && flags()->new_delete_type_mismatch && + delete_size != m->UsedSize()) { + ReportNewDeleteSizeMismatch(p, delete_size, stack); + } + ASAN_FREE_HOOK(ptr); + // Must mark the chunk as quarantined before any changes to its metadata. + AtomicallySetQuarantineFlag(m, ptr, stack); + QuarantineChunk(m, ptr, stack, alloc_type); +} + +static void *Reallocate(void *old_ptr, uptr new_size, + BufferedStackTrace *stack) { + CHECK(old_ptr && new_size); + uptr p = reinterpret_cast<uptr>(old_ptr); + uptr chunk_beg = p - kChunkHeaderSize; + AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg); + + AsanStats &thread_stats = GetCurrentThreadStats(); + thread_stats.reallocs++; + thread_stats.realloced += new_size; + + void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true); + if (new_ptr) { + u8 chunk_state = m->chunk_state; + if (chunk_state != CHUNK_ALLOCATED) + ReportInvalidFree(old_ptr, chunk_state, stack); + CHECK_NE(REAL(memcpy), (void*)0); + uptr memcpy_size = Min(new_size, m->UsedSize()); + // If realloc() races with free(), we may start copying freed memory. + // However, we will report racy double-free later anyway. 
+ REAL(memcpy)(new_ptr, old_ptr, memcpy_size); + Deallocate(old_ptr, 0, stack, FROM_MALLOC); + } + return new_ptr; +} + +// Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg). +static AsanChunk *GetAsanChunk(void *alloc_beg) { + if (!alloc_beg) return 0; + if (!allocator.FromPrimary(alloc_beg)) { + uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg)); + AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]); + return m; + } + uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg); + if (alloc_magic[0] == kAllocBegMagic) + return reinterpret_cast<AsanChunk *>(alloc_magic[1]); + return reinterpret_cast<AsanChunk *>(alloc_beg); +} + +static AsanChunk *GetAsanChunkByAddr(uptr p) { + void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p)); + return GetAsanChunk(alloc_beg); +} + +// Allocator must be locked when this function is called. +static AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) { + void *alloc_beg = + allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p)); + return GetAsanChunk(alloc_beg); +} + +static uptr AllocationSize(uptr p) { + AsanChunk *m = GetAsanChunkByAddr(p); + if (!m) return 0; + if (m->chunk_state != CHUNK_ALLOCATED) return 0; + if (m->Beg() != p) return 0; + return m->UsedSize(); +} + +// We have an address between two chunks, and we want to report just one. +AsanChunk *ChooseChunk(uptr addr, + AsanChunk *left_chunk, AsanChunk *right_chunk) { + // Prefer an allocated chunk over freed chunk and freed chunk + // over available chunk. + if (left_chunk->chunk_state != right_chunk->chunk_state) { + if (left_chunk->chunk_state == CHUNK_ALLOCATED) + return left_chunk; + if (right_chunk->chunk_state == CHUNK_ALLOCATED) + return right_chunk; + if (left_chunk->chunk_state == CHUNK_QUARANTINE) + return left_chunk; + if (right_chunk->chunk_state == CHUNK_QUARANTINE) + return right_chunk; + } + // Same chunk_state: choose based on offset. 
+ sptr l_offset = 0, r_offset = 0; + CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset)); + CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset)); + if (l_offset < r_offset) + return left_chunk; + return right_chunk; +} + +AsanChunkView FindHeapChunkByAddress(uptr addr) { + AsanChunk *m1 = GetAsanChunkByAddr(addr); + if (!m1) return AsanChunkView(m1); + sptr offset = 0; + if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) { + // The address is in the chunk's left redzone, so maybe it is actually + // a right buffer overflow from the other chunk to the left. + // Search a bit to the left to see if there is another chunk. + AsanChunk *m2 = 0; + for (uptr l = 1; l < GetPageSizeCached(); l++) { + m2 = GetAsanChunkByAddr(addr - l); + if (m2 == m1) continue; // Still the same chunk. + break; + } + if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset)) + m1 = ChooseChunk(addr, m2, m1); + } + return AsanChunkView(m1); +} + +void AsanThreadLocalMallocStorage::CommitBack() { + AllocatorCache *ac = GetAllocatorCache(this); + quarantine.Drain(GetQuarantineCache(this), QuarantineCallback(ac)); + allocator.SwallowCache(GetAllocatorCache(this)); +} + +void PrintInternalAllocatorStats() { + allocator.PrintStats(); +} + +void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack, + AllocType alloc_type) { + return Allocate(size, alignment, stack, alloc_type, true); +} + +void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) { + Deallocate(ptr, 0, stack, alloc_type); +} + +void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack, + AllocType alloc_type) { + Deallocate(ptr, size, stack, alloc_type); +} + +void *asan_malloc(uptr size, BufferedStackTrace *stack) { + return Allocate(size, 8, stack, FROM_MALLOC, true); +} + +void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) { + if (CallocShouldReturnNullDueToOverflow(size, nmemb)) + return AllocatorReturnNull(); + void *ptr = 
Allocate(nmemb * size, 8, stack, FROM_MALLOC, false); + // If the memory comes from the secondary allocator no need to clear it + // as it comes directly from mmap. + if (ptr && allocator.FromPrimary(ptr)) + REAL(memset)(ptr, 0, nmemb * size); + return ptr; +} + +void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) { + if (p == 0) + return Allocate(size, 8, stack, FROM_MALLOC, true); + if (size == 0) { + Deallocate(p, 0, stack, FROM_MALLOC); + return 0; + } + return Reallocate(p, size, stack); +} + +void *asan_valloc(uptr size, BufferedStackTrace *stack) { + return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true); +} + +void *asan_pvalloc(uptr size, BufferedStackTrace *stack) { + uptr PageSize = GetPageSizeCached(); + size = RoundUpTo(size, PageSize); + if (size == 0) { + // pvalloc(0) should allocate one page. + size = PageSize; + } + return Allocate(size, PageSize, stack, FROM_MALLOC, true); +} + +int asan_posix_memalign(void **memptr, uptr alignment, uptr size, + BufferedStackTrace *stack) { + void *ptr = Allocate(size, alignment, stack, FROM_MALLOC, true); + CHECK(IsAligned((uptr)ptr, alignment)); + *memptr = ptr; + return 0; +} + +uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp) { + if (ptr == 0) return 0; + uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr)); + if (flags()->check_malloc_usable_size && (usable_size == 0)) { + GET_STACK_TRACE_FATAL(pc, bp); + ReportMallocUsableSizeNotOwned((uptr)ptr, &stack); + } + return usable_size; +} + +uptr asan_mz_size(const void *ptr) { + return AllocationSize(reinterpret_cast<uptr>(ptr)); +} + +void asan_mz_force_lock() { + allocator.ForceLock(); + fallback_mutex.Lock(); +} + +void asan_mz_force_unlock() { + fallback_mutex.Unlock(); + allocator.ForceUnlock(); +} + +} // namespace __asan + +// --- Implementation of LSan-specific functions --- {{{1 +namespace __lsan { +void LockAllocator() { + __asan::allocator.ForceLock(); +} + +void UnlockAllocator() { + 
__asan::allocator.ForceUnlock(); +} + +void GetAllocatorGlobalRange(uptr *begin, uptr *end) { + *begin = (uptr)&__asan::allocator; + *end = *begin + sizeof(__asan::allocator); +} + +uptr PointsIntoChunk(void* p) { + uptr addr = reinterpret_cast<uptr>(p); + __asan::AsanChunk *m = __asan::GetAsanChunkByAddrFastLocked(addr); + if (!m) return 0; + uptr chunk = m->Beg(); + if (m->chunk_state != __asan::CHUNK_ALLOCATED) + return 0; + if (m->AddrIsInside(addr, /*locked_version=*/true)) + return chunk; + if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true), + addr)) + return chunk; + return 0; +} + +uptr GetUserBegin(uptr chunk) { + __asan::AsanChunk *m = + __asan::GetAsanChunkByAddrFastLocked(chunk); + CHECK(m); + return m->Beg(); +} + +LsanMetadata::LsanMetadata(uptr chunk) { + metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize); +} + +bool LsanMetadata::allocated() const { + __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); + return m->chunk_state == __asan::CHUNK_ALLOCATED; +} + +ChunkTag LsanMetadata::tag() const { + __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); + return static_cast<ChunkTag>(m->lsan_tag); +} + +void LsanMetadata::set_tag(ChunkTag value) { + __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); + m->lsan_tag = value; +} + +uptr LsanMetadata::requested_size() const { + __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); + return m->UsedSize(/*locked_version=*/true); +} + +u32 LsanMetadata::stack_trace_id() const { + __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); + return m->alloc_context_id; +} + +void ForEachChunk(ForEachChunkCallback callback, void *arg) { + __asan::allocator.ForEachChunk(callback, arg); +} + +IgnoreObjectResult IgnoreObjectLocked(const void *p) { + uptr addr = reinterpret_cast<uptr>(p); + __asan::AsanChunk *m = __asan::GetAsanChunkByAddr(addr); + if (!m) return 
kIgnoreObjectInvalid; + if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) { + if (m->lsan_tag == kIgnored) + return kIgnoreObjectAlreadyIgnored; + m->lsan_tag = __lsan::kIgnored; + return kIgnoreObjectSuccess; + } else { + return kIgnoreObjectInvalid; + } +} +} // namespace __lsan + +// ---------------------- Interface ---------------- {{{1 +using namespace __asan; // NOLINT + +// ASan allocator doesn't reserve extra bytes, so normally we would +// just return "size". We don't want to expose our redzone sizes, etc here. +uptr __sanitizer_get_estimated_allocated_size(uptr size) { + return size; +} + +int __sanitizer_get_ownership(const void *p) { + uptr ptr = reinterpret_cast<uptr>(p); + return (AllocationSize(ptr) > 0); +} + +uptr __sanitizer_get_allocated_size(const void *p) { + if (p == 0) return 0; + uptr ptr = reinterpret_cast<uptr>(p); + uptr allocated_size = AllocationSize(ptr); + // Die if p is not malloced or if it is already freed. + if (allocated_size == 0) { + GET_STACK_TRACE_FATAL_HERE; + ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack); + } + return allocated_size; +} + +#if !SANITIZER_SUPPORTS_WEAK_HOOKS +// Provide default (no-op) implementation of malloc hooks. +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE +void __sanitizer_malloc_hook(void *ptr, uptr size) { + (void)ptr; + (void)size; +} +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE +void __sanitizer_free_hook(void *ptr) { + (void)ptr; +} +} // extern "C" +#endif diff --git a/contrib/compiler-rt/lib/asan/asan_blacklist.txt b/contrib/compiler-rt/lib/asan/asan_blacklist.txt new file mode 100644 index 000000000000..c25921fd5fe7 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_blacklist.txt @@ -0,0 +1,13 @@ +# Blacklist for AddressSanitizer. Turns off instrumentation of particular +# functions or sources. Use with care. You may set location of blacklist +# at compile-time using -fsanitize-blacklist=<path> flag. 
+ +# Example usage: +# fun:*bad_function_name* +# src:file_with_tricky_code.cc +# global:*global_with_bad_access_or_initialization* +# global:*global_with_initialization_issues*=init +# type:*Namespace::ClassName*=init + +# Stack buffer overflow in VC/INCLUDE/xlocnum, see http://goo.gl/L4qqUG +fun:*_Find_elem@*@std* diff --git a/contrib/compiler-rt/lib/asan/asan_debugging.cc b/contrib/compiler-rt/lib/asan/asan_debugging.cc new file mode 100644 index 000000000000..2b66dd5265fc --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_debugging.cc @@ -0,0 +1,141 @@ +//===-- asan_debugging.cc -------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// This file contains various functions that are generally useful to call when +// using a debugger (LLDB, GDB). 
+//===----------------------------------------------------------------------===// + +#include "asan_allocator.h" +#include "asan_flags.h" +#include "asan_internal.h" +#include "asan_mapping.h" +#include "asan_report.h" +#include "asan_thread.h" + +namespace __asan { + +void GetInfoForStackVar(uptr addr, AddressDescription *descr, AsanThread *t) { + descr->name[0] = 0; + descr->region_address = 0; + descr->region_size = 0; + descr->region_kind = "stack"; + + AsanThread::StackFrameAccess access; + if (!t->GetStackFrameAccessByAddr(addr, &access)) + return; + InternalMmapVector<StackVarDescr> vars(16); + if (!ParseFrameDescription(access.frame_descr, &vars)) { + return; + } + + for (uptr i = 0; i < vars.size(); i++) { + if (access.offset <= vars[i].beg + vars[i].size) { + internal_strncat(descr->name, vars[i].name_pos, + Min(descr->name_size, vars[i].name_len)); + descr->region_address = addr - (access.offset - vars[i].beg); + descr->region_size = vars[i].size; + return; + } + } +} + +void GetInfoForHeapAddress(uptr addr, AddressDescription *descr) { + AsanChunkView chunk = FindHeapChunkByAddress(addr); + + descr->name[0] = 0; + descr->region_address = 0; + descr->region_size = 0; + + if (!chunk.IsValid()) { + descr->region_kind = "heap-invalid"; + return; + } + + descr->region_address = chunk.Beg(); + descr->region_size = chunk.UsedSize(); + descr->region_kind = "heap"; +} + +void AsanLocateAddress(uptr addr, AddressDescription *descr) { + if (DescribeAddressIfShadow(addr, descr, /* print */ false)) { + return; + } + if (GetInfoForAddressIfGlobal(addr, descr)) { + return; + } + asanThreadRegistry().Lock(); + AsanThread *thread = FindThreadByStackAddress(addr); + asanThreadRegistry().Unlock(); + if (thread) { + GetInfoForStackVar(addr, descr, thread); + return; + } + GetInfoForHeapAddress(addr, descr); +} + +uptr AsanGetStack(uptr addr, uptr *trace, uptr size, u32 *thread_id, + bool alloc_stack) { + AsanChunkView chunk = FindHeapChunkByAddress(addr); + if 
(!chunk.IsValid()) return 0; + + StackTrace stack(nullptr, 0); + if (alloc_stack) { + if (chunk.AllocTid() == kInvalidTid) return 0; + stack = chunk.GetAllocStack(); + if (thread_id) *thread_id = chunk.AllocTid(); + } else { + if (chunk.FreeTid() == kInvalidTid) return 0; + stack = chunk.GetFreeStack(); + if (thread_id) *thread_id = chunk.FreeTid(); + } + + if (trace && size) { + size = Min(size, Min(stack.size, kStackTraceMax)); + for (uptr i = 0; i < size; i++) + trace[i] = StackTrace::GetPreviousInstructionPc(stack.trace[i]); + + return size; + } + + return 0; +} + +} // namespace __asan + +using namespace __asan; + +SANITIZER_INTERFACE_ATTRIBUTE +const char *__asan_locate_address(uptr addr, char *name, uptr name_size, + uptr *region_address, uptr *region_size) { + AddressDescription descr = { name, name_size, 0, 0, 0 }; + AsanLocateAddress(addr, &descr); + if (region_address) *region_address = descr.region_address; + if (region_size) *region_size = descr.region_size; + return descr.region_kind; +} + +SANITIZER_INTERFACE_ATTRIBUTE +uptr __asan_get_alloc_stack(uptr addr, uptr *trace, uptr size, u32 *thread_id) { + return AsanGetStack(addr, trace, size, thread_id, /* alloc_stack */ true); +} + +SANITIZER_INTERFACE_ATTRIBUTE +uptr __asan_get_free_stack(uptr addr, uptr *trace, uptr size, u32 *thread_id) { + return AsanGetStack(addr, trace, size, thread_id, /* alloc_stack */ false); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void __asan_get_shadow_mapping(uptr *shadow_scale, uptr *shadow_offset) { + if (shadow_scale) + *shadow_scale = SHADOW_SCALE; + if (shadow_offset) + *shadow_offset = SHADOW_OFFSET; +} diff --git a/contrib/compiler-rt/lib/asan/asan_fake_stack.cc b/contrib/compiler-rt/lib/asan/asan_fake_stack.cc new file mode 100644 index 000000000000..c95bc11f6cd3 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_fake_stack.cc @@ -0,0 +1,257 @@ +//===-- asan_fake_stack.cc ------------------------------------------------===// +// +// The LLVM Compiler Infrastructure 
+// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// FakeStack is used to detect use-after-return bugs. +//===----------------------------------------------------------------------===// +#include "asan_allocator.h" +#include "asan_poisoning.h" +#include "asan_thread.h" + +namespace __asan { + +static const u64 kMagic1 = kAsanStackAfterReturnMagic; +static const u64 kMagic2 = (kMagic1 << 8) | kMagic1; +static const u64 kMagic4 = (kMagic2 << 16) | kMagic2; +static const u64 kMagic8 = (kMagic4 << 32) | kMagic4; + +// For small size classes inline PoisonShadow for better performance. +ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) { + CHECK_EQ(SHADOW_SCALE, 3); // This code expects SHADOW_SCALE=3. + u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr)); + if (class_id <= 6) { + for (uptr i = 0; i < (1U << class_id); i++) { + shadow[i] = magic; + SanitizerBreakOptimization(0); // Make sure this does not become memset. + } + } else { + // The size class is too big, it's cheaper to poison only size bytes. + PoisonShadow(ptr, size, static_cast<u8>(magic)); + } +} + +FakeStack *FakeStack::Create(uptr stack_size_log) { + static uptr kMinStackSizeLog = 16; + static uptr kMaxStackSizeLog = FIRST_32_SECOND_64(24, 28); + if (stack_size_log < kMinStackSizeLog) + stack_size_log = kMinStackSizeLog; + if (stack_size_log > kMaxStackSizeLog) + stack_size_log = kMaxStackSizeLog; + uptr size = RequiredSize(stack_size_log); + FakeStack *res = reinterpret_cast<FakeStack *>( + flags()->uar_noreserve ? 
MmapNoReserveOrDie(size, "FakeStack") + : MmapOrDie(size, "FakeStack")); + res->stack_size_log_ = stack_size_log; + u8 *p = reinterpret_cast<u8 *>(res); + VReport(1, "T%d: FakeStack created: %p -- %p stack_size_log: %zd; " + "mmapped %zdK, noreserve=%d \n", + GetCurrentTidOrInvalid(), p, + p + FakeStack::RequiredSize(stack_size_log), stack_size_log, + size >> 10, flags()->uar_noreserve); + return res; +} + +void FakeStack::Destroy(int tid) { + PoisonAll(0); + if (common_flags()->verbosity >= 2) { + InternalScopedString str(kNumberOfSizeClasses * 50); + for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) + str.append("%zd: %zd/%zd; ", class_id, hint_position_[class_id], + NumberOfFrames(stack_size_log(), class_id)); + Report("T%d: FakeStack destroyed: %s\n", tid, str.data()); + } + uptr size = RequiredSize(stack_size_log_); + FlushUnneededASanShadowMemory(reinterpret_cast<uptr>(this), size); + UnmapOrDie(this, size); +} + +void FakeStack::PoisonAll(u8 magic) { + PoisonShadow(reinterpret_cast<uptr>(this), RequiredSize(stack_size_log()), + magic); +} + +ALWAYS_INLINE USED +FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id, + uptr real_stack) { + CHECK_LT(class_id, kNumberOfSizeClasses); + if (needs_gc_) + GC(real_stack); + uptr &hint_position = hint_position_[class_id]; + const int num_iter = NumberOfFrames(stack_size_log, class_id); + u8 *flags = GetFlags(stack_size_log, class_id); + for (int i = 0; i < num_iter; i++) { + uptr pos = ModuloNumberOfFrames(stack_size_log, class_id, hint_position++); + // This part is tricky. On one hand, checking and setting flags[pos] + // should be atomic to ensure async-signal safety. But on the other hand, + // if the signal arrives between checking and setting flags[pos], the + // signal handler's fake stack will start from a different hint_position + // and so will not touch this particular byte. 
So, it is safe to do this
+    // with regular non-atomic load and store (at least I was not able to make
+    // this code crash).
+    if (flags[pos]) continue;
+    flags[pos] = 1;
+    FakeFrame *res = reinterpret_cast<FakeFrame *>(
+        GetFrame(stack_size_log, class_id, pos));
+    res->real_stack = real_stack;
+    *SavedFlagPtr(reinterpret_cast<uptr>(res), class_id) = &flags[pos];
+    return res;
+  }
+  return 0; // We are out of fake stack.
+}
+
+uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr *frame_beg, uptr *frame_end) {
+  uptr stack_size_log = this->stack_size_log();
+  uptr beg = reinterpret_cast<uptr>(GetFrame(stack_size_log, 0, 0));
+  uptr end = reinterpret_cast<uptr>(this) + RequiredSize(stack_size_log);
+  if (ptr < beg || ptr >= end) return 0;
+  uptr class_id = (ptr - beg) >> stack_size_log;
+  uptr base = beg + (class_id << stack_size_log);
+  CHECK_LE(base, ptr);
+  CHECK_LT(ptr, base + (1UL << stack_size_log));
+  uptr pos = (ptr - base) >> (kMinStackFrameSizeLog + class_id);
+  uptr res = base + pos * BytesInSizeClass(class_id);
+  *frame_end = res + BytesInSizeClass(class_id);
+  *frame_beg = res + sizeof(FakeFrame);
+  return res;
+}
+
+void FakeStack::HandleNoReturn() {
+  needs_gc_ = true;
+}
+
+// When throw, longjmp or some such happens we don't call OnFree() and
+// as the result may leak one or more fake frames, but the good news is that
+// we are notified about all such events by HandleNoReturn().
+// If we recently had such no-return event we need to collect garbage frames.
+// We do it based on their 'real_stack' values -- everything that is lower
+// than the current real_stack is garbage.
+NOINLINE void FakeStack::GC(uptr real_stack) {
+  uptr collected = 0;
+  for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
+    u8 *flags = GetFlags(stack_size_log(), class_id);
+    for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
+         i++) {
+      if (flags[i] == 0) continue;  // not allocated.
+ FakeFrame *ff = reinterpret_cast<FakeFrame *>( + GetFrame(stack_size_log(), class_id, i)); + if (ff->real_stack < real_stack) { + flags[i] = 0; + collected++; + } + } + } + needs_gc_ = false; +} + +void FakeStack::ForEachFakeFrame(RangeIteratorCallback callback, void *arg) { + for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) { + u8 *flags = GetFlags(stack_size_log(), class_id); + for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n; + i++) { + if (flags[i] == 0) continue; // not allocated. + FakeFrame *ff = reinterpret_cast<FakeFrame *>( + GetFrame(stack_size_log(), class_id, i)); + uptr begin = reinterpret_cast<uptr>(ff); + callback(begin, begin + FakeStack::BytesInSizeClass(class_id), arg); + } + } +} + +#if SANITIZER_LINUX && !SANITIZER_ANDROID +static THREADLOCAL FakeStack *fake_stack_tls; + +FakeStack *GetTLSFakeStack() { + return fake_stack_tls; +} +void SetTLSFakeStack(FakeStack *fs) { + fake_stack_tls = fs; +} +#else +FakeStack *GetTLSFakeStack() { return 0; } +void SetTLSFakeStack(FakeStack *fs) { } +#endif // SANITIZER_LINUX && !SANITIZER_ANDROID + +static FakeStack *GetFakeStack() { + AsanThread *t = GetCurrentThread(); + if (!t) return 0; + return t->fake_stack(); +} + +static FakeStack *GetFakeStackFast() { + if (FakeStack *fs = GetTLSFakeStack()) + return fs; + if (!__asan_option_detect_stack_use_after_return) + return 0; + return GetFakeStack(); +} + +ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size, uptr real_stack) { + FakeStack *fs = GetFakeStackFast(); + if (!fs) return real_stack; + FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack); + if (!ff) + return real_stack; // Out of fake stack, return the real one. 
+ uptr ptr = reinterpret_cast<uptr>(ff); + SetShadow(ptr, size, class_id, 0); + return ptr; +} + +ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size, uptr real_stack) { + if (ptr == real_stack) + return; + FakeStack::Deallocate(ptr, class_id); + SetShadow(ptr, size, class_id, kMagic8); +} + +} // namespace __asan + +// ---------------------- Interface ---------------- {{{1 +using namespace __asan; +#define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id) \ + extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr \ + __asan_stack_malloc_##class_id(uptr size, uptr real_stack) { \ + return OnMalloc(class_id, size, real_stack); \ + } \ + extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \ + uptr ptr, uptr size, uptr real_stack) { \ + OnFree(ptr, class_id, size, real_stack); \ + } + +DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(0) +DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(1) +DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(2) +DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(3) +DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(4) +DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(5) +DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(6) +DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(7) +DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(8) +DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(9) +DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(10) +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE +void *__asan_get_current_fake_stack() { return GetFakeStackFast(); } + +SANITIZER_INTERFACE_ATTRIBUTE +void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg, + void **end) { + FakeStack *fs = reinterpret_cast<FakeStack*>(fake_stack); + if (!fs) return 0; + uptr frame_beg, frame_end; + FakeFrame *frame = reinterpret_cast<FakeFrame *>(fs->AddrIsInFakeStack( + reinterpret_cast<uptr>(addr), &frame_beg, &frame_end)); + if (!frame) return 0; + if (frame->magic != kCurrentStackFrameMagic) + return 0; + if (beg) *beg = reinterpret_cast<void*>(frame_beg); + if (end) *end = reinterpret_cast<void*>(frame_end); + return 
reinterpret_cast<void*>(frame->real_stack);
+}
+}  // extern "C"
diff --git a/contrib/compiler-rt/lib/asan/asan_fake_stack.h b/contrib/compiler-rt/lib/asan/asan_fake_stack.h
new file mode 100644
index 000000000000..3b1d9eb3b57e
--- /dev/null
+++ b/contrib/compiler-rt/lib/asan/asan_fake_stack.h
@@ -0,0 +1,175 @@
+//===-- asan_fake_stack.h ---------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan-private header for asan_fake_stack.cc, implements FakeStack.
+//===----------------------------------------------------------------------===//
+
+#ifndef ASAN_FAKE_STACK_H
+#define ASAN_FAKE_STACK_H
+
+#include "sanitizer_common/sanitizer_common.h"
+
+namespace __asan {
+
+// Fake stack frame contains local variables of one function.
+struct FakeFrame {
+  uptr magic;  // Modified by the instrumented code.
+  uptr descr;  // Modified by the instrumented code.
+  uptr pc;     // Modified by the instrumented code.
+  uptr real_stack;
+};
+
+// For each thread we create a fake stack and place stack objects on this fake
+// stack instead of the real stack. The fake stack is not really a stack but
+// a fast malloc-like allocator so that when a function exits the fake stack
+// is not popped but remains there for quite some time until gets used again.
+// So, we poison the objects on the fake stack when function returns.
+// It helps us find use-after-return bugs.
+//
+// The FakeStack object is allocated by a single mmap call and has no other
+// pointers. The size of the fake stack depends on the actual thread stack size
+// and thus can not be a constant.
+// stack_size is a power of two greater or equal to the thread's stack size;
+// we store it as its logarithm (stack_size_log).
+// FakeStack has kNumberOfSizeClasses (11) size classes, each size class
+// is a power of two, starting from 64 bytes. Each size class occupies
+// stack_size bytes and thus can allocate
+// NumberOfFrames=(stack_size/BytesInSizeClass) fake frames (also a power of 2).
+// For each size class we have NumberOfFrames allocation flags,
+// each flag indicates whether the given frame is currently allocated.
+// All flags for size classes 0 .. 10 are stored in a single contiguous region
+// followed by another contiguous region which contains the actual memory for
+// size classes. The addresses are computed by GetFlags and GetFrame without
+// any memory accesses solely based on 'this' and stack_size_log.
+// Allocate() flips the appropriate allocation flag atomically, thus achieving
+// async-signal safety.
+// This allocator does not have quarantine per se, but it tries to allocate the
+// frames in round robin fashion to maximize the delay between a deallocation
+// and the next allocation.
+class FakeStack {
+  static const uptr kMinStackFrameSizeLog = 6;  // Min frame is 64B.
+  static const uptr kMaxStackFrameSizeLog = 16; // Max stack frame is 64K.
+
+ public:
+  static const uptr kNumberOfSizeClasses =
+       kMaxStackFrameSizeLog - kMinStackFrameSizeLog + 1;
+
+  // CTOR: create the FakeStack as a single mmap-ed object.
+  static FakeStack *Create(uptr stack_size_log);
+
+  void Destroy(int tid);
+
+  // stack_size_log is at least 15 (stack_size >= 32K).
+  static uptr SizeRequiredForFlags(uptr stack_size_log) {
+    return 1UL << (stack_size_log + 1 - kMinStackFrameSizeLog);
+  }
+
+  // Each size class occupies stack_size bytes.
+  static uptr SizeRequiredForFrames(uptr stack_size_log) {
+    return (1ULL << stack_size_log) * kNumberOfSizeClasses;
+  }
+
+  // Number of bytes required for the whole object.
+  static uptr RequiredSize(uptr stack_size_log) {
+    return kFlagsOffset + SizeRequiredForFlags(stack_size_log) +
+           SizeRequiredForFrames(stack_size_log);
+  }
+
+  // Offset of the given flag from the first flag.
+  // The flags for class 0 begin at offset  000000000
+  // The flags for class 1 begin at offset  100000000
+  // ....................2................  110000000
+  // ....................3................  111000000
+  // and so on.
+  static uptr FlagsOffset(uptr stack_size_log, uptr class_id) {
+    uptr t = kNumberOfSizeClasses - 1 - class_id;
+    const uptr all_ones = (1 << (kNumberOfSizeClasses - 1)) - 1;
+    return ((all_ones >> t) << t) << (stack_size_log - 15);
+  }
+
+  static uptr NumberOfFrames(uptr stack_size_log, uptr class_id) {
+    return 1UL << (stack_size_log - kMinStackFrameSizeLog - class_id);
+  }
+
+  // Divide n by the number of frames in size class.
+  static uptr ModuloNumberOfFrames(uptr stack_size_log, uptr class_id, uptr n) {
+    return n & (NumberOfFrames(stack_size_log, class_id) - 1);
+  }
+
+  // Get the pointer to the flags of the given class_id.
+  u8 *GetFlags(uptr stack_size_log, uptr class_id) {
+    return reinterpret_cast<u8 *>(this) + kFlagsOffset +
+           FlagsOffset(stack_size_log, class_id);
+  }
+
+  // Get frame by class_id and pos.
+  u8 *GetFrame(uptr stack_size_log, uptr class_id, uptr pos) {
+    return reinterpret_cast<u8 *>(this) + kFlagsOffset +
+           SizeRequiredForFlags(stack_size_log) +
+        (1 << stack_size_log) * class_id + BytesInSizeClass(class_id) * pos;
+  }
+
+  // Allocate the fake frame.
+  FakeFrame *Allocate(uptr stack_size_log, uptr class_id, uptr real_stack);
+
+  // Deallocate the fake frame: read the saved flag address and write 0 there.
+  static void Deallocate(uptr x, uptr class_id) {
+    **SavedFlagPtr(x, class_id) = 0;
+  }
+
+  // Poison the entire FakeStack's shadow with the magic value.
+  void PoisonAll(u8 magic);
+
+  // Return the beginning of the FakeFrame or 0 if the address is not ours.
+  uptr AddrIsInFakeStack(uptr addr, uptr *frame_beg, uptr *frame_end);
+  USED uptr AddrIsInFakeStack(uptr addr) {
+    uptr t1, t2;
+    return AddrIsInFakeStack(addr, &t1, &t2);
+  }
+
+  // Number of bytes in a fake frame of this size class.
+  static uptr BytesInSizeClass(uptr class_id) {
+    return 1UL << (class_id + kMinStackFrameSizeLog);
+  }
+
+  // The fake frame is guaranteed to have a right redzone.
+  // We use the last word of that redzone to store the address of the flag
+  // that corresponds to the current frame to make faster deallocation.
+  static u8 **SavedFlagPtr(uptr x, uptr class_id) {
+    return reinterpret_cast<u8 **>(x + BytesInSizeClass(class_id) - sizeof(x));
+  }
+
+  uptr stack_size_log() const { return stack_size_log_; }
+
+  void HandleNoReturn();
+  void GC(uptr real_stack);
+
+  void ForEachFakeFrame(RangeIteratorCallback callback, void *arg);
+
+ private:
+  FakeStack() { }
+  static const uptr kFlagsOffset = 4096;  // This is where the flags begin.
+  // Must match the number of uses of DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID
+  COMPILER_CHECK(kNumberOfSizeClasses == 11);
+  static const uptr kMaxStackMallocSize = 1 << kMaxStackFrameSizeLog;
+
+  uptr hint_position_[kNumberOfSizeClasses];
+  uptr stack_size_log_;
+  // a bit is set if something was allocated from the corresponding size class.
+  bool needs_gc_;
+};
+
+FakeStack *GetTLSFakeStack();
+void SetTLSFakeStack(FakeStack *fs);
+
+}  // namespace __asan
+
+#endif  // ASAN_FAKE_STACK_H
diff --git a/contrib/compiler-rt/lib/asan/asan_flags.h b/contrib/compiler-rt/lib/asan/asan_flags.h
new file mode 100644
index 000000000000..3df4dd3050bc
--- /dev/null
+++ b/contrib/compiler-rt/lib/asan/asan_flags.h
@@ -0,0 +1,79 @@
+//===-- asan_flags.h -------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// ASan runtime flags. +//===----------------------------------------------------------------------===// + +#ifndef ASAN_FLAGS_H +#define ASAN_FLAGS_H + +#include "sanitizer_common/sanitizer_internal_defs.h" + +// ASan flag values can be defined in four ways: +// 1) initialized with default values at startup. +// 2) overriden during compilation of ASan runtime by providing +// compile definition ASAN_DEFAULT_OPTIONS. +// 3) overriden from string returned by user-specified function +// __asan_default_options(). +// 4) overriden from env variable ASAN_OPTIONS. + +namespace __asan { + +struct Flags { + // Flag descriptions are in asan_rtl.cc. + int quarantine_size; + int redzone; + int max_redzone; + bool debug; + int report_globals; + bool check_initialization_order; + bool replace_str; + bool replace_intrin; + bool mac_ignore_invalid_free; + bool detect_stack_use_after_return; + int min_uar_stack_size_log; + int max_uar_stack_size_log; + bool uar_noreserve; + int max_malloc_fill_size, malloc_fill_byte; + int exitcode; + bool allow_user_poisoning; + int sleep_before_dying; + bool check_malloc_usable_size; + bool unmap_shadow_on_exit; + bool abort_on_error; + bool print_stats; + bool print_legend; + bool atexit; + bool allow_reexec; + bool print_full_thread_history; + bool poison_heap; + bool poison_partial; + bool poison_array_cookie; + bool alloc_dealloc_mismatch; + bool new_delete_type_mismatch; + bool strict_memcmp; + bool strict_init_order; + bool start_deactivated; + int detect_invalid_pointer_pairs; + bool detect_container_overflow; + int detect_odr_violation; + bool dump_instruction_bytes; +}; + +extern Flags asan_flags_dont_use_directly; +inline Flags *flags() { + return &asan_flags_dont_use_directly; +} +void InitializeFlags(Flags *f, const char *env); + +} // namespace __asan + +#endif // 
ASAN_FLAGS_H diff --git a/contrib/compiler-rt/lib/asan/asan_globals.cc b/contrib/compiler-rt/lib/asan/asan_globals.cc new file mode 100644 index 000000000000..be111d4fb4cf --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_globals.cc @@ -0,0 +1,293 @@ +//===-- asan_globals.cc ---------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Handle globals. +//===----------------------------------------------------------------------===// +#include "asan_interceptors.h" +#include "asan_internal.h" +#include "asan_mapping.h" +#include "asan_poisoning.h" +#include "asan_report.h" +#include "asan_stack.h" +#include "asan_stats.h" +#include "asan_thread.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_mutex.h" +#include "sanitizer_common/sanitizer_placement_new.h" +#include "sanitizer_common/sanitizer_stackdepot.h" + +namespace __asan { + +typedef __asan_global Global; + +struct ListOfGlobals { + const Global *g; + ListOfGlobals *next; +}; + +static BlockingMutex mu_for_globals(LINKER_INITIALIZED); +static LowLevelAllocator allocator_for_globals; +static ListOfGlobals *list_of_all_globals; + +static const int kDynamicInitGlobalsInitialCapacity = 512; +struct DynInitGlobal { + Global g; + bool initialized; +}; +typedef InternalMmapVector<DynInitGlobal> VectorOfGlobals; +// Lazy-initialized and never deleted. +static VectorOfGlobals *dynamic_init_globals; + +// We want to remember where a certain range of globals was registered. 
// One record per __asan_register_globals call: the depot id of the
// registration stack trace and the [g_first, g_last] array it registered.
struct GlobalRegistrationSite {
  u32 stack_id;
  Global *g_first, *g_last;
};
typedef InternalMmapVector<GlobalRegistrationSite> GlobalRegistrationSiteVector;
static GlobalRegistrationSiteVector *global_registration_site_vector;

// Poison the entire global (payload + redzone) with |value| in shadow.
ALWAYS_INLINE void PoisonShadowForGlobal(const Global *g, u8 value) {
  FastPoisonShadow(g->beg, g->size_with_redzone, value);
}

// Poison only the redzone that follows the global's payload; the partial
// last granule (when g.size is not granularity-aligned) is handled separately.
ALWAYS_INLINE void PoisonRedZones(const Global &g) {
  uptr aligned_size = RoundUpTo(g.size, SHADOW_GRANULARITY);
  FastPoisonShadow(g.beg + aligned_size, g.size_with_redzone - aligned_size,
                   kAsanGlobalRedzoneMagic);
  if (g.size != aligned_size) {
    FastPoisonShadowPartialRightRedzone(
        g.beg + RoundDownTo(g.size, SHADOW_GRANULARITY),
        g.size % SHADOW_GRANULARITY,
        SHADOW_GRANULARITY,
        kAsanGlobalRedzoneMagic);
  }
}

const uptr kMinimalDistanceFromAnotherGlobal = 64;

// Returns true if |addr| falls inside the global's redzone-extended range
// (minus a small left margin, so neighbors are not misattributed).
bool IsAddressNearGlobal(uptr addr, const __asan_global &g) {
  if (addr <= g.beg - kMinimalDistanceFromAnotherGlobal) return false;
  if (addr >= g.beg + g.size_with_redzone) return false;
  return true;
}

// Print one global's descriptor (and its source location, if present).
static void ReportGlobal(const Global &g, const char *prefix) {
  Report("%s Global[%p]: beg=%p size=%zu/%zu name=%s module=%s dyn_init=%zu\n",
         prefix, &g, (void *)g.beg, g.size, g.size_with_redzone, g.name,
         g.module_name, g.has_dynamic_init);
  if (g.location) {
    Report("  location (%p): name=%s[%p], %d %d\n", g.location,
           g.location->filename, g.location->filename, g.location->line_no,
           g.location->column_no);
  }
}

// Walk the global list. In |print| mode, describe every global near |addr|
// (returns whether anything was printed); otherwise copy the first match
// into |output_global| and return true.
static bool DescribeOrGetInfoIfGlobal(uptr addr, uptr size, bool print,
                                      Global *output_global) {
  if (!flags()->report_globals) return false;
  BlockingMutexLock lock(&mu_for_globals);
  bool res = false;
  for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
    const Global &g = *l->g;
    if (print) {
      if (flags()->report_globals >= 2)
        ReportGlobal(g, "Search");
      res |= DescribeAddressRelativeToGlobal(addr, size, g);
    } else {
      if (IsAddressNearGlobal(addr, g)) {
        CHECK(output_global);
        *output_global = g;
        return true;
      }
    }
  }
  return res;
}

// Error-report helper: print a description if |addr| belongs to a global.
bool DescribeAddressIfGlobal(uptr addr, uptr size) {
  return DescribeOrGetInfoIfGlobal(addr, size, /* print */ true,
                                   /* output_global */ nullptr);
}

// Fill |descr| (name/region) if |addr| belongs to a registered global.
bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr) {
  Global g = {};
  if (DescribeOrGetInfoIfGlobal(addr, /* size */ 1, /* print */ false, &g)) {
    internal_strncpy(descr->name, g.name, descr->name_size);
    descr->region_address = g.beg;
    descr->region_size = g.size;
    descr->region_kind = "global";
    return true;
  }
  return false;
}

// Map a global descriptor back to the stack id of the call that registered
// it (0 if not found); used by the ODR-violation report.
u32 FindRegistrationSite(const Global *g) {
  CHECK(global_registration_site_vector);
  for (uptr i = 0, n = global_registration_site_vector->size(); i < n; i++) {
    GlobalRegistrationSite &grs = (*global_registration_site_vector)[i];
    if (g >= grs.g_first && g <= grs.g_last)
      return grs.stack_id;
  }
  return 0;
}

// Register a global variable.
// This function may be called more than once for every global
// so we store the globals in a map.
static void RegisterGlobal(const Global *g) {
  CHECK(asan_inited);
  if (flags()->report_globals >= 2)
    ReportGlobal(*g, "Added");
  CHECK(flags()->report_globals);
  CHECK(AddrIsInMem(g->beg));
  CHECK(AddrIsAlignedByGranularity(g->beg));
  CHECK(AddrIsAlignedByGranularity(g->size_with_redzone));
  if (flags()->detect_odr_violation) {
    // Try detecting ODR (One Definition Rule) violation, i.e. the situation
    // where two globals with the same name are defined in different modules.
    if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) {
      // This check may not be enough: if the first global is much larger
      // the entire redzone of the second global may be within the first global.
      for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
        if (g->beg == l->g->beg &&
            (flags()->detect_odr_violation >= 2 || g->size != l->g->size))
          ReportODRViolation(g, FindRegistrationSite(g),
                             l->g, FindRegistrationSite(l->g));
      }
    }
  }
  if (flags()->poison_heap)
    PoisonRedZones(*g);
  // Prepend to the intrusive list; nodes come from a never-freed allocator.
  ListOfGlobals *l = new(allocator_for_globals) ListOfGlobals;
  l->g = g;
  l->next = list_of_all_globals;
  list_of_all_globals = l;
  if (g->has_dynamic_init) {
    if (dynamic_init_globals == 0) {
      dynamic_init_globals = new(allocator_for_globals)
          VectorOfGlobals(kDynamicInitGlobalsInitialCapacity);
    }
    DynInitGlobal dyn_global = { *g, false };
    dynamic_init_globals->push_back(dyn_global);
  }
}

// Unpoison the global's shadow; the list node is intentionally kept
// (removal would be O(n^2) with the current singly-linked list).
static void UnregisterGlobal(const Global *g) {
  CHECK(asan_inited);
  CHECK(flags()->report_globals);
  CHECK(AddrIsInMem(g->beg));
  CHECK(AddrIsAlignedByGranularity(g->beg));
  CHECK(AddrIsAlignedByGranularity(g->size_with_redzone));
  if (flags()->poison_heap)
    PoisonShadowForGlobal(g, 0);
  // We unpoison the shadow memory for the global but we do not remove it from
  // the list because that would require O(n^2) time with the current list
  // implementation. It might not be worth doing anyway.
}

// Permanently disable init-order checking and restore every dynamically
// initialized global to its normal (payload unpoisoned, redzone poisoned)
// shadow state.
void StopInitOrderChecking() {
  BlockingMutexLock lock(&mu_for_globals);
  if (!flags()->check_initialization_order || !dynamic_init_globals)
    return;
  flags()->check_initialization_order = false;
  for (uptr i = 0, n = dynamic_init_globals->size(); i < n; ++i) {
    DynInitGlobal &dyn_g = (*dynamic_init_globals)[i];
    const Global *g = &dyn_g.g;
    // Unpoison the whole global.
    PoisonShadowForGlobal(g, 0);
    // Poison redzones back.
    PoisonRedZones(*g);
  }
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// Register an array of globals.
+void __asan_register_globals(__asan_global *globals, uptr n) { + if (!flags()->report_globals) return; + GET_STACK_TRACE_FATAL_HERE; + u32 stack_id = StackDepotPut(stack); + BlockingMutexLock lock(&mu_for_globals); + if (!global_registration_site_vector) + global_registration_site_vector = + new(allocator_for_globals) GlobalRegistrationSiteVector(128); + GlobalRegistrationSite site = {stack_id, &globals[0], &globals[n - 1]}; + global_registration_site_vector->push_back(site); + if (flags()->report_globals >= 2) { + PRINT_CURRENT_STACK(); + Printf("=== ID %d; %p %p\n", stack_id, &globals[0], &globals[n - 1]); + } + for (uptr i = 0; i < n; i++) { + RegisterGlobal(&globals[i]); + } +} + +// Unregister an array of globals. +// We must do this when a shared objects gets dlclosed. +void __asan_unregister_globals(__asan_global *globals, uptr n) { + if (!flags()->report_globals) return; + BlockingMutexLock lock(&mu_for_globals); + for (uptr i = 0; i < n; i++) { + UnregisterGlobal(&globals[i]); + } +} + +// This method runs immediately prior to dynamic initialization in each TU, +// when all dynamically initialized globals are unpoisoned. This method +// poisons all global variables not defined in this TU, so that a dynamic +// initializer can only touch global variables in the same TU. 
// Poison every dynamically initialized global that belongs to a different
// module, so the current TU's initializers fault if they touch them.
// Note: module identity is compared by pointer (the instrumentation passes
// the same literal per TU), not by string contents.
void __asan_before_dynamic_init(const char *module_name) {
  if (!flags()->check_initialization_order ||
      !flags()->poison_heap)
    return;
  bool strict_init_order = flags()->strict_init_order;
  CHECK(dynamic_init_globals);
  CHECK(module_name);
  CHECK(asan_inited);
  BlockingMutexLock lock(&mu_for_globals);
  if (flags()->report_globals >= 3)
    Printf("DynInitPoison module: %s\n", module_name);
  for (uptr i = 0, n = dynamic_init_globals->size(); i < n; ++i) {
    DynInitGlobal &dyn_g = (*dynamic_init_globals)[i];
    const Global *g = &dyn_g.g;
    if (dyn_g.initialized)
      continue;
    if (g->module_name != module_name)
      PoisonShadowForGlobal(g, kAsanInitializationOrderMagic);
    else if (!strict_init_order)
      dyn_g.initialized = true;
  }
}

// This method runs immediately after dynamic initialization in each TU, when
// all dynamically initialized globals except for those defined in the current
// TU are poisoned. It simply unpoisons all dynamically initialized globals.
void __asan_after_dynamic_init() {
  if (!flags()->check_initialization_order ||
      !flags()->poison_heap)
    return;
  CHECK(asan_inited);
  BlockingMutexLock lock(&mu_for_globals);
  // FIXME: Optionally report that we're unpoisoning globals from a module.
  for (uptr i = 0, n = dynamic_init_globals->size(); i < n; ++i) {
    DynInitGlobal &dyn_g = (*dynamic_init_globals)[i];
    const Global *g = &dyn_g.g;
    if (!dyn_g.initialized) {
      // Unpoison the whole global.
      PoisonShadowForGlobal(g, 0);
      // Poison redzones back.
      PoisonRedZones(*g);
    }
  }
}
diff --git a/contrib/compiler-rt/lib/asan/asan_init_version.h b/contrib/compiler-rt/lib/asan/asan_init_version.h
new file mode 100644
index 000000000000..77aea81bd298
--- /dev/null
+++ b/contrib/compiler-rt/lib/asan/asan_init_version.h
@@ -0,0 +1,32 @@
//===-- asan_init_version.h -------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This header defines a versioned __asan_init function to be called at the
// startup of the instrumented program.
//===----------------------------------------------------------------------===//
#ifndef ASAN_INIT_VERSION_H
#define ASAN_INIT_VERSION_H

extern "C" {
  // Every time the ASan ABI changes we also change the version number in the
  // __asan_init function name. Objects built with incompatible ASan ABI
  // versions will not link with run-time.
  // Changes between ABI versions:
  // v1=>v2: added 'module_name' to __asan_global
  // v2=>v3: stack frame description (created by the compiler)
  //         contains the function PC as the 3-rd field (see
  //         DescribeAddressIfStack).
  // v3=>v4: added '__asan_global_source_location' to __asan_global.
  #define __asan_init __asan_init_v4
  #define __asan_init_name "__asan_init_v4"
}

#endif  // ASAN_INIT_VERSION_H
diff --git a/contrib/compiler-rt/lib/asan/asan_interceptors.cc b/contrib/compiler-rt/lib/asan/asan_interceptors.cc
new file mode 100644
index 000000000000..910cd3addcb0
--- /dev/null
+++ b/contrib/compiler-rt/lib/asan/asan_interceptors.cc
@@ -0,0 +1,924 @@
//===-- asan_interceptors.cc ----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Intercept various libc functions.
//===----------------------------------------------------------------------===//
#include "asan_interceptors.h"

#include "asan_allocator.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_stats.h"
#include "asan_suppressions.h"
#include "sanitizer_common/sanitizer_libc.h"

namespace __asan {

// Return true if we can quickly decide that the region is unpoisoned.
// For sizes <= 32 checking first, middle and last byte is enough;
// larger regions fall through to the full shadow walk.
static inline bool QuickCheckForUnpoisonedRegion(uptr beg, uptr size) {
  if (size == 0) return true;
  if (size <= 32)
    return !AddressIsPoisoned(beg) &&
           !AddressIsPoisoned(beg + size - 1) &&
           !AddressIsPoisoned(beg + size / 2);
  return false;
}

// Per-interceptor context used for interceptor-name based suppressions.
struct AsanInterceptorContext {
  const char *interceptor_name;
};

// We implement ACCESS_MEMORY_RANGE, ASAN_READ_RANGE,
// and ASAN_WRITE_RANGE as macro instead of function so
// that no extra frames are created, and stack trace contains
// relevant information only.
// We check all shadow bytes.
// Validate [offset, offset+size): report size overflow (wrap-around) first,
// then a poisoned byte unless a suppression (by interceptor name or stack
// trace) applies. Kept as a macro so no extra stack frame appears in reports.
#define ACCESS_MEMORY_RANGE(ctx, offset, size, isWrite) do {            \
    uptr __offset = (uptr)(offset);                                     \
    uptr __size = (uptr)(size);                                         \
    uptr __bad = 0;                                                     \
    if (__offset > __offset + __size) {                                 \
      GET_STACK_TRACE_FATAL_HERE;                                       \
      ReportStringFunctionSizeOverflow(__offset, __size, &stack);       \
    }                                                                   \
    if (!QuickCheckForUnpoisonedRegion(__offset, __size) &&             \
        (__bad = __asan_region_is_poisoned(__offset, __size))) {        \
      AsanInterceptorContext *_ctx = (AsanInterceptorContext *)ctx;     \
      bool suppressed = false;                                          \
      if (_ctx) {                                                       \
        suppressed = IsInterceptorSuppressed(_ctx->interceptor_name);   \
        if (!suppressed && HaveStackTraceBasedSuppressions()) {         \
          GET_STACK_TRACE_FATAL_HERE;                                   \
          suppressed = IsStackTraceSuppressed(&stack);                  \
        }                                                               \
      }                                                                 \
      if (!suppressed) {                                                \
        GET_CURRENT_PC_BP_SP;                                           \
        __asan_report_error(pc, bp, sp, __bad, isWrite, __size);        \
      }                                                                 \
    }                                                                   \
  } while (0)

// Convenience wrappers: read/write access checks over a range.
#define ASAN_READ_RANGE(ctx, offset, size) \
  ACCESS_MEMORY_RANGE(ctx, offset, size, false)
#define ASAN_WRITE_RANGE(ctx, offset, size) \
  ACCESS_MEMORY_RANGE(ctx, offset, size, true)

// Behavior of functions like "memcpy" or "strcpy" is undefined
// if memory intervals overlap. We report error in this case.
// Macro is used to avoid creation of new frames.
// True iff [offset1, offset1+length1) and [offset2, offset2+length2) overlap.
static inline bool RangesOverlap(const char *offset1, uptr length1,
                                 const char *offset2, uptr length2) {
  return !((offset1 + length1 <= offset2) || (offset2 + length2 <= offset1));
}
// Report a fatal str*/mem* overlap error if the two ranges intersect.
#define CHECK_RANGES_OVERLAP(name, _offset1, length1, _offset2, length2) do { \
  const char *offset1 = (const char*)_offset1; \
  const char *offset2 = (const char*)_offset2; \
  if (RangesOverlap(offset1, length1, offset2, length2)) { \
    GET_STACK_TRACE_FATAL_HERE; \
    ReportStringFunctionMemoryRangesOverlap(name, offset1, length1, \
                                            offset2, length2, &stack); \
  } \
} while (0)

// Use the real strnlen when it was intercepted; otherwise fall back to the
// sanitizer's internal implementation (e.g. before interception is set up).
static inline uptr MaybeRealStrnlen(const char *s, uptr maxlen) {
#if ASAN_INTERCEPT_STRNLEN
  if (REAL(strnlen) != 0) {
    return REAL(strnlen)(s, maxlen);
  }
#endif
  return internal_strnlen(s, maxlen);
}

// Record the current thread's name in the ASan thread registry.
void SetThreadName(const char *name) {
  AsanThread *t = GetCurrentThread();
  if (t)
    asanThreadRegistry().SetThreadName(t->tid(), name);
}

// Exit hook used by the common interceptors.
int OnExit() {
  // FIXME: ask frontend whether we need to return failure.
  return 0;
}

}  // namespace __asan

// ---------------------- Wrappers ---------------- {{{1
using namespace __asan;  // NOLINT

DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr)
DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)

#if !SANITIZER_MAC
#define ASAN_INTERCEPT_FUNC(name)                                        \
  do {                                                                   \
    if ((!INTERCEPT_FUNCTION(name) || !REAL(name)))                      \
      VReport(1, "AddressSanitizer: failed to intercept '" #name "'\n"); \
  } while (0)
#else
// OS X interceptors don't need to be initialized with INTERCEPT_FUNCTION.
#define ASAN_INTERCEPT_FUNC(name)
#endif  // SANITIZER_MAC

// Creates the per-call interceptor context consumed by ACCESS_MEMORY_RANGE.
#define ASAN_INTERCEPTOR_ENTER(ctx, func)                                      \
  AsanInterceptorContext _ctx = {#func};                                       \
  ctx = (void *)&_ctx;                                                         \
  (void) ctx;                                                                  \

// Glue macros binding sanitizer_common's generic interceptors to ASan.
#define COMMON_INTERCEPT_FUNCTION(name) ASAN_INTERCEPT_FUNC(name)
#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
  ASAN_WRITE_RANGE(ctx, ptr, size)
#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
  ASAN_READ_RANGE(ctx, ptr, size)
#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...)                               \
  do {                                                                         \
    if (asan_init_is_running)                                                  \
      return REAL(func)(__VA_ARGS__);                                          \
    ASAN_INTERCEPTOR_ENTER(ctx, func);                                         \
    if (SANITIZER_MAC && UNLIKELY(!asan_inited))                               \
      return REAL(func)(__VA_ARGS__);                                          \
    ENSURE_ASAN_INITED();                                                      \
  } while (false)
#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
  do {                                         \
  } while (false)
#define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
  do {                                         \
  } while (false)
#define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \
  do {                                                      \
  } while (false)
#define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) SetThreadName(name)
// Should be asanThreadRegistry().SetThreadNameByUserId(thread, name)
// But asan does not remember UserId's for threads (pthread_t);
// and remembers all ever existed threads, so the linear search by UserId
// can be slow.
#define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
  do {                                                         \
  } while (false)
#define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name)
#define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit()
#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, res) CovUpdateMapping()
#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() CovUpdateMapping()
#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!asan_inited)
#include "sanitizer_common/sanitizer_common_interceptors.inc"

// Syscall interceptors don't have contexts, we don't support suppressions
// for them.
#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) ASAN_READ_RANGE(nullptr, p, s)
#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) ASAN_WRITE_RANGE(nullptr, p, s)
#define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
  do {                                       \
    (void)(p);                               \
    (void)(s);                               \
  } while (false)
#define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
  do {                                        \
    (void)(p);                                \
    (void)(s);                                \
  } while (false)
#include "sanitizer_common/sanitizer_common_syscalls.inc"

// Handshake cell passed (on the parent's stack) to the freshly created
// thread: |t| publishes the AsanThread*, |is_registered| is set by the child
// once its registry entry is in "started" state.
struct ThreadStartParam {
  atomic_uintptr_t t;
  atomic_uintptr_t is_registered;
};

// Trampoline run on the new thread: spin until the parent publishes the
// AsanThread object, then install it as the current thread and run.
static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
  ThreadStartParam *param = reinterpret_cast<ThreadStartParam *>(arg);
  AsanThread *t = nullptr;
  while ((t = reinterpret_cast<AsanThread *>(
              atomic_load(&param->t, memory_order_acquire))) == 0)
    internal_sched_yield();
  SetCurrentThread(t);
  return t->ThreadStart(GetTid(), &param->is_registered);
}

#if ASAN_INTERCEPT_PTHREAD_CREATE
INTERCEPTOR(int, pthread_create, void *thread,
    void *attr, void *(*start_routine)(void*), void *arg) {
  EnsureMainThreadIDIsCorrect();
  // Strict init-order checking is thread-hostile.
  if (flags()->strict_init_order)
    StopInitOrderChecking();
  GET_STACK_TRACE_THREAD;
  int detached = 0;
  if (attr != 0)
    REAL(pthread_attr_getdetachstate)(attr, &detached);
  ThreadStartParam param;
  atomic_store(&param.t, 0, memory_order_relaxed);
  atomic_store(&param.is_registered, 0, memory_order_relaxed);
  int result = REAL(pthread_create)(thread, attr, asan_thread_start, &param);
  if (result == 0) {
    u32 current_tid = GetCurrentTidOrInvalid();
    AsanThread *t =
        AsanThread::Create(start_routine, arg, current_tid, &stack, detached);
    atomic_store(&param.t, reinterpret_cast<uptr>(t), memory_order_release);
    // Wait until the AsanThread object is initialized and the ThreadRegistry
    // entry is in "started" state. One reason for this is that after this
    // interceptor exits, the child thread's stack may be the only thing holding
    // the |arg| pointer. This may cause LSan to report a leak if leak checking
    // happens at a point when the interceptor has already exited, but the stack
    // range for the child thread is not yet known.
    while (atomic_load(&param.is_registered, memory_order_acquire) == 0)
      internal_sched_yield();
  }
  return result;
}
#endif  // ASAN_INTERCEPT_PTHREAD_CREATE

#if ASAN_INTERCEPT_SIGNAL_AND_SIGACTION

#if SANITIZER_ANDROID
// Refuse to install user handlers for signals ASan needs (unless allowed by
// allow_user_segv_handler); report "previous handler" as 0 in that case.
INTERCEPTOR(void*, bsd_signal, int signum, void *handler) {
  if (!AsanInterceptsSignal(signum) ||
      common_flags()->allow_user_segv_handler) {
    return REAL(bsd_signal)(signum, handler);
  }
  return 0;
}
#else
INTERCEPTOR(void*, signal, int signum, void *handler) {
  if (!AsanInterceptsSignal(signum) ||
      common_flags()->allow_user_segv_handler) {
    return REAL(signal)(signum, handler);
  }
  return 0;
}
#endif

INTERCEPTOR(int, sigaction, int signum, const struct sigaction *act,
            struct sigaction *oldact) {
  if (!AsanInterceptsSignal(signum) ||
      common_flags()->allow_user_segv_handler) {
    return REAL(sigaction)(signum, act, oldact);
  }
  return 0;
}

namespace __sanitizer {
// Bridge used by sanitizer_common to reach the real (uninstrumented)
// sigaction.
int real_sigaction(int signum, const void *act, void *oldact) {
  return REAL(sigaction)(signum, (const struct sigaction *)act,
                         (struct sigaction *)oldact);
}
}  // namespace __sanitizer

#elif SANITIZER_POSIX
// We need to have defined REAL(sigaction) on posix systems.
DEFINE_REAL(int, sigaction, int signum, const struct sigaction *act,
            struct sigaction *oldact)
#endif  // ASAN_INTERCEPT_SIGNAL_AND_SIGACTION

#if ASAN_INTERCEPT_SWAPCONTEXT
// Unpoison the page-aligned shadow of a context stack (skipped for
// implausibly large sizes, which indicate a garbage ucontext).
static void ClearShadowMemoryForContextStack(uptr stack, uptr ssize) {
  // Align to page size.
  uptr PageSize = GetPageSizeCached();
  uptr bottom = stack & ~(PageSize - 1);
  ssize += stack - bottom;
  ssize = RoundUpTo(ssize, PageSize);
  static const uptr kMaxSaneContextStackSize = 1 << 22;  // 4 Mb
  if (ssize && ssize <= kMaxSaneContextStackSize) {
    PoisonShadow(bottom, ssize, 0);
  }
}

INTERCEPTOR(int, swapcontext, struct ucontext_t *oucp,
            struct ucontext_t *ucp) {
  static bool reported_warning = false;
  if (!reported_warning) {
    Report("WARNING: ASan doesn't fully support makecontext/swapcontext "
           "functions and may produce false positives in some cases!\n");
    reported_warning = true;
  }
  // Clear shadow memory for new context (it may share stack
  // with current context).
  uptr stack, ssize;
  ReadContextStack(ucp, &stack, &ssize);
  ClearShadowMemoryForContextStack(stack, ssize);
  int res = REAL(swapcontext)(oucp, ucp);
  // swapcontext technically does not return, but program may swap context to
  // "oucp" later, that would look as if swapcontext() returned 0.
  // We need to clear shadow for ucp once again, as it may be in arbitrary
  // state.
  ClearShadowMemoryForContextStack(stack, ssize);
  return res;
}
#endif  // ASAN_INTERCEPT_SWAPCONTEXT

// longjmp-family and exception-raising functions abandon stack frames, so
// mark "no return" to unpoison the abandoned fake/stack memory first.
INTERCEPTOR(void, longjmp, void *env, int val) {
  __asan_handle_no_return();
  REAL(longjmp)(env, val);
}

#if ASAN_INTERCEPT__LONGJMP
INTERCEPTOR(void, _longjmp, void *env, int val) {
  __asan_handle_no_return();
  REAL(_longjmp)(env, val);
}
#endif

#if ASAN_INTERCEPT_SIGLONGJMP
INTERCEPTOR(void, siglongjmp, void *env, int val) {
  __asan_handle_no_return();
  REAL(siglongjmp)(env, val);
}
#endif

#if ASAN_INTERCEPT___CXA_THROW
INTERCEPTOR(void, __cxa_throw, void *a, void *b, void *c) {
  CHECK(REAL(__cxa_throw));
  __asan_handle_no_return();
  REAL(__cxa_throw)(a, b, c);
}
#endif

#if SANITIZER_WINDOWS
INTERCEPTOR_WINAPI(void, RaiseException, void *a, void *b, void *c, void *d) {
  CHECK(REAL(RaiseException));
  __asan_handle_no_return();
  REAL(RaiseException)(a, b, c, d);
}

INTERCEPTOR(int, _except_handler3, void *a, void *b, void *c, void *d) {
  CHECK(REAL(_except_handler3));
  __asan_handle_no_return();
  return REAL(_except_handler3)(a, b, c, d);
}

#if ASAN_DYNAMIC
// This handler is named differently in -MT and -MD CRTs.
#define _except_handler4 _except_handler4_common
#endif
INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) {
  CHECK(REAL(_except_handler4));
  __asan_handle_no_return();
  return REAL(_except_handler4)(a, b, c, d);
}
#endif

// Three-way byte comparison used by the non-strict memcmp path.
static inline int CharCmp(unsigned char c1, unsigned char c2) {
  return (c1 == c2) ? 0 : (c1 < c2) ? -1 : 1;
}

INTERCEPTOR(int, memcmp, const void *a1, const void *a2, uptr size) {
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, memcmp);
  if (UNLIKELY(!asan_inited)) return internal_memcmp(a1, a2, size);
  ENSURE_ASAN_INITED();
  if (flags()->replace_intrin) {
    if (flags()->strict_memcmp) {
      // Check the entire regions even if the first bytes of the buffers are
      // different.
      ASAN_READ_RANGE(ctx, a1, size);
      ASAN_READ_RANGE(ctx, a2, size);
      // Fallthrough to REAL(memcmp) below.
    } else {
      // Non-strict mode: only validate the bytes actually compared (up to
      // and including the first difference).
      unsigned char c1 = 0, c2 = 0;
      const unsigned char *s1 = (const unsigned char*)a1;
      const unsigned char *s2 = (const unsigned char*)a2;
      uptr i;
      for (i = 0; i < size; i++) {
        c1 = s1[i];
        c2 = s2[i];
        if (c1 != c2) break;
      }
      ASAN_READ_RANGE(ctx, s1, Min(i + 1, size));
      ASAN_READ_RANGE(ctx, s2, Min(i + 1, size));
      return CharCmp(c1, c2);
    }
  }
  return REAL(memcmp(a1, a2, size));
}

// memcpy is called during __asan_init() from the internals of printf(...).
// We do not treat memcpy with to==from as a bug.
// See http://llvm.org/bugs/show_bug.cgi?id=11763.
#define ASAN_MEMCPY_IMPL(ctx, to, from, size) do {                             \
    if (UNLIKELY(!asan_inited)) return internal_memcpy(to, from, size);        \
    if (asan_init_is_running) {                                                \
      return REAL(memcpy)(to, from, size);                                     \
    }                                                                          \
    ENSURE_ASAN_INITED();                                                      \
    if (flags()->replace_intrin) {                                             \
      if (to != from) {                                                        \
        CHECK_RANGES_OVERLAP("memcpy", to, size, from, size);                  \
      }                                                                        \
      ASAN_READ_RANGE(ctx, from, size);                                        \
      ASAN_WRITE_RANGE(ctx, to, size);                                         \
    }                                                                          \
    return REAL(memcpy)(to, from, size);                                       \
  } while (0)


void *__asan_memcpy(void *to, const void *from, uptr size) {
  ASAN_MEMCPY_IMPL(nullptr, to, from, size);
}

// memset is called inside Printf.
#define ASAN_MEMSET_IMPL(ctx, block, c, size) do {                             \
    if (UNLIKELY(!asan_inited)) return internal_memset(block, c, size);        \
    if (asan_init_is_running) {                                                \
      return REAL(memset)(block, c, size);                                     \
    }                                                                          \
    ENSURE_ASAN_INITED();                                                      \
    if (flags()->replace_intrin) {                                             \
      ASAN_WRITE_RANGE(ctx, block, size);                                      \
    }                                                                          \
    return REAL(memset)(block, c, size);                                       \
  } while (0)

void *__asan_memset(void *block, int c, uptr size) {
  ASAN_MEMSET_IMPL(nullptr, block, c, size);
}

#define ASAN_MEMMOVE_IMPL(ctx, to, from, size) do {                            \
    if (UNLIKELY(!asan_inited))                                                \
      return internal_memmove(to, from, size);                                 \
    ENSURE_ASAN_INITED();                                                      \
    if (flags()->replace_intrin) {                                             \
      ASAN_READ_RANGE(ctx, from, size);                                        \
      ASAN_WRITE_RANGE(ctx, to, size);                                         \
    }                                                                          \
    return internal_memmove(to, from, size);                                   \
  } while (0)

void *__asan_memmove(void *to, const void *from, uptr size) {
  ASAN_MEMMOVE_IMPL(nullptr, to, from, size);
}

INTERCEPTOR(void*, memmove, void *to, const void *from, uptr size) {
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, memmove);
  ASAN_MEMMOVE_IMPL(ctx, to, from, size);
}

INTERCEPTOR(void*, memcpy, void *to, const void *from, uptr size) {
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, memcpy);
#if !SANITIZER_MAC
  ASAN_MEMCPY_IMPL(ctx, to, from, size);
#else
  // At least on 10.7 and 10.8 both memcpy() and memmove() are being replaced
  // with WRAP(memcpy). As a result, false positives are reported for memmove()
  // calls. If we just disable error reporting with
  // ASAN_OPTIONS=replace_intrin=0, memmove() is still replaced with
  // internal_memcpy(), which may lead to crashes, see
  // http://llvm.org/bugs/show_bug.cgi?id=16362.
  ASAN_MEMMOVE_IMPL(ctx, to, from, size);
#endif  // !SANITIZER_MAC
}

INTERCEPTOR(void*, memset, void *block, int c, uptr size) {
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, memset);
  ASAN_MEMSET_IMPL(ctx, block, c, size);
}

INTERCEPTOR(char*, strchr, const char *str, int c) {
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, strchr);
  if (UNLIKELY(!asan_inited)) return internal_strchr(str, c);
  // strchr is called inside create_purgeable_zone() when MallocGuardEdges=1 is
  // used.
  if (asan_init_is_running) {
    return REAL(strchr)(str, c);
  }
  ENSURE_ASAN_INITED();
  char *result = REAL(strchr)(str, c);
  if (flags()->replace_str) {
    // Only the bytes actually scanned (up to the match or the terminator)
    // are checked.
    uptr bytes_read = (result ? result - str : REAL(strlen)(str)) + 1;
    ASAN_READ_RANGE(ctx, str, bytes_read);
  }
  return result;
}

#if ASAN_INTERCEPT_INDEX
# if ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX
INTERCEPTOR(char*, index, const char *string, int c)
  ALIAS(WRAPPER_NAME(strchr));
# else
# if SANITIZER_MAC
DECLARE_REAL(char*, index, const char *string, int c)
OVERRIDE_FUNCTION(index, strchr);
# else
DEFINE_REAL(char*, index, const char *string, int c)
# endif
# endif
#endif  // ASAN_INTERCEPT_INDEX

// For both strcat() and strncat() we need to check the validity of |to|
// argument irrespective of the |from| length.
INTERCEPTOR(char*, strcat, char *to, const char *from) {  // NOLINT
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, strcat);  // NOLINT
  ENSURE_ASAN_INITED();
  if (flags()->replace_str) {
    uptr from_length = REAL(strlen)(from);
    ASAN_READ_RANGE(ctx, from, from_length + 1);
    uptr to_length = REAL(strlen)(to);
    ASAN_READ_RANGE(ctx, to, to_length);
    ASAN_WRITE_RANGE(ctx, to + to_length, from_length + 1);
    // If the copying actually happens, the |from| string should not overlap
    // with the resulting string starting at |to|, which has a length of
    // to_length + from_length + 1.
    if (from_length > 0) {
      CHECK_RANGES_OVERLAP("strcat", to, from_length + to_length + 1,
                           from, from_length + 1);
    }
  }
  return REAL(strcat)(to, from);  // NOLINT
}

INTERCEPTOR(char*, strncat, char *to, const char *from, uptr size) {
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, strncat);
  ENSURE_ASAN_INITED();
  if (flags()->replace_str) {
    uptr from_length = MaybeRealStrnlen(from, size);
    uptr copy_length = Min(size, from_length + 1);
    ASAN_READ_RANGE(ctx, from, copy_length);
    uptr to_length = REAL(strlen)(to);
    ASAN_READ_RANGE(ctx, to, to_length);
    ASAN_WRITE_RANGE(ctx, to + to_length, from_length + 1);
    if (from_length > 0) {
      CHECK_RANGES_OVERLAP("strncat", to, to_length + copy_length + 1,
                           from, copy_length);
    }
  }
  return REAL(strncat)(to, from, size);
}

INTERCEPTOR(char*, strcpy, char *to, const char *from) {  // NOLINT
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, strcpy);  // NOLINT
#if SANITIZER_MAC
  if (UNLIKELY(!asan_inited)) return REAL(strcpy)(to, from);  // NOLINT
#endif
  // strcpy is called from malloc_default_purgeable_zone()
  // in __asan::ReplaceSystemAlloc() on Mac.
  if (asan_init_is_running) {
    return REAL(strcpy)(to, from);  // NOLINT
  }
  ENSURE_ASAN_INITED();
  if (flags()->replace_str) {
    uptr from_size = REAL(strlen)(from) + 1;
    CHECK_RANGES_OVERLAP("strcpy", to, from_size, from, from_size);
    ASAN_READ_RANGE(ctx, from, from_size);
    ASAN_WRITE_RANGE(ctx, to, from_size);
  }
  return REAL(strcpy)(to, from);  // NOLINT
}

#if ASAN_INTERCEPT_STRDUP
INTERCEPTOR(char*, strdup, const char *s) {
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, strdup);
  if (UNLIKELY(!asan_inited)) return internal_strdup(s);
  ENSURE_ASAN_INITED();
  uptr length = REAL(strlen)(s);
  if (flags()->replace_str) {
    ASAN_READ_RANGE(ctx, s, length + 1);
  }
  // Allocate the copy via ASan's allocator so the result participates in
  // poisoning/quarantine like any other heap block.
  GET_STACK_TRACE_MALLOC;
  void *new_mem = asan_malloc(length + 1, &stack);
  REAL(memcpy)(new_mem, s, length + 1);
  return reinterpret_cast<char*>(new_mem);
}
#endif

INTERCEPTOR(SIZE_T, strlen, const char *s) {
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, strlen);
  if (UNLIKELY(!asan_inited)) return internal_strlen(s);
  // strlen is called from malloc_default_purgeable_zone()
  // in __asan::ReplaceSystemAlloc() on Mac.
  if (asan_init_is_running) {
    return REAL(strlen)(s);
  }
  ENSURE_ASAN_INITED();
  SIZE_T length = REAL(strlen)(s);
  if (flags()->replace_str) {
    ASAN_READ_RANGE(ctx, s, length + 1);
  }
  return length;
}

INTERCEPTOR(SIZE_T, wcslen, const wchar_t *s) {
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, wcslen);
  SIZE_T length = REAL(wcslen)(s);
  if (!asan_init_is_running) {
    ENSURE_ASAN_INITED();
    ASAN_READ_RANGE(ctx, s, (length + 1) * sizeof(wchar_t));
  }
  return length;
}

INTERCEPTOR(char*, strncpy, char *to, const char *from, uptr size) {
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, strncpy);
  ENSURE_ASAN_INITED();
  if (flags()->replace_str) {
    uptr from_size = Min(size, MaybeRealStrnlen(from, size) + 1);
    CHECK_RANGES_OVERLAP("strncpy", to, from_size, from, from_size);
    ASAN_READ_RANGE(ctx, from, from_size);
    // strncpy always writes |size| bytes (pads with NULs), so check the full
    // destination range.
    ASAN_WRITE_RANGE(ctx, to, size);
  }
  return REAL(strncpy)(to, from, size);
}

#if ASAN_INTERCEPT_STRNLEN
INTERCEPTOR(uptr, strnlen, const char *s, uptr maxlen) {
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, strnlen);
  ENSURE_ASAN_INITED();
  uptr length = REAL(strnlen)(s, maxlen);
  if (flags()->replace_str) {
    ASAN_READ_RANGE(ctx, s, Min(length + 1, maxlen));
  }
  return length;
}
#endif  // ASAN_INTERCEPT_STRNLEN

// Bases accepted by strtol-family functions (0 = auto-detect).
static inline bool IsValidStrtolBase(int base) {
  return (base == 0) || (2 <= base && base <= 36);
}

static inline void FixRealStrtolEndptr(const char *nptr, char **endptr) {
  CHECK(endptr);
  if (nptr == *endptr) {
    // No digits were found at strtol call, we need to find out the last
    // symbol accessed by strtoll on our own.
    // We get this symbol by skipping leading blanks and optional +/- sign.
    while (IsSpace(*nptr)) nptr++;
    if (*nptr == '+' || *nptr == '-') nptr++;
    *endptr = const_cast<char *>(nptr);
  }
  CHECK(*endptr >= nptr);
}

INTERCEPTOR(long, strtol, const char *nptr,  // NOLINT
            char **endptr, int base) {
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, strtol);
  ENSURE_ASAN_INITED();
  if (!flags()->replace_str) {
    return REAL(strtol)(nptr, endptr, base);
  }
  char *real_endptr;
  long result = REAL(strtol)(nptr, &real_endptr, base);  // NOLINT
  if (endptr != 0) {
    *endptr = real_endptr;
  }
  if (IsValidStrtolBase(base)) {
    FixRealStrtolEndptr(nptr, &real_endptr);
    ASAN_READ_RANGE(ctx, nptr, (real_endptr - nptr) + 1);
  }
  return result;
}

INTERCEPTOR(int, atoi, const char *nptr) {
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, atoi);
#if SANITIZER_MAC
  if (UNLIKELY(!asan_inited)) return REAL(atoi)(nptr);
#endif
  ENSURE_ASAN_INITED();
  if (!flags()->replace_str) {
    return REAL(atoi)(nptr);
  }
  char *real_endptr;
  // "man atoi" tells that behavior of atoi(nptr) is the same as
  // strtol(nptr, 0, 10), i.e. it sets errno to ERANGE if the
  // parsed integer can't be stored in *long* type (even if it's
  // different from int). So, we just imitate this behavior.
  int result = REAL(strtol)(nptr, &real_endptr, 10);
  FixRealStrtolEndptr(nptr, &real_endptr);
  ASAN_READ_RANGE(ctx, nptr, (real_endptr - nptr) + 1);
  return result;
}

INTERCEPTOR(long, atol, const char *nptr) {  // NOLINT
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, atol);
#if SANITIZER_MAC
  if (UNLIKELY(!asan_inited)) return REAL(atol)(nptr);
#endif
  ENSURE_ASAN_INITED();
  if (!flags()->replace_str) {
    return REAL(atol)(nptr);
  }
  char *real_endptr;
  long result = REAL(strtol)(nptr, &real_endptr, 10);  // NOLINT
  FixRealStrtolEndptr(nptr, &real_endptr);
  ASAN_READ_RANGE(ctx, nptr, (real_endptr - nptr) + 1);
  return result;
}

#if ASAN_INTERCEPT_ATOLL_AND_STRTOLL
INTERCEPTOR(long long, strtoll, const char *nptr,  // NOLINT
            char **endptr, int base) {
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, strtoll);
  ENSURE_ASAN_INITED();
  if (!flags()->replace_str) {
    return REAL(strtoll)(nptr, endptr, base);
  }
  char *real_endptr;
  long long result = REAL(strtoll)(nptr, &real_endptr, base);  // NOLINT
  if (endptr != 0) {
    *endptr = real_endptr;
  }
  // If base has unsupported value, strtoll can exit with EINVAL
  // without reading any characters. So do additional checks only
  // if base is valid.
  if (IsValidStrtolBase(base)) {
    FixRealStrtolEndptr(nptr, &real_endptr);
    ASAN_READ_RANGE(ctx, nptr, (real_endptr - nptr) + 1);
  }
  return result;
}

INTERCEPTOR(long long, atoll, const char *nptr) {  // NOLINT
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, atoll);
  ENSURE_ASAN_INITED();
  if (!flags()->replace_str) {
    return REAL(atoll)(nptr);
  }
  char *real_endptr;
  long long result = REAL(strtoll)(nptr, &real_endptr, 10);  // NOLINT
  FixRealStrtolEndptr(nptr, &real_endptr);
  ASAN_READ_RANGE(ctx, nptr, (real_endptr - nptr) + 1);
  return result;
}
#endif  // ASAN_INTERCEPT_ATOLL_AND_STRTOLL

// atexit-time hook: init-order checking must stop before destructors run.
static void AtCxaAtexit(void *unused) {
  (void)unused;
  StopInitOrderChecking();
}

#if ASAN_INTERCEPT___CXA_ATEXIT
INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg,
            void *dso_handle) {
#if SANITIZER_MAC
  if (UNLIKELY(!asan_inited)) return REAL(__cxa_atexit)(func, arg, dso_handle);
#endif
  ENSURE_ASAN_INITED();
  int res = REAL(__cxa_atexit)(func, arg, dso_handle);
  // Re-register our hook after the user's so it runs first (LIFO order).
  REAL(__cxa_atexit)(AtCxaAtexit, 0, 0);
  return res;
}
#endif  // ASAN_INTERCEPT___CXA_ATEXIT

#if ASAN_INTERCEPT_FORK
INTERCEPTOR(int, fork, void) {
  ENSURE_ASAN_INITED();
  // Coverage data must be flushed/reopened around fork.
  if (common_flags()->coverage) CovBeforeFork();
  int pid = REAL(fork)();
  if (common_flags()->coverage) CovAfterFork(pid);
  return pid;
}
#endif  // ASAN_INTERCEPT_FORK

#if SANITIZER_WINDOWS
INTERCEPTOR_WINAPI(DWORD, CreateThread,
                   void* security, uptr stack_size,
                   DWORD (__stdcall *start_routine)(void*), void* arg,
                   DWORD thr_flags, void* tid) {
  // Strict init-order checking is thread-hostile.
  if (flags()->strict_init_order)
    StopInitOrderChecking();
  GET_STACK_TRACE_THREAD;
  bool detached = false;  // FIXME: how can we determine it on Windows?
  ThreadStartParam param;
  atomic_store(&param.t, 0, memory_order_relaxed);
  atomic_store(&param.is_registered, 0, memory_order_relaxed);
  DWORD result = REAL(CreateThread)(security, stack_size, asan_thread_start,
                                    &param, thr_flags, tid);
  if (result) {
    u32 current_tid = GetCurrentTidOrInvalid();
    AsanThread *t =
        AsanThread::Create(start_routine, arg, current_tid, &stack, detached);
    atomic_store(&param.t, reinterpret_cast<uptr>(t), memory_order_release);
    // The pthread_create interceptor waits here, so we do the same for
    // consistency.
    while (atomic_load(&param.is_registered, memory_order_acquire) == 0)
      internal_sched_yield();
  }
  return result;
}

namespace __asan {
void InitializeWindowsInterceptors() {
  ASAN_INTERCEPT_FUNC(CreateThread);
  ASAN_INTERCEPT_FUNC(RaiseException);
  ASAN_INTERCEPT_FUNC(_except_handler3);
  ASAN_INTERCEPT_FUNC(_except_handler4);
}

}  // namespace __asan
#endif

// ---------------------- InitializeAsanInterceptors ---------------- {{{1
namespace __asan {
void InitializeAsanInterceptors() {
  static bool was_called_once;
  CHECK(was_called_once == false);
  was_called_once = true;
  InitializeCommonInterceptors();

  // Intercept mem* functions.
  ASAN_INTERCEPT_FUNC(memcmp);
  ASAN_INTERCEPT_FUNC(memmove);
  ASAN_INTERCEPT_FUNC(memset);
  if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) {
    ASAN_INTERCEPT_FUNC(memcpy);
  }

  // Intercept str* functions.
+ ASAN_INTERCEPT_FUNC(strcat); // NOLINT + ASAN_INTERCEPT_FUNC(strchr); + ASAN_INTERCEPT_FUNC(strcpy); // NOLINT + ASAN_INTERCEPT_FUNC(strlen); + ASAN_INTERCEPT_FUNC(wcslen); + ASAN_INTERCEPT_FUNC(strncat); + ASAN_INTERCEPT_FUNC(strncpy); +#if ASAN_INTERCEPT_STRDUP + ASAN_INTERCEPT_FUNC(strdup); +#endif +#if ASAN_INTERCEPT_STRNLEN + ASAN_INTERCEPT_FUNC(strnlen); +#endif +#if ASAN_INTERCEPT_INDEX && ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX + ASAN_INTERCEPT_FUNC(index); +#endif + + ASAN_INTERCEPT_FUNC(atoi); + ASAN_INTERCEPT_FUNC(atol); + ASAN_INTERCEPT_FUNC(strtol); +#if ASAN_INTERCEPT_ATOLL_AND_STRTOLL + ASAN_INTERCEPT_FUNC(atoll); + ASAN_INTERCEPT_FUNC(strtoll); +#endif + + // Intercept signal- and jump-related functions. + ASAN_INTERCEPT_FUNC(longjmp); +#if ASAN_INTERCEPT_SIGNAL_AND_SIGACTION + ASAN_INTERCEPT_FUNC(sigaction); +#if SANITIZER_ANDROID + ASAN_INTERCEPT_FUNC(bsd_signal); +#else + ASAN_INTERCEPT_FUNC(signal); +#endif +#endif +#if ASAN_INTERCEPT_SWAPCONTEXT + ASAN_INTERCEPT_FUNC(swapcontext); +#endif +#if ASAN_INTERCEPT__LONGJMP + ASAN_INTERCEPT_FUNC(_longjmp); +#endif +#if ASAN_INTERCEPT_SIGLONGJMP + ASAN_INTERCEPT_FUNC(siglongjmp); +#endif + + // Intercept exception handling functions. +#if ASAN_INTERCEPT___CXA_THROW + ASAN_INTERCEPT_FUNC(__cxa_throw); +#endif + + // Intercept threading-related functions +#if ASAN_INTERCEPT_PTHREAD_CREATE + ASAN_INTERCEPT_FUNC(pthread_create); +#endif + + // Intercept atexit function. +#if ASAN_INTERCEPT___CXA_ATEXIT + ASAN_INTERCEPT_FUNC(__cxa_atexit); +#endif + +#if ASAN_INTERCEPT_FORK + ASAN_INTERCEPT_FUNC(fork); +#endif + + // Some Windows-specific interceptors. 
+#if SANITIZER_WINDOWS + InitializeWindowsInterceptors(); +#endif + + VReport(1, "AddressSanitizer: libc interceptors initialized\n"); +} + +} // namespace __asan diff --git a/contrib/compiler-rt/lib/asan/asan_interceptors.h b/contrib/compiler-rt/lib/asan/asan_interceptors.h new file mode 100644 index 000000000000..ee3b82aa7ef9 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_interceptors.h @@ -0,0 +1,108 @@ +//===-- asan_interceptors.h -------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// ASan-private header for asan_interceptors.cc +//===----------------------------------------------------------------------===// +#ifndef ASAN_INTERCEPTORS_H +#define ASAN_INTERCEPTORS_H + +#include "asan_internal.h" +#include "interception/interception.h" +#include "sanitizer_common/sanitizer_platform_interceptors.h" + +// Use macro to describe if specific function should be +// intercepted on a given platform. 
+#if !SANITIZER_WINDOWS +# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 1 +# define ASAN_INTERCEPT__LONGJMP 1 +# define ASAN_INTERCEPT_STRDUP 1 +# define ASAN_INTERCEPT_INDEX 1 +# define ASAN_INTERCEPT_PTHREAD_CREATE 1 +# define ASAN_INTERCEPT_FORK 1 +#else +# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 0 +# define ASAN_INTERCEPT__LONGJMP 0 +# define ASAN_INTERCEPT_STRDUP 0 +# define ASAN_INTERCEPT_INDEX 0 +# define ASAN_INTERCEPT_PTHREAD_CREATE 0 +# define ASAN_INTERCEPT_FORK 0 +#endif + +#if SANITIZER_FREEBSD || SANITIZER_LINUX +# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 1 +#else +# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 0 +#endif + +#if !SANITIZER_MAC +# define ASAN_INTERCEPT_STRNLEN 1 +#else +# define ASAN_INTERCEPT_STRNLEN 0 +#endif + +#if SANITIZER_LINUX && !SANITIZER_ANDROID +# define ASAN_INTERCEPT_SWAPCONTEXT 1 +#else +# define ASAN_INTERCEPT_SWAPCONTEXT 0 +#endif + +#if !SANITIZER_WINDOWS +# define ASAN_INTERCEPT_SIGNAL_AND_SIGACTION 1 +#else +# define ASAN_INTERCEPT_SIGNAL_AND_SIGACTION 0 +#endif + +#if !SANITIZER_WINDOWS +# define ASAN_INTERCEPT_SIGLONGJMP 1 +#else +# define ASAN_INTERCEPT_SIGLONGJMP 0 +#endif + +// Android bug: https://code.google.com/p/android/issues/detail?id=61799 +#if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS && \ + !(SANITIZER_ANDROID && defined(__i386)) +# define ASAN_INTERCEPT___CXA_THROW 1 +#else +# define ASAN_INTERCEPT___CXA_THROW 0 +#endif + +#if !SANITIZER_WINDOWS +# define ASAN_INTERCEPT___CXA_ATEXIT 1 +#else +# define ASAN_INTERCEPT___CXA_ATEXIT 0 +#endif + +DECLARE_REAL(int, memcmp, const void *a1, const void *a2, uptr size) +DECLARE_REAL(void*, memcpy, void *to, const void *from, uptr size) +DECLARE_REAL(void*, memset, void *block, int c, uptr size) +DECLARE_REAL(char*, strchr, const char *str, int c) +DECLARE_REAL(SIZE_T, strlen, const char *s) +DECLARE_REAL(char*, strncpy, char *to, const char *from, uptr size) +DECLARE_REAL(uptr, strnlen, const char *s, uptr maxlen) +DECLARE_REAL(char*, strstr, const char *s1, const char 
*s2) +struct sigaction; +DECLARE_REAL(int, sigaction, int signum, const struct sigaction *act, + struct sigaction *oldact) + +namespace __asan { + +void InitializeAsanInterceptors(); + +#define ENSURE_ASAN_INITED() do { \ + CHECK(!asan_init_is_running); \ + if (UNLIKELY(!asan_inited)) { \ + AsanInitFromRtl(); \ + } \ +} while (0) + +} // namespace __asan + +#endif // ASAN_INTERCEPTORS_H diff --git a/contrib/compiler-rt/lib/asan/asan_interface_internal.h b/contrib/compiler-rt/lib/asan/asan_interface_internal.h new file mode 100644 index 000000000000..edaf44d7893a --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_interface_internal.h @@ -0,0 +1,182 @@ +//===-- asan_interface_internal.h -------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// This header can be included by the instrumented program to fetch +// data (mostly allocator statistics) from ASan runtime library. +//===----------------------------------------------------------------------===// +#ifndef ASAN_INTERFACE_INTERNAL_H +#define ASAN_INTERFACE_INTERNAL_H + +#include "sanitizer_common/sanitizer_internal_defs.h" + +#include "asan_init_version.h" + +using __sanitizer::uptr; + +extern "C" { + // This function should be called at the very beginning of the process, + // before any instrumented code is executed and before any call to malloc. + // Please note that __asan_init is a macro that is replaced with + // __asan_init_vXXX at compile-time. + SANITIZER_INTERFACE_ATTRIBUTE void __asan_init(); + + // This structure is used to describe the source location of a place where + // global was defined. 
+ struct __asan_global_source_location { + const char *filename; + int line_no; + int column_no; + }; + + // This structure describes an instrumented global variable. + struct __asan_global { + uptr beg; // The address of the global. + uptr size; // The original size of the global. + uptr size_with_redzone; // The size with the redzone. + const char *name; // Name as a C string. + const char *module_name; // Module name as a C string. This pointer is a + // unique identifier of a module. + uptr has_dynamic_init; // Non-zero if the global has dynamic initializer. + __asan_global_source_location *location; // Source location of a global, + // or NULL if it is unknown. + }; + + // These two functions should be called by the instrumented code. + // 'globals' is an array of structures describing 'n' globals. + SANITIZER_INTERFACE_ATTRIBUTE + void __asan_register_globals(__asan_global *globals, uptr n); + SANITIZER_INTERFACE_ATTRIBUTE + void __asan_unregister_globals(__asan_global *globals, uptr n); + + // These two functions should be called before and after dynamic initializers + // of a single module run, respectively. + SANITIZER_INTERFACE_ATTRIBUTE + void __asan_before_dynamic_init(const char *module_name); + SANITIZER_INTERFACE_ATTRIBUTE + void __asan_after_dynamic_init(); + + // These two functions are used by instrumented code in the + // use-after-scope mode. They mark memory for local variables as + // unaddressable when they leave scope and addressable before the + // function exits. + SANITIZER_INTERFACE_ATTRIBUTE + void __asan_poison_stack_memory(uptr addr, uptr size); + SANITIZER_INTERFACE_ATTRIBUTE + void __asan_unpoison_stack_memory(uptr addr, uptr size); + + // Performs cleanup before a NoReturn function. Must be called before things + // like _exit and execl to avoid false positives on stack. 
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_handle_no_return(); + + SANITIZER_INTERFACE_ATTRIBUTE + void __asan_poison_memory_region(void const volatile *addr, uptr size); + SANITIZER_INTERFACE_ATTRIBUTE + void __asan_unpoison_memory_region(void const volatile *addr, uptr size); + + SANITIZER_INTERFACE_ATTRIBUTE + int __asan_address_is_poisoned(void const volatile *addr); + + SANITIZER_INTERFACE_ATTRIBUTE + uptr __asan_region_is_poisoned(uptr beg, uptr size); + + SANITIZER_INTERFACE_ATTRIBUTE + void __asan_describe_address(uptr addr); + + SANITIZER_INTERFACE_ATTRIBUTE + int __asan_report_present(); + + SANITIZER_INTERFACE_ATTRIBUTE + uptr __asan_get_report_pc(); + SANITIZER_INTERFACE_ATTRIBUTE + uptr __asan_get_report_bp(); + SANITIZER_INTERFACE_ATTRIBUTE + uptr __asan_get_report_sp(); + SANITIZER_INTERFACE_ATTRIBUTE + uptr __asan_get_report_address(); + SANITIZER_INTERFACE_ATTRIBUTE + int __asan_get_report_access_type(); + SANITIZER_INTERFACE_ATTRIBUTE + uptr __asan_get_report_access_size(); + SANITIZER_INTERFACE_ATTRIBUTE + const char * __asan_get_report_description(); + + SANITIZER_INTERFACE_ATTRIBUTE + const char * __asan_locate_address(uptr addr, char *name, uptr name_size, + uptr *region_address, uptr *region_size); + + SANITIZER_INTERFACE_ATTRIBUTE + uptr __asan_get_alloc_stack(uptr addr, uptr *trace, uptr size, + u32 *thread_id); + + SANITIZER_INTERFACE_ATTRIBUTE + uptr __asan_get_free_stack(uptr addr, uptr *trace, uptr size, + u32 *thread_id); + + SANITIZER_INTERFACE_ATTRIBUTE + void __asan_get_shadow_mapping(uptr *shadow_scale, uptr *shadow_offset); + + SANITIZER_INTERFACE_ATTRIBUTE + void __asan_report_error(uptr pc, uptr bp, uptr sp, + uptr addr, int is_write, uptr access_size); + + SANITIZER_INTERFACE_ATTRIBUTE + int __asan_set_error_exit_code(int exit_code); + SANITIZER_INTERFACE_ATTRIBUTE + void __asan_set_death_callback(void (*callback)(void)); + SANITIZER_INTERFACE_ATTRIBUTE + void __asan_set_error_report_callback(void (*callback)(const char*)); + 
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE + /* OPTIONAL */ void __asan_on_error(); + + SANITIZER_INTERFACE_ATTRIBUTE void __asan_print_accumulated_stats(); + + SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE + /* OPTIONAL */ const char* __asan_default_options(); + + // Global flag, copy of ASAN_OPTIONS=detect_stack_use_after_return + SANITIZER_INTERFACE_ATTRIBUTE + extern int __asan_option_detect_stack_use_after_return; + + SANITIZER_INTERFACE_ATTRIBUTE + extern uptr *__asan_test_only_reported_buggy_pointer; + + SANITIZER_INTERFACE_ATTRIBUTE void __asan_load1(uptr p); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_load2(uptr p); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_load4(uptr p); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_load8(uptr p); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_load16(uptr p); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_store1(uptr p); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_store2(uptr p); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_store4(uptr p); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_store8(uptr p); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_store16(uptr p); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_loadN(uptr p, uptr size); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_storeN(uptr p, uptr size); + + SANITIZER_INTERFACE_ATTRIBUTE + void* __asan_memcpy(void *dst, const void *src, uptr size); + SANITIZER_INTERFACE_ATTRIBUTE + void* __asan_memset(void *s, int c, uptr n); + SANITIZER_INTERFACE_ATTRIBUTE + void* __asan_memmove(void* dest, const void* src, uptr n); + + SANITIZER_INTERFACE_ATTRIBUTE + void __asan_poison_cxx_array_cookie(uptr p); + SANITIZER_INTERFACE_ATTRIBUTE + uptr __asan_load_cxx_array_cookie(uptr *p); + SANITIZER_INTERFACE_ATTRIBUTE + void __asan_poison_intra_object_redzone(uptr p, uptr size); + SANITIZER_INTERFACE_ATTRIBUTE + void __asan_unpoison_intra_object_redzone(uptr p, uptr size); +} // extern "C" + +#endif // ASAN_INTERFACE_INTERNAL_H diff --git 
a/contrib/compiler-rt/lib/asan/asan_internal.h b/contrib/compiler-rt/lib/asan/asan_internal.h new file mode 100644 index 000000000000..65d4a47d3d9e --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_internal.h @@ -0,0 +1,162 @@ +//===-- asan_internal.h -----------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// ASan-private header which defines various general utilities. +//===----------------------------------------------------------------------===// +#ifndef ASAN_INTERNAL_H +#define ASAN_INTERNAL_H + +#include "asan_flags.h" +#include "asan_interface_internal.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_internal_defs.h" +#include "sanitizer_common/sanitizer_stacktrace.h" +#include "sanitizer_common/sanitizer_libc.h" + +#define ASAN_DEFAULT_FAILURE_EXITCODE 1 + +#if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__) +# error "The AddressSanitizer run-time should not be" + " instrumented by AddressSanitizer" +#endif + +// Build-time configuration options. + +// If set, asan will intercept C++ exception api call(s). +#ifndef ASAN_HAS_EXCEPTIONS +# define ASAN_HAS_EXCEPTIONS 1 +#endif + +// If set, values like allocator chunk size, as well as defaults for some flags +// will be changed towards less memory overhead. +#ifndef ASAN_LOW_MEMORY +#if SANITIZER_WORDSIZE == 32 +# define ASAN_LOW_MEMORY 1 +#else +# define ASAN_LOW_MEMORY 0 +# endif +#endif + +#ifndef ASAN_DYNAMIC +# ifdef PIC +# define ASAN_DYNAMIC 1 +# else +# define ASAN_DYNAMIC 0 +# endif +#endif + +// All internal functions in asan reside inside the __asan namespace +// to avoid namespace collisions with the user programs. 
+// Separate namespace also makes it simpler to distinguish the asan run-time +// functions from the instrumented user code in a profile. +namespace __asan { + +class AsanThread; +using __sanitizer::StackTrace; + +struct SignalContext { + void *context; + uptr addr; + uptr pc; + uptr sp; + uptr bp; + + SignalContext(void *context, uptr addr, uptr pc, uptr sp, uptr bp) : + context(context), addr(addr), pc(pc), sp(sp), bp(bp) { + } + + // Creates signal context in a platform-specific manner. + static SignalContext Create(void *siginfo, void *context); +}; + +void AsanInitFromRtl(); + +// asan_rtl.cc +void NORETURN ShowStatsAndAbort(); + +// asan_malloc_linux.cc / asan_malloc_mac.cc +void ReplaceSystemMalloc(); + +// asan_linux.cc / asan_mac.cc / asan_win.cc +void *AsanDoesNotSupportStaticLinkage(); +void AsanCheckDynamicRTPrereqs(); +void AsanCheckIncompatibleRT(); + +void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp); +void AsanOnSIGSEGV(int, void *siginfo, void *context); + +void MaybeReexec(); +bool AsanInterceptsSignal(int signum); +void ReadContextStack(void *context, uptr *stack, uptr *ssize); +void AsanPlatformThreadInit(); +void StopInitOrderChecking(); + +// Wrapper for TLS/TSD. +void AsanTSDInit(void (*destructor)(void *tsd)); +void *AsanTSDGet(); +void AsanTSDSet(void *tsd); +void PlatformTSDDtor(void *tsd); + +void AppendToErrorMessageBuffer(const char *buffer); + +void ParseExtraActivationFlags(); + +void *AsanDlSymNext(const char *sym); + +// Platform-specific options. +#if SANITIZER_MAC +bool PlatformHasDifferentMemcpyAndMemmove(); +# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE \ + (PlatformHasDifferentMemcpyAndMemmove()) +#else +# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE true +#endif // SANITIZER_MAC + +// Add convenient macro for interface functions that may be represented as +// weak hooks. 
+#define ASAN_MALLOC_HOOK(ptr, size) \ + if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(ptr, size) +#define ASAN_FREE_HOOK(ptr) \ + if (&__sanitizer_free_hook) __sanitizer_free_hook(ptr) +#define ASAN_ON_ERROR() \ + if (&__asan_on_error) __asan_on_error() + +extern int asan_inited; +// Used to avoid infinite recursion in __asan_init(). +extern bool asan_init_is_running; +extern void (*death_callback)(void); + +// These magic values are written to shadow for better error reporting. +const int kAsanHeapLeftRedzoneMagic = 0xfa; +const int kAsanHeapRightRedzoneMagic = 0xfb; +const int kAsanHeapFreeMagic = 0xfd; +const int kAsanStackLeftRedzoneMagic = 0xf1; +const int kAsanStackMidRedzoneMagic = 0xf2; +const int kAsanStackRightRedzoneMagic = 0xf3; +const int kAsanStackPartialRedzoneMagic = 0xf4; +const int kAsanStackAfterReturnMagic = 0xf5; +const int kAsanInitializationOrderMagic = 0xf6; +const int kAsanUserPoisonedMemoryMagic = 0xf7; +const int kAsanContiguousContainerOOBMagic = 0xfc; +const int kAsanStackUseAfterScopeMagic = 0xf8; +const int kAsanGlobalRedzoneMagic = 0xf9; +const int kAsanInternalHeapMagic = 0xfe; +const int kAsanArrayCookieMagic = 0xac; +const int kAsanIntraObjectRedzone = 0xbb; +const int kAsanAllocaLeftMagic = 0xca; +const int kAsanAllocaRightMagic = 0xcb; + +static const uptr kCurrentStackFrameMagic = 0x41B58AB3; +static const uptr kRetiredStackFrameMagic = 0x45E0360E; + +} // namespace __asan + +#endif // ASAN_INTERNAL_H diff --git a/contrib/compiler-rt/lib/asan/asan_linux.cc b/contrib/compiler-rt/lib/asan/asan_linux.cc new file mode 100644 index 000000000000..fdd009c960d8 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_linux.cc @@ -0,0 +1,249 @@ +//===-- asan_linux.cc -----------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Linux-specific details. +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_FREEBSD || SANITIZER_LINUX + +#include "asan_interceptors.h" +#include "asan_internal.h" +#include "asan_thread.h" +#include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_freebsd.h" +#include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_procmaps.h" + +#include <sys/time.h> +#include <sys/resource.h> +#include <sys/mman.h> +#include <sys/syscall.h> +#include <sys/types.h> +#include <dlfcn.h> +#include <fcntl.h> +#include <pthread.h> +#include <stdio.h> +#include <unistd.h> +#include <unwind.h> + +#if SANITIZER_FREEBSD +#include <sys/link_elf.h> +#endif + +#if SANITIZER_ANDROID || SANITIZER_FREEBSD +#include <ucontext.h> +extern "C" void* _DYNAMIC; +#else +#include <sys/ucontext.h> +#include <link.h> +#endif + +// x86-64 FreeBSD 9.2 and older define 'ucontext_t' incorrectly in +// 32-bit mode. +#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32) && \ + __FreeBSD_version <= 902001 // v9.2 +#define ucontext_t xucontext_t +#endif + +typedef enum { + ASAN_RT_VERSION_UNDEFINED = 0, + ASAN_RT_VERSION_DYNAMIC, + ASAN_RT_VERSION_STATIC, +} asan_rt_version_t; + +// FIXME: perhaps also store abi version here? +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE +asan_rt_version_t __asan_rt_version; +} + +namespace __asan { + +void MaybeReexec() { + // No need to re-exec on Linux. +} + +void *AsanDoesNotSupportStaticLinkage() { + // This will fail to link with -static. + return &_DYNAMIC; // defined in link.h +} + +#if SANITIZER_ANDROID +// FIXME: should we do anything for Android? 
+void AsanCheckDynamicRTPrereqs() {} +void AsanCheckIncompatibleRT() {} +#else +static int FindFirstDSOCallback(struct dl_phdr_info *info, size_t size, + void *data) { + // Continue until the first dynamic library is found + if (!info->dlpi_name || info->dlpi_name[0] == 0) + return 0; + + // Ignore vDSO + if (internal_strncmp(info->dlpi_name, "linux-", sizeof("linux-") - 1) == 0) + return 0; + + *(const char **)data = info->dlpi_name; + return 1; +} + +static bool IsDynamicRTName(const char *libname) { + return internal_strstr(libname, "libclang_rt.asan") || + internal_strstr(libname, "libasan.so"); +} + +static void ReportIncompatibleRT() { + Report("Your application is linked against incompatible ASan runtimes.\n"); + Die(); +} + +void AsanCheckDynamicRTPrereqs() { + // Ensure that dynamic RT is the first DSO in the list + const char *first_dso_name = 0; + dl_iterate_phdr(FindFirstDSOCallback, &first_dso_name); + if (first_dso_name && !IsDynamicRTName(first_dso_name)) { + Report("ASan runtime does not come first in initial library list; " + "you should either link runtime to your application or " + "manually preload it with LD_PRELOAD.\n"); + Die(); + } +} + +void AsanCheckIncompatibleRT() { + if (ASAN_DYNAMIC) { + if (__asan_rt_version == ASAN_RT_VERSION_UNDEFINED) { + __asan_rt_version = ASAN_RT_VERSION_DYNAMIC; + } else if (__asan_rt_version != ASAN_RT_VERSION_DYNAMIC) { + ReportIncompatibleRT(); + } + } else { + if (__asan_rt_version == ASAN_RT_VERSION_UNDEFINED) { + // Ensure that dynamic runtime is not present. We should detect it + // as early as possible, otherwise ASan interceptors could bind to + // the functions in dynamic ASan runtime instead of the functions in + // system libraries, causing crashes later in ASan initialization. 
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true); + char filename[128]; + while (proc_maps.Next(0, 0, 0, filename, sizeof(filename), 0)) { + if (IsDynamicRTName(filename)) { + Report("Your application is linked against " + "incompatible ASan runtimes.\n"); + Die(); + } + } + __asan_rt_version = ASAN_RT_VERSION_STATIC; + } else if (__asan_rt_version != ASAN_RT_VERSION_STATIC) { + ReportIncompatibleRT(); + } + } +} +#endif // SANITIZER_ANDROID + +void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) { +#if defined(__arm__) + ucontext_t *ucontext = (ucontext_t*)context; + *pc = ucontext->uc_mcontext.arm_pc; + *bp = ucontext->uc_mcontext.arm_fp; + *sp = ucontext->uc_mcontext.arm_sp; +#elif defined(__aarch64__) + ucontext_t *ucontext = (ucontext_t*)context; + *pc = ucontext->uc_mcontext.pc; + *bp = ucontext->uc_mcontext.regs[29]; + *sp = ucontext->uc_mcontext.sp; +#elif defined(__hppa__) + ucontext_t *ucontext = (ucontext_t*)context; + *pc = ucontext->uc_mcontext.sc_iaoq[0]; + /* GCC uses %r3 whenever a frame pointer is needed. 
*/ + *bp = ucontext->uc_mcontext.sc_gr[3]; + *sp = ucontext->uc_mcontext.sc_gr[30]; +#elif defined(__x86_64__) +# if SANITIZER_FREEBSD + ucontext_t *ucontext = (ucontext_t*)context; + *pc = ucontext->uc_mcontext.mc_rip; + *bp = ucontext->uc_mcontext.mc_rbp; + *sp = ucontext->uc_mcontext.mc_rsp; +# else + ucontext_t *ucontext = (ucontext_t*)context; + *pc = ucontext->uc_mcontext.gregs[REG_RIP]; + *bp = ucontext->uc_mcontext.gregs[REG_RBP]; + *sp = ucontext->uc_mcontext.gregs[REG_RSP]; +# endif +#elif defined(__i386__) +# if SANITIZER_FREEBSD + ucontext_t *ucontext = (ucontext_t*)context; + *pc = ucontext->uc_mcontext.mc_eip; + *bp = ucontext->uc_mcontext.mc_ebp; + *sp = ucontext->uc_mcontext.mc_esp; +# else + ucontext_t *ucontext = (ucontext_t*)context; + *pc = ucontext->uc_mcontext.gregs[REG_EIP]; + *bp = ucontext->uc_mcontext.gregs[REG_EBP]; + *sp = ucontext->uc_mcontext.gregs[REG_ESP]; +# endif +#elif defined(__powerpc__) || defined(__powerpc64__) + ucontext_t *ucontext = (ucontext_t*)context; + *pc = ucontext->uc_mcontext.regs->nip; + *sp = ucontext->uc_mcontext.regs->gpr[PT_R1]; + // The powerpc{,64}-linux ABIs do not specify r31 as the frame + // pointer, but GCC always uses r31 when we need a frame pointer. 
+ *bp = ucontext->uc_mcontext.regs->gpr[PT_R31]; +#elif defined(__sparc__) + ucontext_t *ucontext = (ucontext_t*)context; + uptr *stk_ptr; +# if defined (__arch64__) + *pc = ucontext->uc_mcontext.mc_gregs[MC_PC]; + *sp = ucontext->uc_mcontext.mc_gregs[MC_O6]; + stk_ptr = (uptr *) (*sp + 2047); + *bp = stk_ptr[15]; +# else + *pc = ucontext->uc_mcontext.gregs[REG_PC]; + *sp = ucontext->uc_mcontext.gregs[REG_O6]; + stk_ptr = (uptr *) *sp; + *bp = stk_ptr[15]; +# endif +#elif defined(__mips__) + ucontext_t *ucontext = (ucontext_t*)context; + *pc = ucontext->uc_mcontext.gregs[31]; + *bp = ucontext->uc_mcontext.gregs[30]; + *sp = ucontext->uc_mcontext.gregs[29]; +#else +# error "Unsupported arch" +#endif +} + +bool AsanInterceptsSignal(int signum) { + return signum == SIGSEGV && common_flags()->handle_segv; +} + +void AsanPlatformThreadInit() { + // Nothing here for now. +} + +#if !SANITIZER_ANDROID +void ReadContextStack(void *context, uptr *stack, uptr *ssize) { + ucontext_t *ucp = (ucontext_t*)context; + *stack = (uptr)ucp->uc_stack.ss_sp; + *ssize = ucp->uc_stack.ss_size; +} +#else +void ReadContextStack(void *context, uptr *stack, uptr *ssize) { + UNIMPLEMENTED(); +} +#endif + +void *AsanDlSymNext(const char *sym) { + return dlsym(RTLD_NEXT, sym); +} + +} // namespace __asan + +#endif // SANITIZER_FREEBSD || SANITIZER_LINUX diff --git a/contrib/compiler-rt/lib/asan/asan_lock.h b/contrib/compiler-rt/lib/asan/asan_lock.h new file mode 100644 index 000000000000..e69de29bb2d1 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_lock.h diff --git a/contrib/compiler-rt/lib/asan/asan_mac.cc b/contrib/compiler-rt/lib/asan/asan_mac.cc new file mode 100644 index 000000000000..ae0fa15b6523 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_mac.cc @@ -0,0 +1,419 @@ +//===-- asan_mac.cc -------------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// 
License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Mac-specific details. +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_MAC + +#include "asan_interceptors.h" +#include "asan_internal.h" +#include "asan_mapping.h" +#include "asan_stack.h" +#include "asan_thread.h" +#include "sanitizer_common/sanitizer_atomic.h" +#include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_mac.h" + +#include <crt_externs.h> // for _NSGetArgv +#include <dlfcn.h> // for dladdr() +#include <mach-o/dyld.h> +#include <mach-o/loader.h> +#include <sys/mman.h> +#include <sys/resource.h> +#include <sys/sysctl.h> +#include <sys/ucontext.h> +#include <fcntl.h> +#include <pthread.h> +#include <stdlib.h> // for free() +#include <unistd.h> +#include <libkern/OSAtomic.h> + +namespace __asan { + +void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) { + ucontext_t *ucontext = (ucontext_t*)context; +# if SANITIZER_WORDSIZE == 64 + *pc = ucontext->uc_mcontext->__ss.__rip; + *bp = ucontext->uc_mcontext->__ss.__rbp; + *sp = ucontext->uc_mcontext->__ss.__rsp; +# else + *pc = ucontext->uc_mcontext->__ss.__eip; + *bp = ucontext->uc_mcontext->__ss.__ebp; + *sp = ucontext->uc_mcontext->__ss.__esp; +# endif // SANITIZER_WORDSIZE +} + + +bool PlatformHasDifferentMemcpyAndMemmove() { + // On OS X 10.7 memcpy() and memmove() are both resolved + // into memmove$VARIANT$sse42. + // See also http://code.google.com/p/address-sanitizer/issues/detail?id=34. + // TODO(glider): need to check dynamically that memcpy() and memmove() are + // actually the same function. 
+ return GetMacosVersion() == MACOS_VERSION_SNOW_LEOPARD; +} + +extern "C" +void __asan_init(); + +static const char kDyldInsertLibraries[] = "DYLD_INSERT_LIBRARIES"; +LowLevelAllocator allocator_for_env; + +// Change the value of the env var |name|, leaking the original value. +// If |name_value| is NULL, the variable is deleted from the environment, +// otherwise the corresponding "NAME=value" string is replaced with +// |name_value|. +void LeakyResetEnv(const char *name, const char *name_value) { + char ***env_ptr = _NSGetEnviron(); + CHECK(env_ptr); + char **environ = *env_ptr; + CHECK(environ); + uptr name_len = internal_strlen(name); + while (*environ != 0) { + uptr len = internal_strlen(*environ); + if (len > name_len) { + const char *p = *environ; + if (!internal_memcmp(p, name, name_len) && p[name_len] == '=') { + // Match. + if (name_value) { + // Replace the old value with the new one. + *environ = const_cast<char*>(name_value); + } else { + // Shift the subsequent pointers back. + char **del = environ; + do { + del[0] = del[1]; + } while (*del++); + } + } + } + environ++; + } +} + +void MaybeReexec() { + if (!flags()->allow_reexec) return; + // Make sure the dynamic ASan runtime library is preloaded so that the + // wrappers work. If it is not, set DYLD_INSERT_LIBRARIES and re-exec + // ourselves. + Dl_info info; + CHECK(dladdr((void*)((uptr)__asan_init), &info)); + char *dyld_insert_libraries = + const_cast<char*>(GetEnv(kDyldInsertLibraries)); + uptr old_env_len = dyld_insert_libraries ? + internal_strlen(dyld_insert_libraries) : 0; + uptr fname_len = internal_strlen(info.dli_fname); + if (!dyld_insert_libraries || + !REAL(strstr)(dyld_insert_libraries, StripModuleName(info.dli_fname))) { + // DYLD_INSERT_LIBRARIES is not set or does not contain the runtime + // library. 
+ char program_name[1024]; + uint32_t buf_size = sizeof(program_name); + _NSGetExecutablePath(program_name, &buf_size); + char *new_env = const_cast<char*>(info.dli_fname); + if (dyld_insert_libraries) { + // Append the runtime dylib name to the existing value of + // DYLD_INSERT_LIBRARIES. + new_env = (char*)allocator_for_env.Allocate(old_env_len + fname_len + 2); + internal_strncpy(new_env, dyld_insert_libraries, old_env_len); + new_env[old_env_len] = ':'; + // Copy fname_len and add a trailing zero. + internal_strncpy(new_env + old_env_len + 1, info.dli_fname, + fname_len + 1); + // Ok to use setenv() since the wrappers don't depend on the value of + // asan_inited. + setenv(kDyldInsertLibraries, new_env, /*overwrite*/1); + } else { + // Set DYLD_INSERT_LIBRARIES equal to the runtime dylib name. + setenv(kDyldInsertLibraries, info.dli_fname, /*overwrite*/0); + } + VReport(1, "exec()-ing the program with\n"); + VReport(1, "%s=%s\n", kDyldInsertLibraries, new_env); + VReport(1, "to enable ASan wrappers.\n"); + VReport(1, "Set ASAN_OPTIONS=allow_reexec=0 to disable this.\n"); + execv(program_name, *_NSGetArgv()); + } else { + // DYLD_INSERT_LIBRARIES is set and contains the runtime library. + if (old_env_len == fname_len) { + // It's just the runtime library name - fine to unset the variable. + LeakyResetEnv(kDyldInsertLibraries, NULL); + } else { + uptr env_name_len = internal_strlen(kDyldInsertLibraries); + // Allocate memory to hold the previous env var name, its value, the '=' + // sign and the '\0' char. + char *new_env = (char*)allocator_for_env.Allocate( + old_env_len + 2 + env_name_len); + CHECK(new_env); + internal_memset(new_env, '\0', old_env_len + 2 + env_name_len); + internal_strncpy(new_env, kDyldInsertLibraries, env_name_len); + new_env[env_name_len] = '='; + char *new_env_pos = new_env + env_name_len + 1; + + // Iterate over colon-separated pieces of |dyld_insert_libraries|. 
+ char *piece_start = dyld_insert_libraries; + char *piece_end = NULL; + char *old_env_end = dyld_insert_libraries + old_env_len; + do { + if (piece_start[0] == ':') piece_start++; + piece_end = REAL(strchr)(piece_start, ':'); + if (!piece_end) piece_end = dyld_insert_libraries + old_env_len; + if ((uptr)(piece_start - dyld_insert_libraries) > old_env_len) break; + uptr piece_len = piece_end - piece_start; + + // If the current piece isn't the runtime library name, + // append it to new_env. + if ((piece_len != fname_len) || + (internal_strncmp(piece_start, info.dli_fname, fname_len) != 0)) { + if (new_env_pos != new_env + env_name_len + 1) { + new_env_pos[0] = ':'; + new_env_pos++; + } + internal_strncpy(new_env_pos, piece_start, piece_len); + } + // Move on to the next piece. + new_env_pos += piece_len; + piece_start = piece_end; + } while (piece_start < old_env_end); + + // Can't use setenv() here, because it requires the allocator to be + // initialized. + // FIXME: instead of filtering DYLD_INSERT_LIBRARIES here, do it in + // a separate function called after InitializeAllocator(). + LeakyResetEnv(kDyldInsertLibraries, new_env); + } + } +} + +// No-op. Mac does not support static linkage anyway. +void *AsanDoesNotSupportStaticLinkage() { + return 0; +} + +// No-op. Mac does not support static linkage anyway. +void AsanCheckDynamicRTPrereqs() {} + +// No-op. Mac does not support static linkage anyway. 
+void AsanCheckIncompatibleRT() {} + +bool AsanInterceptsSignal(int signum) { + return (signum == SIGSEGV || signum == SIGBUS) && + common_flags()->handle_segv; +} + +void AsanPlatformThreadInit() { +} + +void ReadContextStack(void *context, uptr *stack, uptr *ssize) { + UNIMPLEMENTED(); +} + +// Support for the following functions from libdispatch on Mac OS: +// dispatch_async_f() +// dispatch_async() +// dispatch_sync_f() +// dispatch_sync() +// dispatch_after_f() +// dispatch_after() +// dispatch_group_async_f() +// dispatch_group_async() +// TODO(glider): libdispatch API contains other functions that we don't support +// yet. +// +// dispatch_sync() and dispatch_sync_f() are synchronous, although chances are +// they can cause jobs to run on a thread different from the current one. +// TODO(glider): if so, we need a test for this (otherwise we should remove +// them). +// +// The following functions use dispatch_barrier_async_f() (which isn't a library +// function but is exported) and are thus supported: +// dispatch_source_set_cancel_handler_f() +// dispatch_source_set_cancel_handler() +// dispatch_source_set_event_handler_f() +// dispatch_source_set_event_handler() +// +// The reference manual for Grand Central Dispatch is available at +// http://developer.apple.com/library/mac/#documentation/Performance/Reference/GCD_libdispatch_Ref/Reference/reference.html +// The implementation details are at +// http://libdispatch.macosforge.org/trac/browser/trunk/src/queue.c + +typedef void* dispatch_group_t; +typedef void* dispatch_queue_t; +typedef void* dispatch_source_t; +typedef u64 dispatch_time_t; +typedef void (*dispatch_function_t)(void *block); +typedef void* (*worker_t)(void *block); + +// A wrapper for the ObjC blocks used to support libdispatch. 
+typedef struct { + void *block; + dispatch_function_t func; + u32 parent_tid; +} asan_block_context_t; + +ALWAYS_INLINE +void asan_register_worker_thread(int parent_tid, StackTrace *stack) { + AsanThread *t = GetCurrentThread(); + if (!t) { + t = AsanThread::Create(/* start_routine */ nullptr, /* arg */ nullptr, + parent_tid, stack, /* detached */ true); + t->Init(); + asanThreadRegistry().StartThread(t->tid(), 0, 0); + SetCurrentThread(t); + } +} + +// For use by only those functions that allocated the context via +// alloc_asan_context(). +extern "C" +void asan_dispatch_call_block_and_release(void *block) { + GET_STACK_TRACE_THREAD; + asan_block_context_t *context = (asan_block_context_t*)block; + VReport(2, + "asan_dispatch_call_block_and_release(): " + "context: %p, pthread_self: %p\n", + block, pthread_self()); + asan_register_worker_thread(context->parent_tid, &stack); + // Call the original dispatcher for the block. + context->func(context->block); + asan_free(context, &stack, FROM_MALLOC); +} + +} // namespace __asan + +using namespace __asan; // NOLINT + +// Wrap |ctxt| and |func| into an asan_block_context_t. +// The caller retains control of the allocated context. +extern "C" +asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func, + BufferedStackTrace *stack) { + asan_block_context_t *asan_ctxt = + (asan_block_context_t*) asan_malloc(sizeof(asan_block_context_t), stack); + asan_ctxt->block = ctxt; + asan_ctxt->func = func; + asan_ctxt->parent_tid = GetCurrentTidOrInvalid(); + return asan_ctxt; +} + +// Define interceptor for dispatch_*_f function with the three most common +// parameters: dispatch_queue_t, context, dispatch_function_t. 
+#define INTERCEPT_DISPATCH_X_F_3(dispatch_x_f) \ + INTERCEPTOR(void, dispatch_x_f, dispatch_queue_t dq, void *ctxt, \ + dispatch_function_t func) { \ + GET_STACK_TRACE_THREAD; \ + asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); \ + if (common_flags()->verbosity >= 2) { \ + Report(#dispatch_x_f "(): context: %p, pthread_self: %p\n", \ + asan_ctxt, pthread_self()); \ + PRINT_CURRENT_STACK(); \ + } \ + return REAL(dispatch_x_f)(dq, (void*)asan_ctxt, \ + asan_dispatch_call_block_and_release); \ + } + +INTERCEPT_DISPATCH_X_F_3(dispatch_async_f) +INTERCEPT_DISPATCH_X_F_3(dispatch_sync_f) +INTERCEPT_DISPATCH_X_F_3(dispatch_barrier_async_f) + +INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when, + dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) { + GET_STACK_TRACE_THREAD; + asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); + if (common_flags()->verbosity >= 2) { + Report("dispatch_after_f: %p\n", asan_ctxt); + PRINT_CURRENT_STACK(); + } + return REAL(dispatch_after_f)(when, dq, (void*)asan_ctxt, + asan_dispatch_call_block_and_release); +} + +INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group, + dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) { + GET_STACK_TRACE_THREAD; + asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); + if (common_flags()->verbosity >= 2) { + Report("dispatch_group_async_f(): context: %p, pthread_self: %p\n", + asan_ctxt, pthread_self()); + PRINT_CURRENT_STACK(); + } + REAL(dispatch_group_async_f)(group, dq, (void*)asan_ctxt, + asan_dispatch_call_block_and_release); +} + +#if !defined(MISSING_BLOCKS_SUPPORT) +extern "C" { +void dispatch_async(dispatch_queue_t dq, void(^work)(void)); +void dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq, + void(^work)(void)); +void dispatch_after(dispatch_time_t when, dispatch_queue_t queue, + void(^work)(void)); +void dispatch_source_set_cancel_handler(dispatch_source_t ds, + 
void(^work)(void)); +void dispatch_source_set_event_handler(dispatch_source_t ds, void(^work)(void)); +} + +#define GET_ASAN_BLOCK(work) \ + void (^asan_block)(void); \ + int parent_tid = GetCurrentTidOrInvalid(); \ + asan_block = ^(void) { \ + GET_STACK_TRACE_THREAD; \ + asan_register_worker_thread(parent_tid, &stack); \ + work(); \ + } + +// Forces the compiler to generate a frame pointer in the function. +#define ENABLE_FRAME_POINTER \ + do { \ + volatile uptr enable_fp; \ + enable_fp = GET_CURRENT_FRAME(); \ + } while (0) + +INTERCEPTOR(void, dispatch_async, + dispatch_queue_t dq, void(^work)(void)) { + ENABLE_FRAME_POINTER; + GET_ASAN_BLOCK(work); + REAL(dispatch_async)(dq, asan_block); +} + +INTERCEPTOR(void, dispatch_group_async, + dispatch_group_t dg, dispatch_queue_t dq, void(^work)(void)) { + ENABLE_FRAME_POINTER; + GET_ASAN_BLOCK(work); + REAL(dispatch_group_async)(dg, dq, asan_block); +} + +INTERCEPTOR(void, dispatch_after, + dispatch_time_t when, dispatch_queue_t queue, void(^work)(void)) { + ENABLE_FRAME_POINTER; + GET_ASAN_BLOCK(work); + REAL(dispatch_after)(when, queue, asan_block); +} + +INTERCEPTOR(void, dispatch_source_set_cancel_handler, + dispatch_source_t ds, void(^work)(void)) { + ENABLE_FRAME_POINTER; + GET_ASAN_BLOCK(work); + REAL(dispatch_source_set_cancel_handler)(ds, asan_block); +} + +INTERCEPTOR(void, dispatch_source_set_event_handler, + dispatch_source_t ds, void(^work)(void)) { + ENABLE_FRAME_POINTER; + GET_ASAN_BLOCK(work); + REAL(dispatch_source_set_event_handler)(ds, asan_block); +} +#endif + +#endif // SANITIZER_MAC diff --git a/contrib/compiler-rt/lib/asan/asan_malloc_linux.cc b/contrib/compiler-rt/lib/asan/asan_malloc_linux.cc new file mode 100644 index 000000000000..46a6a9db4a81 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_malloc_linux.cc @@ -0,0 +1,186 @@ +//===-- asan_malloc_linux.cc ----------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under 
the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Linux-specific malloc interception. +// We simply define functions like malloc, free, realloc, etc. +// They will replace the corresponding libc functions automagically. +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_FREEBSD || SANITIZER_LINUX + +#include "sanitizer_common/sanitizer_tls_get_addr.h" +#include "asan_allocator.h" +#include "asan_interceptors.h" +#include "asan_internal.h" +#include "asan_stack.h" + +// ---------------------- Replacement functions ---------------- {{{1 +using namespace __asan; // NOLINT + +INTERCEPTOR(void, free, void *ptr) { + GET_STACK_TRACE_FREE; + asan_free(ptr, &stack, FROM_MALLOC); +} + +INTERCEPTOR(void, cfree, void *ptr) { + GET_STACK_TRACE_FREE; + asan_free(ptr, &stack, FROM_MALLOC); +} + +INTERCEPTOR(void*, malloc, uptr size) { + GET_STACK_TRACE_MALLOC; + return asan_malloc(size, &stack); +} + +INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) { + if (UNLIKELY(!asan_inited)) { + // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym. 
+ const uptr kCallocPoolSize = 1024; + static uptr calloc_memory_for_dlsym[kCallocPoolSize]; + static uptr allocated; + uptr size_in_words = ((nmemb * size) + kWordSize - 1) / kWordSize; + void *mem = (void*)&calloc_memory_for_dlsym[allocated]; + allocated += size_in_words; + CHECK(allocated < kCallocPoolSize); + return mem; + } + GET_STACK_TRACE_MALLOC; + return asan_calloc(nmemb, size, &stack); +} + +INTERCEPTOR(void*, realloc, void *ptr, uptr size) { + GET_STACK_TRACE_MALLOC; + return asan_realloc(ptr, size, &stack); +} + +INTERCEPTOR(void*, memalign, uptr boundary, uptr size) { + GET_STACK_TRACE_MALLOC; + return asan_memalign(boundary, size, &stack, FROM_MALLOC); +} + +INTERCEPTOR(void*, aligned_alloc, uptr boundary, uptr size) { + GET_STACK_TRACE_MALLOC; + return asan_memalign(boundary, size, &stack, FROM_MALLOC); +} + +INTERCEPTOR(void*, __libc_memalign, uptr boundary, uptr size) { + GET_STACK_TRACE_MALLOC; + void *res = asan_memalign(boundary, size, &stack, FROM_MALLOC); + DTLS_on_libc_memalign(res, size * boundary); + return res; +} + +INTERCEPTOR(uptr, malloc_usable_size, void *ptr) { + GET_CURRENT_PC_BP_SP; + (void)sp; + return asan_malloc_usable_size(ptr, pc, bp); +} + +// We avoid including malloc.h for portability reasons. +// man mallinfo says the fields are "long", but the implementation uses int. +// It doesn't matter much -- we just need to make sure that the libc's mallinfo +// is not called. 
+struct fake_mallinfo { + int x[10]; +}; + +INTERCEPTOR(struct fake_mallinfo, mallinfo, void) { + struct fake_mallinfo res; + REAL(memset)(&res, 0, sizeof(res)); + return res; +} + +INTERCEPTOR(int, mallopt, int cmd, int value) { + return -1; +} + +INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) { + GET_STACK_TRACE_MALLOC; + // Printf("posix_memalign: %zx %zu\n", alignment, size); + return asan_posix_memalign(memptr, alignment, size, &stack); +} + +INTERCEPTOR(void*, valloc, uptr size) { + GET_STACK_TRACE_MALLOC; + return asan_valloc(size, &stack); +} + +INTERCEPTOR(void*, pvalloc, uptr size) { + GET_STACK_TRACE_MALLOC; + return asan_pvalloc(size, &stack); +} + +INTERCEPTOR(void, malloc_stats, void) { + __asan_print_accumulated_stats(); +} + +#if SANITIZER_ANDROID +// Format of __libc_malloc_dispatch has changed in Android L. +// While we are moving towards a solution that does not depend on bionic +// internals, here is something to support both K* and L releases. 
+struct MallocDebugK { + void *(*malloc)(uptr bytes); + void (*free)(void *mem); + void *(*calloc)(uptr n_elements, uptr elem_size); + void *(*realloc)(void *oldMem, uptr bytes); + void *(*memalign)(uptr alignment, uptr bytes); + uptr (*malloc_usable_size)(void *mem); +}; + +struct MallocDebugL { + void *(*calloc)(uptr n_elements, uptr elem_size); + void (*free)(void *mem); + fake_mallinfo (*mallinfo)(void); + void *(*malloc)(uptr bytes); + uptr (*malloc_usable_size)(void *mem); + void *(*memalign)(uptr alignment, uptr bytes); + int (*posix_memalign)(void **memptr, uptr alignment, uptr size); + void* (*pvalloc)(uptr size); + void *(*realloc)(void *oldMem, uptr bytes); + void* (*valloc)(uptr size); +}; + +ALIGNED(32) const MallocDebugK asan_malloc_dispatch_k = { + WRAP(malloc), WRAP(free), WRAP(calloc), + WRAP(realloc), WRAP(memalign), WRAP(malloc_usable_size)}; + +ALIGNED(32) const MallocDebugL asan_malloc_dispatch_l = { + WRAP(calloc), WRAP(free), WRAP(mallinfo), + WRAP(malloc), WRAP(malloc_usable_size), WRAP(memalign), + WRAP(posix_memalign), WRAP(pvalloc), WRAP(realloc), + WRAP(valloc)}; + +namespace __asan { +void ReplaceSystemMalloc() { + void **__libc_malloc_dispatch_p = + (void **)AsanDlSymNext("__libc_malloc_dispatch"); + if (__libc_malloc_dispatch_p) { + // Decide on K vs L dispatch format by the presence of + // __libc_malloc_default_dispatch export in libc. 
+ void *default_dispatch_p = AsanDlSymNext("__libc_malloc_default_dispatch"); + if (default_dispatch_p) + *__libc_malloc_dispatch_p = (void *)&asan_malloc_dispatch_k; + else + *__libc_malloc_dispatch_p = (void *)&asan_malloc_dispatch_l; + } +} +} // namespace __asan + +#else // SANITIZER_ANDROID + +namespace __asan { +void ReplaceSystemMalloc() { +} +} // namespace __asan +#endif // SANITIZER_ANDROID + +#endif // SANITIZER_FREEBSD || SANITIZER_LINUX diff --git a/contrib/compiler-rt/lib/asan/asan_malloc_mac.cc b/contrib/compiler-rt/lib/asan/asan_malloc_mac.cc new file mode 100644 index 000000000000..79b6dfae10f6 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_malloc_mac.cc @@ -0,0 +1,359 @@ +//===-- asan_malloc_mac.cc ------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Mac-specific malloc interception. +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_MAC + +#include <AvailabilityMacros.h> +#include <CoreFoundation/CFBase.h> +#include <dlfcn.h> +#include <malloc/malloc.h> +#include <sys/mman.h> + +#include "asan_allocator.h" +#include "asan_interceptors.h" +#include "asan_internal.h" +#include "asan_report.h" +#include "asan_stack.h" +#include "asan_stats.h" +#include "sanitizer_common/sanitizer_mac.h" + +// Similar code is used in Google Perftools, +// http://code.google.com/p/google-perftools. + +// ---------------------- Replacement functions ---------------- {{{1 +using namespace __asan; // NOLINT + +// TODO(glider): do we need both zones? 
+static malloc_zone_t *system_malloc_zone = 0; +static malloc_zone_t asan_zone; + +INTERCEPTOR(malloc_zone_t *, malloc_create_zone, + vm_size_t start_size, unsigned zone_flags) { + ENSURE_ASAN_INITED(); + GET_STACK_TRACE_MALLOC; + uptr page_size = GetPageSizeCached(); + uptr allocated_size = RoundUpTo(sizeof(asan_zone), page_size); + malloc_zone_t *new_zone = + (malloc_zone_t*)asan_memalign(page_size, allocated_size, + &stack, FROM_MALLOC); + internal_memcpy(new_zone, &asan_zone, sizeof(asan_zone)); + new_zone->zone_name = NULL; // The name will be changed anyway. + if (GetMacosVersion() >= MACOS_VERSION_LION) { + // Prevent the client app from overwriting the zone contents. + // Library functions that need to modify the zone will set PROT_WRITE on it. + // This matches the behavior of malloc_create_zone() on OSX 10.7 and higher. + mprotect(new_zone, allocated_size, PROT_READ); + } + return new_zone; +} + +INTERCEPTOR(malloc_zone_t *, malloc_default_zone, void) { + ENSURE_ASAN_INITED(); + return &asan_zone; +} + +INTERCEPTOR(malloc_zone_t *, malloc_default_purgeable_zone, void) { + // FIXME: ASan should support purgeable allocations. + // https://code.google.com/p/address-sanitizer/issues/detail?id=139 + ENSURE_ASAN_INITED(); + return &asan_zone; +} + +INTERCEPTOR(void, malloc_make_purgeable, void *ptr) { + // FIXME: ASan should support purgeable allocations. Ignoring them is fine + // for now. + ENSURE_ASAN_INITED(); +} + +INTERCEPTOR(int, malloc_make_nonpurgeable, void *ptr) { + // FIXME: ASan should support purgeable allocations. Ignoring them is fine + // for now. + ENSURE_ASAN_INITED(); + // Must return 0 if the contents were not purged since the last call to + // malloc_make_purgeable(). + return 0; +} + +INTERCEPTOR(void, malloc_set_zone_name, malloc_zone_t *zone, const char *name) { + ENSURE_ASAN_INITED(); + // Allocate |strlen("asan-") + 1 + internal_strlen(name)| bytes. + size_t buflen = 6 + (name ? 
internal_strlen(name) : 0); + InternalScopedString new_name(buflen); + if (name && zone->introspect == asan_zone.introspect) { + new_name.append("asan-%s", name); + name = new_name.data(); + } + + // Call the system malloc's implementation for both external and our zones, + // since that appropriately changes VM region protections on the zone. + REAL(malloc_set_zone_name)(zone, name); +} + +INTERCEPTOR(void *, malloc, size_t size) { + ENSURE_ASAN_INITED(); + GET_STACK_TRACE_MALLOC; + void *res = asan_malloc(size, &stack); + return res; +} + +INTERCEPTOR(void, free, void *ptr) { + ENSURE_ASAN_INITED(); + if (!ptr) return; + GET_STACK_TRACE_FREE; + asan_free(ptr, &stack, FROM_MALLOC); +} + +INTERCEPTOR(void *, realloc, void *ptr, size_t size) { + ENSURE_ASAN_INITED(); + GET_STACK_TRACE_MALLOC; + return asan_realloc(ptr, size, &stack); +} + +INTERCEPTOR(void *, calloc, size_t nmemb, size_t size) { + ENSURE_ASAN_INITED(); + GET_STACK_TRACE_MALLOC; + return asan_calloc(nmemb, size, &stack); +} + +INTERCEPTOR(void *, valloc, size_t size) { + ENSURE_ASAN_INITED(); + GET_STACK_TRACE_MALLOC; + return asan_memalign(GetPageSizeCached(), size, &stack, FROM_MALLOC); +} + +INTERCEPTOR(size_t, malloc_good_size, size_t size) { + ENSURE_ASAN_INITED(); + return asan_zone.introspect->good_size(&asan_zone, size); +} + +INTERCEPTOR(int, posix_memalign, void **memptr, size_t alignment, size_t size) { + ENSURE_ASAN_INITED(); + CHECK(memptr); + GET_STACK_TRACE_MALLOC; + void *result = asan_memalign(alignment, size, &stack, FROM_MALLOC); + if (result) { + *memptr = result; + return 0; + } + return -1; +} + +namespace { + +// TODO(glider): the mz_* functions should be united with the Linux wrappers, +// as they are basically copied from there. 
+size_t mz_size(malloc_zone_t* zone, const void* ptr) { + return asan_mz_size(ptr); +} + +void *mz_malloc(malloc_zone_t *zone, size_t size) { + if (UNLIKELY(!asan_inited)) { + CHECK(system_malloc_zone); + return malloc_zone_malloc(system_malloc_zone, size); + } + GET_STACK_TRACE_MALLOC; + return asan_malloc(size, &stack); +} + +void *mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) { + if (UNLIKELY(!asan_inited)) { + // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym. + const size_t kCallocPoolSize = 1024; + static uptr calloc_memory_for_dlsym[kCallocPoolSize]; + static size_t allocated; + size_t size_in_words = ((nmemb * size) + kWordSize - 1) / kWordSize; + void *mem = (void*)&calloc_memory_for_dlsym[allocated]; + allocated += size_in_words; + CHECK(allocated < kCallocPoolSize); + return mem; + } + GET_STACK_TRACE_MALLOC; + return asan_calloc(nmemb, size, &stack); +} + +void *mz_valloc(malloc_zone_t *zone, size_t size) { + if (UNLIKELY(!asan_inited)) { + CHECK(system_malloc_zone); + return malloc_zone_valloc(system_malloc_zone, size); + } + GET_STACK_TRACE_MALLOC; + return asan_memalign(GetPageSizeCached(), size, &stack, FROM_MALLOC); +} + +#define GET_ZONE_FOR_PTR(ptr) \ + malloc_zone_t *zone_ptr = malloc_zone_from_ptr(ptr); \ + const char *zone_name = (zone_ptr == 0) ? 0 : zone_ptr->zone_name + +void ALWAYS_INLINE free_common(void *context, void *ptr) { + if (!ptr) return; + GET_STACK_TRACE_FREE; + // FIXME: need to retire this flag. + if (!flags()->mac_ignore_invalid_free) { + asan_free(ptr, &stack, FROM_MALLOC); + } else { + GET_ZONE_FOR_PTR(ptr); + WarnMacFreeUnallocated((uptr)ptr, (uptr)zone_ptr, zone_name, &stack); + return; + } +} + +// TODO(glider): the allocation callbacks need to be refactored. 
+void mz_free(malloc_zone_t *zone, void *ptr) { + free_common(zone, ptr); +} + +void *mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) { + if (!ptr) { + GET_STACK_TRACE_MALLOC; + return asan_malloc(size, &stack); + } else { + if (asan_mz_size(ptr)) { + GET_STACK_TRACE_MALLOC; + return asan_realloc(ptr, size, &stack); + } else { + // We can't recover from reallocating an unknown address, because + // this would require reading at most |size| bytes from + // potentially unaccessible memory. + GET_STACK_TRACE_FREE; + GET_ZONE_FOR_PTR(ptr); + ReportMacMzReallocUnknown((uptr)ptr, (uptr)zone_ptr, zone_name, &stack); + } + } +} + +void mz_destroy(malloc_zone_t* zone) { + // A no-op -- we will not be destroyed! + Report("mz_destroy() called -- ignoring\n"); +} + + // from AvailabilityMacros.h +#if defined(MAC_OS_X_VERSION_10_6) && \ + MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6 +void *mz_memalign(malloc_zone_t *zone, size_t align, size_t size) { + if (UNLIKELY(!asan_inited)) { + CHECK(system_malloc_zone); + return malloc_zone_memalign(system_malloc_zone, align, size); + } + GET_STACK_TRACE_MALLOC; + return asan_memalign(align, size, &stack, FROM_MALLOC); +} + +// This function is currently unused, and we build with -Werror. +#if 0 +void mz_free_definite_size(malloc_zone_t* zone, void *ptr, size_t size) { + // TODO(glider): check that |size| is valid. + UNIMPLEMENTED(); +} +#endif +#endif + +kern_return_t mi_enumerator(task_t task, void *, + unsigned type_mask, vm_address_t zone_address, + memory_reader_t reader, + vm_range_recorder_t recorder) { + // Should enumerate all the pointers we have. Seems like a lot of work. + return KERN_FAILURE; +} + +size_t mi_good_size(malloc_zone_t *zone, size_t size) { + // I think it's always safe to return size, but we maybe could do better. 
+ return size; +} + +boolean_t mi_check(malloc_zone_t *zone) { + UNIMPLEMENTED(); +} + +void mi_print(malloc_zone_t *zone, boolean_t verbose) { + UNIMPLEMENTED(); +} + +void mi_log(malloc_zone_t *zone, void *address) { + // I don't think we support anything like this +} + +void mi_force_lock(malloc_zone_t *zone) { + asan_mz_force_lock(); +} + +void mi_force_unlock(malloc_zone_t *zone) { + asan_mz_force_unlock(); +} + +void mi_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) { + AsanMallocStats malloc_stats; + FillMallocStatistics(&malloc_stats); + CHECK(sizeof(malloc_statistics_t) == sizeof(AsanMallocStats)); + internal_memcpy(stats, &malloc_stats, sizeof(malloc_statistics_t)); +} + +#if defined(MAC_OS_X_VERSION_10_6) && \ + MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6 +boolean_t mi_zone_locked(malloc_zone_t *zone) { + // UNIMPLEMENTED(); + return false; +} +#endif + +} // unnamed namespace + +namespace __asan { + +void ReplaceSystemMalloc() { + static malloc_introspection_t asan_introspection; + // Ok to use internal_memset, these places are not performance-critical. + internal_memset(&asan_introspection, 0, sizeof(asan_introspection)); + + asan_introspection.enumerator = &mi_enumerator; + asan_introspection.good_size = &mi_good_size; + asan_introspection.check = &mi_check; + asan_introspection.print = &mi_print; + asan_introspection.log = &mi_log; + asan_introspection.force_lock = &mi_force_lock; + asan_introspection.force_unlock = &mi_force_unlock; + asan_introspection.statistics = &mi_statistics; + + internal_memset(&asan_zone, 0, sizeof(malloc_zone_t)); + + // Start with a version 4 zone which is used for OS X 10.4 and 10.5. 
+ asan_zone.version = 4; + asan_zone.zone_name = "asan"; + asan_zone.size = &mz_size; + asan_zone.malloc = &mz_malloc; + asan_zone.calloc = &mz_calloc; + asan_zone.valloc = &mz_valloc; + asan_zone.free = &mz_free; + asan_zone.realloc = &mz_realloc; + asan_zone.destroy = &mz_destroy; + asan_zone.batch_malloc = 0; + asan_zone.batch_free = 0; + asan_zone.introspect = &asan_introspection; + + // from AvailabilityMacros.h +#if defined(MAC_OS_X_VERSION_10_6) && \ + MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6 + // Switch to version 6 on OSX 10.6 to support memalign. + asan_zone.version = 6; + asan_zone.free_definite_size = 0; + asan_zone.memalign = &mz_memalign; + asan_introspection.zone_locked = &mi_zone_locked; +#endif + + // Register the ASan zone. + malloc_zone_register(&asan_zone); +} +} // namespace __asan + +#endif // SANITIZER_MAC diff --git a/contrib/compiler-rt/lib/asan/asan_malloc_win.cc b/contrib/compiler-rt/lib/asan/asan_malloc_win.cc new file mode 100644 index 000000000000..c99e31234951 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_malloc_win.cc @@ -0,0 +1,178 @@ +//===-- asan_malloc_win.cc ------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Windows-specific malloc interception. 
+//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_WINDOWS + +#include "asan_allocator.h" +#include "asan_interceptors.h" +#include "asan_internal.h" +#include "asan_stack.h" +#include "interception/interception.h" + +#include <stddef.h> + +using namespace __asan; // NOLINT + +// MT: Simply defining functions with the same signature in *.obj +// files overrides the standard functions in the CRT. +// MD: Memory allocation functions are defined in the CRT .dll, +// so we have to intercept them before they are called for the first time. + +#if ASAN_DYNAMIC +# define ALLOCATION_FUNCTION_ATTRIBUTE +#else +# define ALLOCATION_FUNCTION_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE +#endif + +extern "C" { +ALLOCATION_FUNCTION_ATTRIBUTE +void free(void *ptr) { + GET_STACK_TRACE_FREE; + return asan_free(ptr, &stack, FROM_MALLOC); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void _free_dbg(void *ptr, int) { + free(ptr); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void cfree(void *ptr) { + CHECK(!"cfree() should not be used on Windows"); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *malloc(size_t size) { + GET_STACK_TRACE_MALLOC; + return asan_malloc(size, &stack); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *_malloc_dbg(size_t size, int, const char *, int) { + return malloc(size); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *calloc(size_t nmemb, size_t size) { + GET_STACK_TRACE_MALLOC; + return asan_calloc(nmemb, size, &stack); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *_calloc_dbg(size_t nmemb, size_t size, int, const char *, int) { + return calloc(nmemb, size); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *_calloc_impl(size_t nmemb, size_t size, int *errno_tmp) { + return calloc(nmemb, size); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *realloc(void *ptr, size_t size) { + GET_STACK_TRACE_MALLOC; + return asan_realloc(ptr, size, &stack); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *_realloc_dbg(void *ptr, size_t 
size, int) { + CHECK(!"_realloc_dbg should not exist!"); + return 0; +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *_recalloc(void *p, size_t n, size_t elem_size) { + if (!p) + return calloc(n, elem_size); + const size_t size = n * elem_size; + if (elem_size != 0 && size / elem_size != n) + return 0; + return realloc(p, size); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +size_t _msize(void *ptr) { + GET_CURRENT_PC_BP_SP; + (void)sp; + return asan_malloc_usable_size(ptr, pc, bp); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *_expand(void *memblock, size_t size) { + // _expand is used in realloc-like functions to resize the buffer if possible. + // We don't want memory to stand still while resizing buffers, so return 0. + return 0; +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *_expand_dbg(void *memblock, size_t size) { + return _expand(memblock, size); +} + +// TODO(timurrrr): Might want to add support for _aligned_* allocation +// functions to detect a bit more bugs. Those functions seem to wrap malloc(). + +int _CrtDbgReport(int, const char*, int, + const char*, const char*, ...) { + ShowStatsAndAbort(); +} + +int _CrtDbgReportW(int reportType, const wchar_t*, int, + const wchar_t*, const wchar_t*, ...) { + ShowStatsAndAbort(); +} + +int _CrtSetReportMode(int, int) { + return 0; +} +} // extern "C" + +namespace __asan { +void ReplaceSystemMalloc() { +#if defined(ASAN_DYNAMIC) + // We don't check the result because CRT might not be used in the process. 
+ __interception::OverrideFunction("free", (uptr)free); + __interception::OverrideFunction("malloc", (uptr)malloc); + __interception::OverrideFunction("_malloc_crt", (uptr)malloc); + __interception::OverrideFunction("calloc", (uptr)calloc); + __interception::OverrideFunction("_calloc_crt", (uptr)calloc); + __interception::OverrideFunction("realloc", (uptr)realloc); + __interception::OverrideFunction("_realloc_crt", (uptr)realloc); + __interception::OverrideFunction("_recalloc", (uptr)_recalloc); + __interception::OverrideFunction("_recalloc_crt", (uptr)_recalloc); + __interception::OverrideFunction("_msize", (uptr)_msize); + __interception::OverrideFunction("_expand", (uptr)_expand); + + // Override different versions of 'operator new' and 'operator delete'. + // No need to override the nothrow versions as they just wrap the throw + // versions. + // FIXME: Unfortunately, MSVC miscompiles the statements that take the + // addresses of the array versions of these operators, + // see https://connect.microsoft.com/VisualStudio/feedbackdetail/view/946992 + // We might want to try to work around this by [inline] assembly or compiling + // parts of the RTL with Clang. 
+ void *(*op_new)(size_t sz) = operator new; + void (*op_delete)(void *p) = operator delete; + void *(*op_array_new)(size_t sz) = operator new[]; + void (*op_array_delete)(void *p) = operator delete[]; + __interception::OverrideFunction("??2@YAPAXI@Z", (uptr)op_new); + __interception::OverrideFunction("??3@YAXPAX@Z", (uptr)op_delete); + __interception::OverrideFunction("??_U@YAPAXI@Z", (uptr)op_array_new); + __interception::OverrideFunction("??_V@YAXPAX@Z", (uptr)op_array_delete); +#endif +} +} // namespace __asan + +#endif // _WIN32 diff --git a/contrib/compiler-rt/lib/asan/asan_mapping.h b/contrib/compiler-rt/lib/asan/asan_mapping.h new file mode 100644 index 000000000000..2746754152b6 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_mapping.h @@ -0,0 +1,271 @@ +//===-- asan_mapping.h ------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Defines ASan memory mapping. 
+//===----------------------------------------------------------------------===// +#ifndef ASAN_MAPPING_H +#define ASAN_MAPPING_H + +#include "asan_internal.h" + +// The full explanation of the memory mapping could be found here: +// http://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm +// +// Typical shadow mapping on Linux/x86_64 with SHADOW_OFFSET == 0x00007fff8000: +// || `[0x10007fff8000, 0x7fffffffffff]` || HighMem || +// || `[0x02008fff7000, 0x10007fff7fff]` || HighShadow || +// || `[0x00008fff7000, 0x02008fff6fff]` || ShadowGap || +// || `[0x00007fff8000, 0x00008fff6fff]` || LowShadow || +// || `[0x000000000000, 0x00007fff7fff]` || LowMem || +// +// When SHADOW_OFFSET is zero (-pie): +// || `[0x100000000000, 0x7fffffffffff]` || HighMem || +// || `[0x020000000000, 0x0fffffffffff]` || HighShadow || +// || `[0x000000040000, 0x01ffffffffff]` || ShadowGap || +// +// Special case when something is already mapped between +// 0x003000000000 and 0x005000000000 (e.g. when prelink is installed): +// || `[0x10007fff8000, 0x7fffffffffff]` || HighMem || +// || `[0x02008fff7000, 0x10007fff7fff]` || HighShadow || +// || `[0x005000000000, 0x02008fff6fff]` || ShadowGap3 || +// || `[0x003000000000, 0x004fffffffff]` || MidMem || +// || `[0x000a7fff8000, 0x002fffffffff]` || ShadowGap2 || +// || `[0x00067fff8000, 0x000a7fff7fff]` || MidShadow || +// || `[0x00008fff7000, 0x00067fff7fff]` || ShadowGap || +// || `[0x00007fff8000, 0x00008fff6fff]` || LowShadow || +// || `[0x000000000000, 0x00007fff7fff]` || LowMem || +// +// Default Linux/i386 mapping on x86_64 machine: +// || `[0x40000000, 0xffffffff]` || HighMem || +// || `[0x28000000, 0x3fffffff]` || HighShadow || +// || `[0x24000000, 0x27ffffff]` || ShadowGap || +// || `[0x20000000, 0x23ffffff]` || LowShadow || +// || `[0x00000000, 0x1fffffff]` || LowMem || +// +// Default Linux/i386 mapping on i386 machine +// (addresses starting with 0xc0000000 are reserved +// for kernel and thus not sanitized): +// || 
`[0x38000000, 0xbfffffff]` || HighMem || +// || `[0x27000000, 0x37ffffff]` || HighShadow || +// || `[0x24000000, 0x26ffffff]` || ShadowGap || +// || `[0x20000000, 0x23ffffff]` || LowShadow || +// || `[0x00000000, 0x1fffffff]` || LowMem || +// +// Default Linux/MIPS mapping: +// || `[0x2aaa0000, 0xffffffff]` || HighMem || +// || `[0x0fff4000, 0x2aa9ffff]` || HighShadow || +// || `[0x0bff4000, 0x0fff3fff]` || ShadowGap || +// || `[0x0aaa0000, 0x0bff3fff]` || LowShadow || +// || `[0x00000000, 0x0aa9ffff]` || LowMem || +// +// Shadow mapping on FreeBSD/x86-64 with SHADOW_OFFSET == 0x400000000000: +// || `[0x500000000000, 0x7fffffffffff]` || HighMem || +// || `[0x4a0000000000, 0x4fffffffffff]` || HighShadow || +// || `[0x480000000000, 0x49ffffffffff]` || ShadowGap || +// || `[0x400000000000, 0x47ffffffffff]` || LowShadow || +// || `[0x000000000000, 0x3fffffffffff]` || LowMem || +// +// Shadow mapping on FreeBSD/i386 with SHADOW_OFFSET == 0x40000000: +// || `[0x60000000, 0xffffffff]` || HighMem || +// || `[0x4c000000, 0x5fffffff]` || HighShadow || +// || `[0x48000000, 0x4bffffff]` || ShadowGap || +// || `[0x40000000, 0x47ffffff]` || LowShadow || +// || `[0x00000000, 0x3fffffff]` || LowMem || + +static const u64 kDefaultShadowScale = 3; +static const u64 kDefaultShadowOffset32 = 1ULL << 29; // 0x20000000 +static const u64 kIosShadowOffset32 = 1ULL << 30; // 0x40000000 +static const u64 kDefaultShadowOffset64 = 1ULL << 44; +static const u64 kDefaultShort64bitShadowOffset = 0x7FFF8000; // < 2G. 
+static const u64 kAArch64_ShadowOffset64 = 1ULL << 36; +static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000; +static const u64 kMIPS64_ShadowOffset64 = 1ULL << 36; +static const u64 kPPC64_ShadowOffset64 = 1ULL << 41; +static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000 +static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000 + +#define SHADOW_SCALE kDefaultShadowScale +#if SANITIZER_ANDROID +# define SHADOW_OFFSET (0) +#else +# if SANITIZER_WORDSIZE == 32 +# if defined(__mips__) +# define SHADOW_OFFSET kMIPS32_ShadowOffset32 +# elif SANITIZER_FREEBSD +# define SHADOW_OFFSET kFreeBSD_ShadowOffset32 +# else +# if SANITIZER_IOS +# define SHADOW_OFFSET kIosShadowOffset32 +# else +# define SHADOW_OFFSET kDefaultShadowOffset32 +# endif +# endif +# else +# if defined(__aarch64__) +# define SHADOW_OFFSET kAArch64_ShadowOffset64 +# elif defined(__powerpc64__) +# define SHADOW_OFFSET kPPC64_ShadowOffset64 +# elif SANITIZER_FREEBSD +# define SHADOW_OFFSET kFreeBSD_ShadowOffset64 +# elif SANITIZER_MAC +# define SHADOW_OFFSET kDefaultShadowOffset64 +# elif defined(__mips64) +# define SHADOW_OFFSET kMIPS64_ShadowOffset64 +# else +# define SHADOW_OFFSET kDefaultShort64bitShadowOffset +# endif +# endif +#endif + +#define SHADOW_GRANULARITY (1ULL << SHADOW_SCALE) +#define MEM_TO_SHADOW(mem) (((mem) >> SHADOW_SCALE) + (SHADOW_OFFSET)) +#define SHADOW_TO_MEM(shadow) (((shadow) - SHADOW_OFFSET) << SHADOW_SCALE) + +#define kLowMemBeg 0 +#define kLowMemEnd (SHADOW_OFFSET ? SHADOW_OFFSET - 1 : 0) + +#define kLowShadowBeg SHADOW_OFFSET +#define kLowShadowEnd MEM_TO_SHADOW(kLowMemEnd) + +#define kHighMemBeg (MEM_TO_SHADOW(kHighMemEnd) + 1) + +#define kHighShadowBeg MEM_TO_SHADOW(kHighMemBeg) +#define kHighShadowEnd MEM_TO_SHADOW(kHighMemEnd) + +# define kMidShadowBeg MEM_TO_SHADOW(kMidMemBeg) +# define kMidShadowEnd MEM_TO_SHADOW(kMidMemEnd) + +// With the zero shadow base we can not actually map pages starting from 0. 
+// This constant is somewhat arbitrary. +#define kZeroBaseShadowStart (1 << 18) + +#define kShadowGapBeg (kLowShadowEnd ? kLowShadowEnd + 1 \ + : kZeroBaseShadowStart) +#define kShadowGapEnd ((kMidMemBeg ? kMidShadowBeg : kHighShadowBeg) - 1) + +#define kShadowGap2Beg (kMidMemBeg ? kMidShadowEnd + 1 : 0) +#define kShadowGap2End (kMidMemBeg ? kMidMemBeg - 1 : 0) + +#define kShadowGap3Beg (kMidMemBeg ? kMidMemEnd + 1 : 0) +#define kShadowGap3End (kMidMemBeg ? kHighShadowBeg - 1 : 0) + +#define DO_ASAN_MAPPING_PROFILE 0 // Set to 1 to profile the functions below. + +#if DO_ASAN_MAPPING_PROFILE +# define PROFILE_ASAN_MAPPING() AsanMappingProfile[__LINE__]++; +#else +# define PROFILE_ASAN_MAPPING() +#endif + +// If 1, all shadow boundaries are constants. +// Don't set to 1 other than for testing. +#define ASAN_FIXED_MAPPING 0 + +namespace __asan { + +extern uptr AsanMappingProfile[]; + +#if ASAN_FIXED_MAPPING +// Fixed mapping for 64-bit Linux. Mostly used for performance comparison +// with non-fixed mapping. As of r175253 (Feb 2013) the performance +// difference between fixed and non-fixed mapping is below the noise level. +static uptr kHighMemEnd = 0x7fffffffffffULL; +static uptr kMidMemBeg = 0x3000000000ULL; +static uptr kMidMemEnd = 0x4fffffffffULL; +#else +extern uptr kHighMemEnd, kMidMemBeg, kMidMemEnd; // Initialized in __asan_init. 
+#endif + +static inline bool AddrIsInLowMem(uptr a) { + PROFILE_ASAN_MAPPING(); + return a < kLowMemEnd; +} + +static inline bool AddrIsInLowShadow(uptr a) { + PROFILE_ASAN_MAPPING(); + return a >= kLowShadowBeg && a <= kLowShadowEnd; +} + +static inline bool AddrIsInHighMem(uptr a) { + PROFILE_ASAN_MAPPING(); + return a >= kHighMemBeg && a <= kHighMemEnd; +} + +static inline bool AddrIsInMidMem(uptr a) { + PROFILE_ASAN_MAPPING(); + return kMidMemBeg && a >= kMidMemBeg && a <= kMidMemEnd; +} + +static inline bool AddrIsInMem(uptr a) { + PROFILE_ASAN_MAPPING(); + return AddrIsInLowMem(a) || AddrIsInMidMem(a) || AddrIsInHighMem(a); +} + +static inline uptr MemToShadow(uptr p) { + PROFILE_ASAN_MAPPING(); + CHECK(AddrIsInMem(p)); + return MEM_TO_SHADOW(p); +} + +static inline bool AddrIsInHighShadow(uptr a) { + PROFILE_ASAN_MAPPING(); + return a >= kHighShadowBeg && a <= kHighMemEnd; +} + +static inline bool AddrIsInMidShadow(uptr a) { + PROFILE_ASAN_MAPPING(); + return kMidMemBeg && a >= kMidShadowBeg && a <= kMidMemEnd; +} + +static inline bool AddrIsInShadow(uptr a) { + PROFILE_ASAN_MAPPING(); + return AddrIsInLowShadow(a) || AddrIsInMidShadow(a) || AddrIsInHighShadow(a); +} + +static inline bool AddrIsInShadowGap(uptr a) { + PROFILE_ASAN_MAPPING(); + if (kMidMemBeg) { + if (a <= kShadowGapEnd) + return SHADOW_OFFSET == 0 || a >= kShadowGapBeg; + return (a >= kShadowGap2Beg && a <= kShadowGap2End) || + (a >= kShadowGap3Beg && a <= kShadowGap3End); + } + // In zero-based shadow mode we treat addresses near zero as addresses + // in shadow gap as well. 
+ if (SHADOW_OFFSET == 0) + return a <= kShadowGapEnd; + return a >= kShadowGapBeg && a <= kShadowGapEnd; +} + +static inline bool AddrIsAlignedByGranularity(uptr a) { + PROFILE_ASAN_MAPPING(); + return (a & (SHADOW_GRANULARITY - 1)) == 0; +} + +static inline bool AddressIsPoisoned(uptr a) { + PROFILE_ASAN_MAPPING(); + const uptr kAccessSize = 1; + u8 *shadow_address = (u8*)MEM_TO_SHADOW(a); + s8 shadow_value = *shadow_address; + if (shadow_value) { + u8 last_accessed_byte = (a & (SHADOW_GRANULARITY - 1)) + + kAccessSize - 1; + return (last_accessed_byte >= shadow_value); + } + return false; +} + +// Must be after all calls to PROFILE_ASAN_MAPPING(). +static const uptr kAsanMappingProfileSize = __LINE__; + +} // namespace __asan + +#endif // ASAN_MAPPING_H diff --git a/contrib/compiler-rt/lib/asan/asan_new_delete.cc b/contrib/compiler-rt/lib/asan/asan_new_delete.cc new file mode 100644 index 000000000000..e48bdaf03dd3 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_new_delete.cc @@ -0,0 +1,132 @@ +//===-- asan_interceptors.cc ----------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Interceptors for operators new and delete. +//===----------------------------------------------------------------------===// + +#include "asan_allocator.h" +#include "asan_internal.h" +#include "asan_stack.h" + +#include "interception/interception.h" + +#include <stddef.h> + +// C++ operators can't have visibility attributes on Windows. +#if SANITIZER_WINDOWS +# define CXX_OPERATOR_ATTRIBUTE +#else +# define CXX_OPERATOR_ATTRIBUTE INTERCEPTOR_ATTRIBUTE +#endif + +using namespace __asan; // NOLINT + +// This code has issues on OSX. 
+// See https://code.google.com/p/address-sanitizer/issues/detail?id=131. + +// Fake std::nothrow_t to avoid including <new>. +namespace std { +struct nothrow_t {}; +} // namespace std + +#define OPERATOR_NEW_BODY(type) \ + GET_STACK_TRACE_MALLOC;\ + return asan_memalign(0, size, &stack, type); + +// On OS X it's not enough to just provide our own 'operator new' and +// 'operator delete' implementations, because they're going to be in the +// runtime dylib, and the main executable will depend on both the runtime +// dylib and libstdc++, each of those'll have its implementation of new and +// delete. +// To make sure that C++ allocation/deallocation operators are overridden on +// OS X we need to intercept them using their mangled names. +#if !SANITIZER_MAC +// FreeBSD prior v9.2 have wrong definition of 'size_t'. +// http://svnweb.freebsd.org/base?view=revision&revision=232261 +#if SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32 +#include <sys/param.h> +#if __FreeBSD_version <= 902001 // v9.2 +#define size_t unsigned +#endif // __FreeBSD_version +#endif // SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32 + +CXX_OPERATOR_ATTRIBUTE +void *operator new(size_t size) { OPERATOR_NEW_BODY(FROM_NEW); } +CXX_OPERATOR_ATTRIBUTE +void *operator new[](size_t size) { OPERATOR_NEW_BODY(FROM_NEW_BR); } +CXX_OPERATOR_ATTRIBUTE +void *operator new(size_t size, std::nothrow_t const&) +{ OPERATOR_NEW_BODY(FROM_NEW); } +CXX_OPERATOR_ATTRIBUTE +void *operator new[](size_t size, std::nothrow_t const&) +{ OPERATOR_NEW_BODY(FROM_NEW_BR); } + +#else // SANITIZER_MAC +INTERCEPTOR(void *, _Znwm, size_t size) { + OPERATOR_NEW_BODY(FROM_NEW); +} +INTERCEPTOR(void *, _Znam, size_t size) { + OPERATOR_NEW_BODY(FROM_NEW_BR); +} +INTERCEPTOR(void *, _ZnwmRKSt9nothrow_t, size_t size, std::nothrow_t const&) { + OPERATOR_NEW_BODY(FROM_NEW); +} +INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&) { + OPERATOR_NEW_BODY(FROM_NEW_BR); +} +#endif + +#define OPERATOR_DELETE_BODY(type) \ 
+ GET_STACK_TRACE_FREE;\ + asan_free(ptr, &stack, type); + +#if !SANITIZER_MAC +CXX_OPERATOR_ATTRIBUTE +void operator delete(void *ptr) throw() { + OPERATOR_DELETE_BODY(FROM_NEW); +} +CXX_OPERATOR_ATTRIBUTE +void operator delete[](void *ptr) throw() { + OPERATOR_DELETE_BODY(FROM_NEW_BR); +} +CXX_OPERATOR_ATTRIBUTE +void operator delete(void *ptr, std::nothrow_t const&) { + OPERATOR_DELETE_BODY(FROM_NEW); +} +CXX_OPERATOR_ATTRIBUTE +void operator delete[](void *ptr, std::nothrow_t const&) { + OPERATOR_DELETE_BODY(FROM_NEW_BR); +} +CXX_OPERATOR_ATTRIBUTE +void operator delete(void *ptr, size_t size) throw() { + GET_STACK_TRACE_FREE; + asan_sized_free(ptr, size, &stack, FROM_NEW); +} +CXX_OPERATOR_ATTRIBUTE +void operator delete[](void *ptr, size_t size) throw() { + GET_STACK_TRACE_FREE; + asan_sized_free(ptr, size, &stack, FROM_NEW_BR); +} + +#else // SANITIZER_MAC +INTERCEPTOR(void, _ZdlPv, void *ptr) { + OPERATOR_DELETE_BODY(FROM_NEW); +} +INTERCEPTOR(void, _ZdaPv, void *ptr) { + OPERATOR_DELETE_BODY(FROM_NEW_BR); +} +INTERCEPTOR(void, _ZdlPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) { + OPERATOR_DELETE_BODY(FROM_NEW); +} +INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) { + OPERATOR_DELETE_BODY(FROM_NEW_BR); +} +#endif diff --git a/contrib/compiler-rt/lib/asan/asan_poisoning.cc b/contrib/compiler-rt/lib/asan/asan_poisoning.cc new file mode 100644 index 000000000000..1c6e92f69c65 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_poisoning.cc @@ -0,0 +1,416 @@ +//===-- asan_poisoning.cc -------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Shadow memory poisoning by ASan RTL and by user application. 
+//===----------------------------------------------------------------------===// + +#include "asan_poisoning.h" +#include "asan_report.h" +#include "asan_stack.h" +#include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_flags.h" + +namespace __asan { + +void PoisonShadow(uptr addr, uptr size, u8 value) { + if (!flags()->poison_heap) return; + CHECK(AddrIsAlignedByGranularity(addr)); + CHECK(AddrIsInMem(addr)); + CHECK(AddrIsAlignedByGranularity(addr + size)); + CHECK(AddrIsInMem(addr + size - SHADOW_GRANULARITY)); + CHECK(REAL(memset)); + FastPoisonShadow(addr, size, value); +} + +void PoisonShadowPartialRightRedzone(uptr addr, + uptr size, + uptr redzone_size, + u8 value) { + if (!flags()->poison_heap) return; + CHECK(AddrIsAlignedByGranularity(addr)); + CHECK(AddrIsInMem(addr)); + FastPoisonShadowPartialRightRedzone(addr, size, redzone_size, value); +} + +struct ShadowSegmentEndpoint { + u8 *chunk; + s8 offset; // in [0, SHADOW_GRANULARITY) + s8 value; // = *chunk; + + explicit ShadowSegmentEndpoint(uptr address) { + chunk = (u8*)MemToShadow(address); + offset = address & (SHADOW_GRANULARITY - 1); + value = *chunk; + } +}; + +void FlushUnneededASanShadowMemory(uptr p, uptr size) { + // Since asan's mapping is compacting, the shadow chunk may be + // not page-aligned, so we only flush the page-aligned portion. + uptr page_size = GetPageSizeCached(); + uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size); + uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size); + FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg); +} + +void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) { + uptr end = ptr + size; + if (common_flags()->verbosity) { + Printf("__asan_%spoison_intra_object_redzone [%p,%p) %zd\n", + poison ? 
"" : "un", ptr, end, size); + if (common_flags()->verbosity >= 2) + PRINT_CURRENT_STACK(); + } + CHECK(size); + CHECK_LE(size, 4096); + CHECK(IsAligned(end, SHADOW_GRANULARITY)); + if (!IsAligned(ptr, SHADOW_GRANULARITY)) { + *(u8 *)MemToShadow(ptr) = + poison ? static_cast<u8>(ptr % SHADOW_GRANULARITY) : 0; + ptr |= SHADOW_GRANULARITY - 1; + ptr++; + } + for (; ptr < end; ptr += SHADOW_GRANULARITY) + *(u8*)MemToShadow(ptr) = poison ? kAsanIntraObjectRedzone : 0; +} + +} // namespace __asan + +// ---------------------- Interface ---------------- {{{1 +using namespace __asan; // NOLINT + +// Current implementation of __asan_(un)poison_memory_region doesn't check +// that user program (un)poisons the memory it owns. It poisons memory +// conservatively, and unpoisons progressively to make sure asan shadow +// mapping invariant is preserved (see detailed mapping description here: +// http://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm). +// +// * if user asks to poison region [left, right), the program poisons +// at least [left, AlignDown(right)). +// * if user asks to unpoison region [left, right), the program unpoisons +// at most [AlignDown(left), right). +void __asan_poison_memory_region(void const volatile *addr, uptr size) { + if (!flags()->allow_user_poisoning || size == 0) return; + uptr beg_addr = (uptr)addr; + uptr end_addr = beg_addr + size; + VPrintf(1, "Trying to poison memory region [%p, %p)\n", (void *)beg_addr, + (void *)end_addr); + ShadowSegmentEndpoint beg(beg_addr); + ShadowSegmentEndpoint end(end_addr); + if (beg.chunk == end.chunk) { + CHECK(beg.offset < end.offset); + s8 value = beg.value; + CHECK(value == end.value); + // We can only poison memory if the byte in end.offset is unaddressable. + // No need to re-poison memory if it is poisoned already. 
+ if (value > 0 && value <= end.offset) {
+ if (beg.offset > 0) {
+ *beg.chunk = Min(value, beg.offset);
+ } else {
+ *beg.chunk = kAsanUserPoisonedMemoryMagic;
+ }
+ }
+ return;
+ }
+ CHECK(beg.chunk < end.chunk);
+ if (beg.offset > 0) {
+ // Mark bytes from beg.offset as unaddressable.
+ if (beg.value == 0) {
+ *beg.chunk = beg.offset;
+ } else {
+ *beg.chunk = Min(beg.value, beg.offset);
+ }
+ beg.chunk++;
+ }
+ REAL(memset)(beg.chunk, kAsanUserPoisonedMemoryMagic, end.chunk - beg.chunk);
+ // Poison if byte in end.offset is unaddressable.
+ if (end.value > 0 && end.value <= end.offset) {
+ *end.chunk = kAsanUserPoisonedMemoryMagic;
+ }
+}
+
+void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
+ if (!flags()->allow_user_poisoning || size == 0) return;
+ uptr beg_addr = (uptr)addr;
+ uptr end_addr = beg_addr + size;
+ VPrintf(1, "Trying to unpoison memory region [%p, %p)\n", (void *)beg_addr,
+ (void *)end_addr);
+ ShadowSegmentEndpoint beg(beg_addr);
+ ShadowSegmentEndpoint end(end_addr);
+ if (beg.chunk == end.chunk) {
+ CHECK(beg.offset < end.offset);
+ s8 value = beg.value;
+ CHECK(value == end.value);
+ // We unpoison memory bytes up to end.offset if it is not
+ // unpoisoned already.
+ if (value != 0) { + *beg.chunk = Max(value, end.offset); + } + return; + } + CHECK(beg.chunk < end.chunk); + if (beg.offset > 0) { + *beg.chunk = 0; + beg.chunk++; + } + REAL(memset)(beg.chunk, 0, end.chunk - beg.chunk); + if (end.offset > 0 && end.value != 0) { + *end.chunk = Max(end.value, end.offset); + } +} + +int __asan_address_is_poisoned(void const volatile *addr) { + return __asan::AddressIsPoisoned((uptr)addr); +} + +uptr __asan_region_is_poisoned(uptr beg, uptr size) { + if (!size) return 0; + uptr end = beg + size; + if (!AddrIsInMem(beg)) return beg; + if (!AddrIsInMem(end)) return end; + CHECK_LT(beg, end); + uptr aligned_b = RoundUpTo(beg, SHADOW_GRANULARITY); + uptr aligned_e = RoundDownTo(end, SHADOW_GRANULARITY); + uptr shadow_beg = MemToShadow(aligned_b); + uptr shadow_end = MemToShadow(aligned_e); + // First check the first and the last application bytes, + // then check the SHADOW_GRANULARITY-aligned region by calling + // mem_is_zero on the corresponding shadow. + if (!__asan::AddressIsPoisoned(beg) && + !__asan::AddressIsPoisoned(end - 1) && + (shadow_end <= shadow_beg || + __sanitizer::mem_is_zero((const char *)shadow_beg, + shadow_end - shadow_beg))) + return 0; + // The fast check failed, so we have a poisoned byte somewhere. + // Find it slowly. 
+ for (; beg < end; beg++) + if (__asan::AddressIsPoisoned(beg)) + return beg; + UNREACHABLE("mem_is_zero returned false, but poisoned byte was not found"); + return 0; +} + +#define CHECK_SMALL_REGION(p, size, isWrite) \ + do { \ + uptr __p = reinterpret_cast<uptr>(p); \ + uptr __size = size; \ + if (UNLIKELY(__asan::AddressIsPoisoned(__p) || \ + __asan::AddressIsPoisoned(__p + __size - 1))) { \ + GET_CURRENT_PC_BP_SP; \ + uptr __bad = __asan_region_is_poisoned(__p, __size); \ + __asan_report_error(pc, bp, sp, __bad, isWrite, __size);\ + } \ + } while (false); \ + + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +u16 __sanitizer_unaligned_load16(const uu16 *p) { + CHECK_SMALL_REGION(p, sizeof(*p), false); + return *p; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +u32 __sanitizer_unaligned_load32(const uu32 *p) { + CHECK_SMALL_REGION(p, sizeof(*p), false); + return *p; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +u64 __sanitizer_unaligned_load64(const uu64 *p) { + CHECK_SMALL_REGION(p, sizeof(*p), false); + return *p; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_unaligned_store16(uu16 *p, u16 x) { + CHECK_SMALL_REGION(p, sizeof(*p), true); + *p = x; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_unaligned_store32(uu32 *p, u32 x) { + CHECK_SMALL_REGION(p, sizeof(*p), true); + *p = x; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_unaligned_store64(uu64 *p, u64 x) { + CHECK_SMALL_REGION(p, sizeof(*p), true); + *p = x; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +void __asan_poison_cxx_array_cookie(uptr p) { + if (SANITIZER_WORDSIZE != 64) return; + if (!flags()->poison_array_cookie) return; + uptr s = MEM_TO_SHADOW(p); + *reinterpret_cast<u8*>(s) = kAsanArrayCookieMagic; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +uptr __asan_load_cxx_array_cookie(uptr *p) { + if (SANITIZER_WORDSIZE != 64) return *p; + if (!flags()->poison_array_cookie) return *p; + uptr s = MEM_TO_SHADOW(reinterpret_cast<uptr>(p)); + u8 
sval = *reinterpret_cast<u8*>(s); + if (sval == kAsanArrayCookieMagic) return *p; + // If sval is not kAsanArrayCookieMagic it can only be freed memory, + // which means that we are going to get double-free. So, return 0 to avoid + // infinite loop of destructors. We don't want to report a double-free here + // though, so print a warning just in case. + // CHECK_EQ(sval, kAsanHeapFreeMagic); + if (sval == kAsanHeapFreeMagic) { + Report("AddressSanitizer: loaded array cookie from free-d memory; " + "expect a double-free report\n"); + return 0; + } + // The cookie may remain unpoisoned if e.g. it comes from a custom + // operator new defined inside a class. + return *p; +} + +// This is a simplified version of __asan_(un)poison_memory_region, which +// assumes that left border of region to be poisoned is properly aligned. +static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) { + if (size == 0) return; + uptr aligned_size = size & ~(SHADOW_GRANULARITY - 1); + PoisonShadow(addr, aligned_size, + do_poison ? kAsanStackUseAfterScopeMagic : 0); + if (size == aligned_size) + return; + s8 end_offset = (s8)(size - aligned_size); + s8* shadow_end = (s8*)MemToShadow(addr + aligned_size); + s8 end_value = *shadow_end; + if (do_poison) { + // If possible, mark all the bytes mapping to last shadow byte as + // unaddressable. 
+ if (end_value > 0 && end_value <= end_offset) + *shadow_end = (s8)kAsanStackUseAfterScopeMagic; + } else { + // If necessary, mark few first bytes mapping to last shadow byte + // as addressable + if (end_value != 0) + *shadow_end = Max(end_value, end_offset); + } +} + +void __asan_poison_stack_memory(uptr addr, uptr size) { + VReport(1, "poisoning: %p %zx\n", (void *)addr, size); + PoisonAlignedStackMemory(addr, size, true); +} + +void __asan_unpoison_stack_memory(uptr addr, uptr size) { + VReport(1, "unpoisoning: %p %zx\n", (void *)addr, size); + PoisonAlignedStackMemory(addr, size, false); +} + +void __sanitizer_annotate_contiguous_container(const void *beg_p, + const void *end_p, + const void *old_mid_p, + const void *new_mid_p) { + if (!flags()->detect_container_overflow) return; + VPrintf(2, "contiguous_container: %p %p %p %p\n", beg_p, end_p, old_mid_p, + new_mid_p); + uptr beg = reinterpret_cast<uptr>(beg_p); + uptr end = reinterpret_cast<uptr>(end_p); + uptr old_mid = reinterpret_cast<uptr>(old_mid_p); + uptr new_mid = reinterpret_cast<uptr>(new_mid_p); + uptr granularity = SHADOW_GRANULARITY; + if (!(beg <= old_mid && beg <= new_mid && old_mid <= end && new_mid <= end && + IsAligned(beg, granularity))) { + GET_STACK_TRACE_FATAL_HERE; + ReportBadParamsToAnnotateContiguousContainer(beg, end, old_mid, new_mid, + &stack); + } + CHECK_LE(end - beg, + FIRST_32_SECOND_64(1UL << 30, 1UL << 34)); // Sanity check. + + uptr a = RoundDownTo(Min(old_mid, new_mid), granularity); + uptr c = RoundUpTo(Max(old_mid, new_mid), granularity); + uptr d1 = RoundDownTo(old_mid, granularity); + // uptr d2 = RoundUpTo(old_mid, granularity); + // Currently we should be in this state: + // [a, d1) is good, [d2, c) is bad, [d1, d2) is partially good. + // Make a quick sanity check that we are indeed in this state. + // + // FIXME: Two of these three checks are disabled until we fix + // https://code.google.com/p/address-sanitizer/issues/detail?id=258. 
+ // if (d1 != d2) + // CHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1); + if (a + granularity <= d1) + CHECK_EQ(*(u8*)MemToShadow(a), 0); + // if (d2 + granularity <= c && c <= end) + // CHECK_EQ(*(u8 *)MemToShadow(c - granularity), + // kAsanContiguousContainerOOBMagic); + + uptr b1 = RoundDownTo(new_mid, granularity); + uptr b2 = RoundUpTo(new_mid, granularity); + // New state: + // [a, b1) is good, [b2, c) is bad, [b1, b2) is partially good. + PoisonShadow(a, b1 - a, 0); + PoisonShadow(b2, c - b2, kAsanContiguousContainerOOBMagic); + if (b1 != b2) { + CHECK_EQ(b2 - b1, granularity); + *(u8*)MemToShadow(b1) = static_cast<u8>(new_mid - b1); + } +} + +int __sanitizer_verify_contiguous_container(const void *beg_p, + const void *mid_p, + const void *end_p) { + if (!flags()->detect_container_overflow) return 1; + uptr beg = reinterpret_cast<uptr>(beg_p); + uptr end = reinterpret_cast<uptr>(end_p); + uptr mid = reinterpret_cast<uptr>(mid_p); + CHECK_LE(beg, mid); + CHECK_LE(mid, end); + // Check some bytes starting from beg, some bytes around mid, and some bytes + // ending with end. 
+ uptr kMaxRangeToCheck = 32; + uptr r1_beg = beg; + uptr r1_end = Min(end + kMaxRangeToCheck, mid); + uptr r2_beg = Max(beg, mid - kMaxRangeToCheck); + uptr r2_end = Min(end, mid + kMaxRangeToCheck); + uptr r3_beg = Max(end - kMaxRangeToCheck, mid); + uptr r3_end = end; + for (uptr i = r1_beg; i < r1_end; i++) + if (AddressIsPoisoned(i)) + return 0; + for (uptr i = r2_beg; i < mid; i++) + if (AddressIsPoisoned(i)) + return 0; + for (uptr i = mid; i < r2_end; i++) + if (!AddressIsPoisoned(i)) + return 0; + for (uptr i = r3_beg; i < r3_end; i++) + if (!AddressIsPoisoned(i)) + return 0; + return 1; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +void __asan_poison_intra_object_redzone(uptr ptr, uptr size) { + AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, true); +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +void __asan_unpoison_intra_object_redzone(uptr ptr, uptr size) { + AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, false); +} + +// --- Implementation of LSan-specific functions --- {{{1 +namespace __lsan { +bool WordIsPoisoned(uptr addr) { + return (__asan_region_is_poisoned(addr, sizeof(uptr)) != 0); +} +} + diff --git a/contrib/compiler-rt/lib/asan/asan_poisoning.h b/contrib/compiler-rt/lib/asan/asan_poisoning.h new file mode 100644 index 000000000000..feda1a984544 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_poisoning.h @@ -0,0 +1,90 @@ +//===-- asan_poisoning.h ----------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Shadow memory poisoning by ASan RTL and by user application. 
+//===----------------------------------------------------------------------===// + +#include "asan_interceptors.h" +#include "asan_internal.h" +#include "asan_mapping.h" +#include "sanitizer_common/sanitizer_flags.h" + +namespace __asan { + +// Poisons the shadow memory for "size" bytes starting from "addr". +void PoisonShadow(uptr addr, uptr size, u8 value); + +// Poisons the shadow memory for "redzone_size" bytes starting from +// "addr + size". +void PoisonShadowPartialRightRedzone(uptr addr, + uptr size, + uptr redzone_size, + u8 value); + +// Fast versions of PoisonShadow and PoisonShadowPartialRightRedzone that +// assume that memory addresses are properly aligned. Use in +// performance-critical code with care. +ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size, + u8 value) { + DCHECK(flags()->poison_heap); + uptr shadow_beg = MEM_TO_SHADOW(aligned_beg); + uptr shadow_end = MEM_TO_SHADOW( + aligned_beg + aligned_size - SHADOW_GRANULARITY) + 1; + // FIXME: Page states are different on Windows, so using the same interface + // for mapping shadow and zeroing out pages doesn't "just work", so we should + // probably provide higher-level interface for these operations. + // For now, just memset on Windows. 
+ if (value || + SANITIZER_WINDOWS == 1 || + shadow_end - shadow_beg < common_flags()->clear_shadow_mmap_threshold) { + REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg); + } else { + uptr page_size = GetPageSizeCached(); + uptr page_beg = RoundUpTo(shadow_beg, page_size); + uptr page_end = RoundDownTo(shadow_end, page_size); + + if (page_beg >= page_end) { + REAL(memset)((void *)shadow_beg, 0, shadow_end - shadow_beg); + } else { + if (page_beg != shadow_beg) { + REAL(memset)((void *)shadow_beg, 0, page_beg - shadow_beg); + } + if (page_end != shadow_end) { + REAL(memset)((void *)page_end, 0, shadow_end - page_end); + } + void *res = MmapFixedNoReserve(page_beg, page_end - page_beg); + CHECK_EQ(page_beg, res); + } + } +} + +ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone( + uptr aligned_addr, uptr size, uptr redzone_size, u8 value) { + DCHECK(flags()->poison_heap); + bool poison_partial = flags()->poison_partial; + u8 *shadow = (u8*)MEM_TO_SHADOW(aligned_addr); + for (uptr i = 0; i < redzone_size; i += SHADOW_GRANULARITY, shadow++) { + if (i + SHADOW_GRANULARITY <= size) { + *shadow = 0; // fully addressable + } else if (i >= size) { + *shadow = (SHADOW_GRANULARITY == 128) ? 0xff : value; // unaddressable + } else { + // first size-i bytes are addressable + *shadow = poison_partial ? static_cast<u8>(size - i) : 0; + } + } +} + +// Calls __sanitizer::FlushUnneededShadowMemory() on +// [MemToShadow(p), MemToShadow(p+size)] with proper rounding. 
+void FlushUnneededASanShadowMemory(uptr p, uptr size); + +} // namespace __asan diff --git a/contrib/compiler-rt/lib/asan/asan_posix.cc b/contrib/compiler-rt/lib/asan/asan_posix.cc new file mode 100644 index 000000000000..ad31458df28d --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_posix.cc @@ -0,0 +1,119 @@ +//===-- asan_posix.cc -----------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Posix-specific details. +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_POSIX + +#include "asan_internal.h" +#include "asan_interceptors.h" +#include "asan_mapping.h" +#include "asan_report.h" +#include "asan_stack.h" +#include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_procmaps.h" + +#include <pthread.h> +#include <signal.h> +#include <stdlib.h> +#include <sys/time.h> +#include <sys/resource.h> +#include <unistd.h> + +namespace __asan { + +SignalContext SignalContext::Create(void *siginfo, void *context) { + uptr addr = (uptr)((siginfo_t*)siginfo)->si_addr; + uptr pc, sp, bp; + GetPcSpBp(context, &pc, &sp, &bp); + return SignalContext(context, addr, pc, sp, bp); +} + +void AsanOnSIGSEGV(int, void *siginfo, void *context) { + ScopedDeadlySignal signal_scope(GetCurrentThread()); + int code = (int)((siginfo_t*)siginfo)->si_code; + // Write the first message using the bullet-proof write. 
+  if (13 != internal_write(2, "ASAN:SIGSEGV\n", 13)) Die();
+  SignalContext sig = SignalContext::Create(siginfo, context);
+
+  // Access at a reasonable offset above SP, or slightly below it (to account
+  // for x86_64 or PowerPC redzone, ARM push of multiple registers, etc) is
+  // probably a stack overflow.
+  bool IsStackAccess = sig.addr + 512 > sig.sp && sig.addr < sig.sp + 0xFFFF;
+
+#if __powerpc__
+  // Large stack frames can be allocated with e.g.
+  //  lis r0,-10000
+  //  stdux r1,r1,r0 # store sp to [sp-10000] and update sp by -10000
+  // If the store faults then sp will not have been updated, so the test above
+  // will not work, because the fault address will be more than just "slightly"
+  // below sp.
+  if (!IsStackAccess && IsAccessibleMemoryRange(sig.pc, 4)) {
+    // Decode the faulting instruction: ra = RA field (bits 16..20),
+    // opcd = primary opcode (top 6 bits), xo = extended opcode (bits 1..10).
+    u32 inst = *(unsigned *)sig.pc;
+    u32 ra = (inst >> 16) & 0x1F;
+    u32 opcd = inst >> 26;
+    u32 xo = (inst >> 1) & 0x3FF;
+    // Check for store-with-update to sp. The instructions we accept are:
+    //   stbu rs,d(ra)          stbux rs,ra,rb
+    //   sthu rs,d(ra)          sthux rs,ra,rb
+    //   stwu rs,d(ra)          stwux rs,ra,rb
+    //   stdu rs,ds(ra)         stdux rs,ra,rb
+    // where ra is r1 (the stack pointer).
+    if (ra == 1 &&
+        (opcd == 39 || opcd == 45 || opcd == 37 || opcd == 62 ||
+         (opcd == 31 && (xo == 247 || xo == 439 || xo == 183 || xo == 181))))
+      IsStackAccess = true;
+  }
+#endif // __powerpc__
+
+  // We also check si_code to filter out SEGV caused by something other
+  // than hitting the guard page or unmapped memory, like, for example,
+  // unaligned memory access.
+ if (IsStackAccess && (code == si_SEGV_MAPERR || code == si_SEGV_ACCERR)) + ReportStackOverflow(sig); + else + ReportSIGSEGV("SEGV", sig); +} + +// ---------------------- TSD ---------------- {{{1 + +static pthread_key_t tsd_key; +static bool tsd_key_inited = false; +void AsanTSDInit(void (*destructor)(void *tsd)) { + CHECK(!tsd_key_inited); + tsd_key_inited = true; + CHECK_EQ(0, pthread_key_create(&tsd_key, destructor)); +} + +void *AsanTSDGet() { + CHECK(tsd_key_inited); + return pthread_getspecific(tsd_key); +} + +void AsanTSDSet(void *tsd) { + CHECK(tsd_key_inited); + pthread_setspecific(tsd_key, tsd); +} + +void PlatformTSDDtor(void *tsd) { + AsanThreadContext *context = (AsanThreadContext*)tsd; + if (context->destructor_iterations > 1) { + context->destructor_iterations--; + CHECK_EQ(0, pthread_setspecific(tsd_key, tsd)); + return; + } + AsanThread::TSDDtor(tsd); +} +} // namespace __asan + +#endif // SANITIZER_POSIX diff --git a/contrib/compiler-rt/lib/asan/asan_preinit.cc b/contrib/compiler-rt/lib/asan/asan_preinit.cc new file mode 100644 index 000000000000..a3986d24f9c3 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_preinit.cc @@ -0,0 +1,25 @@ +//===-- asan_preinit.cc ---------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Call __asan_init at the very early stage of process startup. +//===----------------------------------------------------------------------===// +#include "asan_internal.h" + +using namespace __asan; + +#if SANITIZER_CAN_USE_PREINIT_ARRAY + // The symbol is called __local_asan_preinit, because it's not intended to be + // exported. 
+ // This code linked into the main executable when -fsanitize=address is in + // the link flags. It can only use exported interface functions. + __attribute__((section(".preinit_array"), used)) + void (*__local_asan_preinit)(void) = __asan_init; +#endif diff --git a/contrib/compiler-rt/lib/asan/asan_report.cc b/contrib/compiler-rt/lib/asan/asan_report.cc new file mode 100644 index 000000000000..0fb50276186b --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_report.cc @@ -0,0 +1,1088 @@ +//===-- asan_report.cc ----------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// This file contains error reporting code. +//===----------------------------------------------------------------------===// +#include "asan_flags.h" +#include "asan_internal.h" +#include "asan_mapping.h" +#include "asan_report.h" +#include "asan_stack.h" +#include "asan_thread.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_report_decorator.h" +#include "sanitizer_common/sanitizer_stackdepot.h" +#include "sanitizer_common/sanitizer_symbolizer.h" + +namespace __asan { + +// -------------------- User-specified callbacks ----------------- {{{1 +static void (*error_report_callback)(const char*); +static char *error_message_buffer = 0; +static uptr error_message_buffer_pos = 0; +static uptr error_message_buffer_size = 0; + +struct ReportData { + uptr pc; + uptr sp; + uptr bp; + uptr addr; + bool is_write; + uptr access_size; + const char *description; +}; + +static bool report_happened = false; +static ReportData report_data = {}; + +void AppendToErrorMessageBuffer(const char *buffer) { + if 
(error_message_buffer) { + uptr length = internal_strlen(buffer); + CHECK_GE(error_message_buffer_size, error_message_buffer_pos); + uptr remaining = error_message_buffer_size - error_message_buffer_pos; + internal_strncpy(error_message_buffer + error_message_buffer_pos, + buffer, remaining); + error_message_buffer[error_message_buffer_size - 1] = '\0'; + // FIXME: reallocate the buffer instead of truncating the message. + error_message_buffer_pos += remaining > length ? length : remaining; + } +} + +// ---------------------- Decorator ------------------------------ {{{1 +class Decorator: public __sanitizer::SanitizerCommonDecorator { + public: + Decorator() : SanitizerCommonDecorator() { } + const char *Access() { return Blue(); } + const char *EndAccess() { return Default(); } + const char *Location() { return Green(); } + const char *EndLocation() { return Default(); } + const char *Allocation() { return Magenta(); } + const char *EndAllocation() { return Default(); } + + const char *ShadowByte(u8 byte) { + switch (byte) { + case kAsanHeapLeftRedzoneMagic: + case kAsanHeapRightRedzoneMagic: + case kAsanArrayCookieMagic: + return Red(); + case kAsanHeapFreeMagic: + return Magenta(); + case kAsanStackLeftRedzoneMagic: + case kAsanStackMidRedzoneMagic: + case kAsanStackRightRedzoneMagic: + case kAsanStackPartialRedzoneMagic: + return Red(); + case kAsanStackAfterReturnMagic: + return Magenta(); + case kAsanInitializationOrderMagic: + return Cyan(); + case kAsanUserPoisonedMemoryMagic: + case kAsanContiguousContainerOOBMagic: + case kAsanAllocaLeftMagic: + case kAsanAllocaRightMagic: + return Blue(); + case kAsanStackUseAfterScopeMagic: + return Magenta(); + case kAsanGlobalRedzoneMagic: + return Red(); + case kAsanInternalHeapMagic: + return Yellow(); + case kAsanIntraObjectRedzone: + return Yellow(); + default: + return Default(); + } + } + const char *EndShadowByte() { return Default(); } + const char *MemoryByte() { return Magenta(); } + const char 
*EndMemoryByte() { return Default(); } +}; + +// ---------------------- Helper functions ----------------------- {{{1 + +static void PrintMemoryByte(InternalScopedString *str, const char *before, + u8 byte, bool in_shadow, const char *after = "\n") { + Decorator d; + str->append("%s%s%x%x%s%s", before, + in_shadow ? d.ShadowByte(byte) : d.MemoryByte(), + byte >> 4, byte & 15, + in_shadow ? d.EndShadowByte() : d.EndMemoryByte(), after); +} + +static void PrintShadowByte(InternalScopedString *str, const char *before, + u8 byte, const char *after = "\n") { + PrintMemoryByte(str, before, byte, /*in_shadow*/true, after); +} + +static void PrintShadowBytes(InternalScopedString *str, const char *before, + u8 *bytes, u8 *guilty, uptr n) { + Decorator d; + if (before) str->append("%s%p:", before, bytes); + for (uptr i = 0; i < n; i++) { + u8 *p = bytes + i; + const char *before = + p == guilty ? "[" : (p - 1 == guilty && i != 0) ? "" : " "; + const char *after = p == guilty ? "]" : ""; + PrintShadowByte(str, before, *p, after); + } + str->append("\n"); +} + +static void PrintLegend(InternalScopedString *str) { + str->append( + "Shadow byte legend (one shadow byte represents %d " + "application bytes):\n", + (int)SHADOW_GRANULARITY); + PrintShadowByte(str, " Addressable: ", 0); + str->append(" Partially addressable: "); + for (u8 i = 1; i < SHADOW_GRANULARITY; i++) PrintShadowByte(str, "", i, " "); + str->append("\n"); + PrintShadowByte(str, " Heap left redzone: ", + kAsanHeapLeftRedzoneMagic); + PrintShadowByte(str, " Heap right redzone: ", + kAsanHeapRightRedzoneMagic); + PrintShadowByte(str, " Freed heap region: ", kAsanHeapFreeMagic); + PrintShadowByte(str, " Stack left redzone: ", + kAsanStackLeftRedzoneMagic); + PrintShadowByte(str, " Stack mid redzone: ", + kAsanStackMidRedzoneMagic); + PrintShadowByte(str, " Stack right redzone: ", + kAsanStackRightRedzoneMagic); + PrintShadowByte(str, " Stack partial redzone: ", + kAsanStackPartialRedzoneMagic); + 
PrintShadowByte(str, " Stack after return: ", + kAsanStackAfterReturnMagic); + PrintShadowByte(str, " Stack use after scope: ", + kAsanStackUseAfterScopeMagic); + PrintShadowByte(str, " Global redzone: ", kAsanGlobalRedzoneMagic); + PrintShadowByte(str, " Global init order: ", + kAsanInitializationOrderMagic); + PrintShadowByte(str, " Poisoned by user: ", + kAsanUserPoisonedMemoryMagic); + PrintShadowByte(str, " Container overflow: ", + kAsanContiguousContainerOOBMagic); + PrintShadowByte(str, " Array cookie: ", + kAsanArrayCookieMagic); + PrintShadowByte(str, " Intra object redzone: ", + kAsanIntraObjectRedzone); + PrintShadowByte(str, " ASan internal: ", kAsanInternalHeapMagic); + PrintShadowByte(str, " Left alloca redzone: ", kAsanAllocaLeftMagic); + PrintShadowByte(str, " Right alloca redzone: ", kAsanAllocaRightMagic); +} + +void MaybeDumpInstructionBytes(uptr pc) { + if (!flags()->dump_instruction_bytes || (pc < GetPageSizeCached())) + return; + InternalScopedString str(1024); + str.append("First 16 instruction bytes at pc: "); + if (IsAccessibleMemoryRange(pc, 16)) { + for (int i = 0; i < 16; ++i) { + PrintMemoryByte(&str, "", ((u8 *)pc)[i], /*in_shadow*/false, " "); + } + str.append("\n"); + } else { + str.append("unaccessible\n"); + } + Report("%s", str.data()); +} + +static void PrintShadowMemoryForAddress(uptr addr) { + if (!AddrIsInMem(addr)) return; + uptr shadow_addr = MemToShadow(addr); + const uptr n_bytes_per_row = 16; + uptr aligned_shadow = shadow_addr & ~(n_bytes_per_row - 1); + InternalScopedString str(4096 * 8); + str.append("Shadow bytes around the buggy address:\n"); + for (int i = -5; i <= 5; i++) { + const char *prefix = (i == 0) ? 
"=>" : " "; + PrintShadowBytes(&str, prefix, (u8 *)(aligned_shadow + i * n_bytes_per_row), + (u8 *)shadow_addr, n_bytes_per_row); + } + if (flags()->print_legend) PrintLegend(&str); + Printf("%s", str.data()); +} + +static void PrintZoneForPointer(uptr ptr, uptr zone_ptr, + const char *zone_name) { + if (zone_ptr) { + if (zone_name) { + Printf("malloc_zone_from_ptr(%p) = %p, which is %s\n", + ptr, zone_ptr, zone_name); + } else { + Printf("malloc_zone_from_ptr(%p) = %p, which doesn't have a name\n", + ptr, zone_ptr); + } + } else { + Printf("malloc_zone_from_ptr(%p) = 0\n", ptr); + } +} + +static void DescribeThread(AsanThread *t) { + if (t) + DescribeThread(t->context()); +} + +// ---------------------- Address Descriptions ------------------- {{{1 + +static bool IsASCII(unsigned char c) { + return /*0x00 <= c &&*/ c <= 0x7F; +} + +static const char *MaybeDemangleGlobalName(const char *name) { + // We can spoil names of globals with C linkage, so use an heuristic + // approach to check if the name should be demangled. + bool should_demangle = false; + if (name[0] == '_' && name[1] == 'Z') + should_demangle = true; + else if (SANITIZER_WINDOWS && name[0] == '\01' && name[1] == '?') + should_demangle = true; + + return should_demangle ? Symbolizer::GetOrInit()->Demangle(name) : name; +} + +// Check if the global is a zero-terminated ASCII string. If so, print it. +static void PrintGlobalNameIfASCII(InternalScopedString *str, + const __asan_global &g) { + for (uptr p = g.beg; p < g.beg + g.size - 1; p++) { + unsigned char c = *(unsigned char*)p; + if (c == '\0' || !IsASCII(c)) return; + } + if (*(char*)(g.beg + g.size - 1) != '\0') return; + str->append(" '%s' is ascii string '%s'\n", MaybeDemangleGlobalName(g.name), + (char *)g.beg); +} + +static const char *GlobalFilename(const __asan_global &g) { + const char *res = g.module_name; + // Prefer the filename from source location, if is available. 
+ if (g.location) + res = g.location->filename; + CHECK(res); + return res; +} + +static void PrintGlobalLocation(InternalScopedString *str, + const __asan_global &g) { + str->append("%s", GlobalFilename(g)); + if (!g.location) + return; + if (g.location->line_no) + str->append(":%d", g.location->line_no); + if (g.location->column_no) + str->append(":%d", g.location->column_no); +} + +bool DescribeAddressRelativeToGlobal(uptr addr, uptr size, + const __asan_global &g) { + if (!IsAddressNearGlobal(addr, g)) return false; + InternalScopedString str(4096); + Decorator d; + str.append("%s", d.Location()); + if (addr < g.beg) { + str.append("%p is located %zd bytes to the left", (void *)addr, + g.beg - addr); + } else if (addr + size > g.beg + g.size) { + if (addr < g.beg + g.size) + addr = g.beg + g.size; + str.append("%p is located %zd bytes to the right", (void *)addr, + addr - (g.beg + g.size)); + } else { + // Can it happen? + str.append("%p is located %zd bytes inside", (void *)addr, addr - g.beg); + } + str.append(" of global variable '%s' defined in '", + MaybeDemangleGlobalName(g.name)); + PrintGlobalLocation(&str, g); + str.append("' (0x%zx) of size %zu\n", g.beg, g.size); + str.append("%s", d.EndLocation()); + PrintGlobalNameIfASCII(&str, g); + Printf("%s", str.data()); + return true; +} + +bool DescribeAddressIfShadow(uptr addr, AddressDescription *descr, bool print) { + if (AddrIsInMem(addr)) + return false; + const char *area_type = nullptr; + if (AddrIsInShadowGap(addr)) area_type = "shadow gap"; + else if (AddrIsInHighShadow(addr)) area_type = "high shadow"; + else if (AddrIsInLowShadow(addr)) area_type = "low shadow"; + if (area_type != nullptr) { + if (print) { + Printf("Address %p is located in the %s area.\n", addr, area_type); + } else { + CHECK(descr); + descr->region_kind = area_type; + } + return true; + } + CHECK(0 && "Address is not in memory and not in shadow?"); + return false; +} + +// Return " (thread_name) " or an empty string if the name 
is empty. +const char *ThreadNameWithParenthesis(AsanThreadContext *t, char buff[], + uptr buff_len) { + const char *name = t->name; + if (name[0] == '\0') return ""; + buff[0] = 0; + internal_strncat(buff, " (", 3); + internal_strncat(buff, name, buff_len - 4); + internal_strncat(buff, ")", 2); + return buff; +} + +const char *ThreadNameWithParenthesis(u32 tid, char buff[], + uptr buff_len) { + if (tid == kInvalidTid) return ""; + asanThreadRegistry().CheckLocked(); + AsanThreadContext *t = GetThreadContextByTidLocked(tid); + return ThreadNameWithParenthesis(t, buff, buff_len); +} + +static void PrintAccessAndVarIntersection(const StackVarDescr &var, uptr addr, + uptr access_size, uptr prev_var_end, + uptr next_var_beg) { + uptr var_end = var.beg + var.size; + uptr addr_end = addr + access_size; + const char *pos_descr = 0; + // If the variable [var.beg, var_end) is the nearest variable to the + // current memory access, indicate it in the log. + if (addr >= var.beg) { + if (addr_end <= var_end) + pos_descr = "is inside"; // May happen if this is a use-after-return. + else if (addr < var_end) + pos_descr = "partially overflows"; + else if (addr_end <= next_var_beg && + next_var_beg - addr_end >= addr - var_end) + pos_descr = "overflows"; + } else { + if (addr_end > var.beg) + pos_descr = "partially underflows"; + else if (addr >= prev_var_end && + addr - prev_var_end >= var.beg - addr_end) + pos_descr = "underflows"; + } + InternalScopedString str(1024); + str.append(" [%zd, %zd)", var.beg, var_end); + // Render variable name. + str.append(" '"); + for (uptr i = 0; i < var.name_len; ++i) { + str.append("%c", var.name_pos[i]); + } + str.append("'"); + if (pos_descr) { + Decorator d; + // FIXME: we may want to also print the size of the access here, + // but in case of accesses generated by memset it may be confusing. 
+ str.append("%s <== Memory access at offset %zd %s this variable%s\n", + d.Location(), addr, pos_descr, d.EndLocation()); + } else { + str.append("\n"); + } + Printf("%s", str.data()); +} + +bool ParseFrameDescription(const char *frame_descr, + InternalMmapVector<StackVarDescr> *vars) { + CHECK(frame_descr); + char *p; + // This string is created by the compiler and has the following form: + // "n alloc_1 alloc_2 ... alloc_n" + // where alloc_i looks like "offset size len ObjectName". + uptr n_objects = (uptr)internal_simple_strtoll(frame_descr, &p, 10); + if (n_objects == 0) + return false; + + for (uptr i = 0; i < n_objects; i++) { + uptr beg = (uptr)internal_simple_strtoll(p, &p, 10); + uptr size = (uptr)internal_simple_strtoll(p, &p, 10); + uptr len = (uptr)internal_simple_strtoll(p, &p, 10); + if (beg == 0 || size == 0 || *p != ' ') { + return false; + } + p++; + StackVarDescr var = {beg, size, p, len}; + vars->push_back(var); + p += len; + } + + return true; +} + +bool DescribeAddressIfStack(uptr addr, uptr access_size) { + AsanThread *t = FindThreadByStackAddress(addr); + if (!t) return false; + + Decorator d; + char tname[128]; + Printf("%s", d.Location()); + Printf("Address %p is located in stack of thread T%d%s", addr, t->tid(), + ThreadNameWithParenthesis(t->tid(), tname, sizeof(tname))); + + // Try to fetch precise stack frame for this access. + AsanThread::StackFrameAccess access; + if (!t->GetStackFrameAccessByAddr(addr, &access)) { + Printf("%s\n", d.EndLocation()); + return true; + } + Printf(" at offset %zu in frame%s\n", access.offset, d.EndLocation()); + + // Now we print the frame where the alloca has happened. + // We print this frame as a stack trace with one element. + // The symbolizer may print more than one frame if inlining was involved. + // The frame numbers may be different than those in the stack trace printed + // previously. 
That's unfortunate, but I have no better solution, + // especially given that the alloca may be from entirely different place + // (e.g. use-after-scope, or different thread's stack). +#if defined(__powerpc64__) && defined(__BIG_ENDIAN__) + // On PowerPC64 ELFv1, the address of a function actually points to a + // three-doubleword data structure with the first field containing + // the address of the function's code. + access.frame_pc = *reinterpret_cast<uptr *>(access.frame_pc); +#endif + access.frame_pc += 16; + Printf("%s", d.EndLocation()); + StackTrace alloca_stack(&access.frame_pc, 1); + alloca_stack.Print(); + + InternalMmapVector<StackVarDescr> vars(16); + if (!ParseFrameDescription(access.frame_descr, &vars)) { + Printf("AddressSanitizer can't parse the stack frame " + "descriptor: |%s|\n", access.frame_descr); + // 'addr' is a stack address, so return true even if we can't parse frame + return true; + } + uptr n_objects = vars.size(); + // Report the number of stack objects. + Printf(" This frame has %zu object(s):\n", n_objects); + + // Report all objects in this frame. + for (uptr i = 0; i < n_objects; i++) { + uptr prev_var_end = i ? vars[i - 1].beg + vars[i - 1].size : 0; + uptr next_var_beg = i + 1 < n_objects ? 
vars[i + 1].beg : ~(0UL); + PrintAccessAndVarIntersection(vars[i], access.offset, access_size, + prev_var_end, next_var_beg); + } + Printf("HINT: this may be a false positive if your program uses " + "some custom stack unwind mechanism or swapcontext\n"); + if (SANITIZER_WINDOWS) + Printf(" (longjmp, SEH and C++ exceptions *are* supported)\n"); + else + Printf(" (longjmp and C++ exceptions *are* supported)\n"); + + DescribeThread(t); + return true; +} + +static void DescribeAccessToHeapChunk(AsanChunkView chunk, uptr addr, + uptr access_size) { + sptr offset; + Decorator d; + InternalScopedString str(4096); + str.append("%s", d.Location()); + if (chunk.AddrIsAtLeft(addr, access_size, &offset)) { + str.append("%p is located %zd bytes to the left of", (void *)addr, offset); + } else if (chunk.AddrIsAtRight(addr, access_size, &offset)) { + if (offset < 0) { + addr -= offset; + offset = 0; + } + str.append("%p is located %zd bytes to the right of", (void *)addr, offset); + } else if (chunk.AddrIsInside(addr, access_size, &offset)) { + str.append("%p is located %zd bytes inside of", (void*)addr, offset); + } else { + str.append("%p is located somewhere around (this is AddressSanitizer bug!)", + (void *)addr); + } + str.append(" %zu-byte region [%p,%p)\n", chunk.UsedSize(), + (void *)(chunk.Beg()), (void *)(chunk.End())); + str.append("%s", d.EndLocation()); + Printf("%s", str.data()); +} + +void DescribeHeapAddress(uptr addr, uptr access_size) { + AsanChunkView chunk = FindHeapChunkByAddress(addr); + if (!chunk.IsValid()) { + Printf("AddressSanitizer can not describe address in more detail " + "(wild memory access suspected).\n"); + return; + } + DescribeAccessToHeapChunk(chunk, addr, access_size); + CHECK(chunk.AllocTid() != kInvalidTid); + asanThreadRegistry().CheckLocked(); + AsanThreadContext *alloc_thread = + GetThreadContextByTidLocked(chunk.AllocTid()); + StackTrace alloc_stack = chunk.GetAllocStack(); + char tname[128]; + Decorator d; + AsanThreadContext 
*free_thread = 0; + if (chunk.FreeTid() != kInvalidTid) { + free_thread = GetThreadContextByTidLocked(chunk.FreeTid()); + Printf("%sfreed by thread T%d%s here:%s\n", d.Allocation(), + free_thread->tid, + ThreadNameWithParenthesis(free_thread, tname, sizeof(tname)), + d.EndAllocation()); + StackTrace free_stack = chunk.GetFreeStack(); + free_stack.Print(); + Printf("%spreviously allocated by thread T%d%s here:%s\n", + d.Allocation(), alloc_thread->tid, + ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)), + d.EndAllocation()); + } else { + Printf("%sallocated by thread T%d%s here:%s\n", d.Allocation(), + alloc_thread->tid, + ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)), + d.EndAllocation()); + } + alloc_stack.Print(); + DescribeThread(GetCurrentThread()); + if (free_thread) + DescribeThread(free_thread); + DescribeThread(alloc_thread); +} + +void DescribeAddress(uptr addr, uptr access_size) { + // Check if this is shadow or shadow gap. + if (DescribeAddressIfShadow(addr)) + return; + CHECK(AddrIsInMem(addr)); + if (DescribeAddressIfGlobal(addr, access_size)) + return; + if (DescribeAddressIfStack(addr, access_size)) + return; + // Assume it is a heap address. + DescribeHeapAddress(addr, access_size); +} + +// ------------------- Thread description -------------------- {{{1 + +void DescribeThread(AsanThreadContext *context) { + CHECK(context); + asanThreadRegistry().CheckLocked(); + // No need to announce the main thread. + if (context->tid == 0 || context->announced) { + return; + } + context->announced = true; + char tname[128]; + InternalScopedString str(1024); + str.append("Thread T%d%s", context->tid, + ThreadNameWithParenthesis(context->tid, tname, sizeof(tname))); + str.append( + " created by T%d%s here:\n", context->parent_tid, + ThreadNameWithParenthesis(context->parent_tid, tname, sizeof(tname))); + Printf("%s", str.data()); + StackDepotGet(context->stack_id).Print(); + // Recursively described parent thread if needed. 
+  if (flags()->print_full_thread_history) {
+    AsanThreadContext *parent_context =
+        GetThreadContextByTidLocked(context->parent_tid);
+    DescribeThread(parent_context);
+  }
+}
+
+// -------------------- Different kinds of reports ----------------- {{{1
+
+// Use ScopedInErrorReport to run common actions just before and
+// immediately after printing error report.
+class ScopedInErrorReport {
+ public:
+  // Gates error reporting: the first caller proceeds to print a report; any
+  // later caller (same or another thread) never returns from this ctor.
+  explicit ScopedInErrorReport(ReportData *report = nullptr) {
+    static atomic_uint32_t num_calls;
+    static u32 reporting_thread_tid;
+    // NOTE(review): reporting_thread_tid is only assigned after this gate, so
+    // a racing second thread may read its zero-initialized value here; the
+    // effect is merely an extra sleep before exiting -- confirm this is
+    // acceptable.
+    if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) != 0) {
+      // Do not print more than one report, otherwise they will mix up.
+      // Error reporting functions shouldn't return in this situation, as
+      // they are defined as no-return.
+      Report("AddressSanitizer: while reporting a bug found another one. "
+             "Ignoring.\n");
+      u32 current_tid = GetCurrentTidOrInvalid();
+      if (current_tid != reporting_thread_tid) {
+        // ASan found two bugs in different threads simultaneously. Sleep
+        // long enough to make sure that the thread which started to print
+        // an error report will finish doing it.
+        SleepForSeconds(Max(100, flags()->sleep_before_dying + 1));
+      }
+      // If we're still not dead for some reason, use raw _exit() instead of
+      // Die() to bypass any additional checks.
+      internal__exit(flags()->exitcode);
+    }
+    if (report) report_data = *report;
+    report_happened = true;
+    ASAN_ON_ERROR();
+    // Make sure the registry and sanitizer report mutexes are locked while
+    // we're printing an error report.
+    // We can lock them only here to avoid self-deadlock in case of
+    // recursive reports.
+    asanThreadRegistry().Lock();
+    CommonSanitizerReportMutex.Lock();
+    reporting_thread_tid = GetCurrentTidOrInvalid();
+    Printf("===================================================="
+           "=============\n");
+  }
+  // Destructor is NORETURN, as functions that report errors are.
+ NORETURN ~ScopedInErrorReport() { + // Make sure the current thread is announced. + DescribeThread(GetCurrentThread()); + // We may want to grab this lock again when printing stats. + asanThreadRegistry().Unlock(); + // Print memory stats. + if (flags()->print_stats) + __asan_print_accumulated_stats(); + if (error_report_callback) { + error_report_callback(error_message_buffer); + } + Report("ABORTING\n"); + Die(); + } +}; + +void ReportStackOverflow(const SignalContext &sig) { + ScopedInErrorReport in_report; + Decorator d; + Printf("%s", d.Warning()); + Report( + "ERROR: AddressSanitizer: stack-overflow on address %p" + " (pc %p bp %p sp %p T%d)\n", + (void *)sig.addr, (void *)sig.pc, (void *)sig.bp, (void *)sig.sp, + GetCurrentTidOrInvalid()); + Printf("%s", d.EndWarning()); + GET_STACK_TRACE_SIGNAL(sig); + stack.Print(); + ReportErrorSummary("stack-overflow", &stack); +} + +void ReportSIGSEGV(const char *description, const SignalContext &sig) { + ScopedInErrorReport in_report; + Decorator d; + Printf("%s", d.Warning()); + Report( + "ERROR: AddressSanitizer: %s on unknown address %p" + " (pc %p bp %p sp %p T%d)\n", + description, (void *)sig.addr, (void *)sig.pc, (void *)sig.bp, + (void *)sig.sp, GetCurrentTidOrInvalid()); + if (sig.pc < GetPageSizeCached()) { + Report("Hint: pc points to the zero page.\n"); + } + Printf("%s", d.EndWarning()); + GET_STACK_TRACE_SIGNAL(sig); + stack.Print(); + MaybeDumpInstructionBytes(sig.pc); + Printf("AddressSanitizer can not provide additional info.\n"); + ReportErrorSummary("SEGV", &stack); +} + +void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack) { + ScopedInErrorReport in_report; + Decorator d; + Printf("%s", d.Warning()); + char tname[128]; + u32 curr_tid = GetCurrentTidOrInvalid(); + Report("ERROR: AddressSanitizer: attempting double-free on %p in " + "thread T%d%s:\n", + addr, curr_tid, + ThreadNameWithParenthesis(curr_tid, tname, sizeof(tname))); + Printf("%s", d.EndWarning()); + 
CHECK_GT(free_stack->size, 0); + GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp); + stack.Print(); + DescribeHeapAddress(addr, 1); + ReportErrorSummary("double-free", &stack); +} + +void ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size, + BufferedStackTrace *free_stack) { + ScopedInErrorReport in_report; + Decorator d; + Printf("%s", d.Warning()); + char tname[128]; + u32 curr_tid = GetCurrentTidOrInvalid(); + Report("ERROR: AddressSanitizer: new-delete-type-mismatch on %p in " + "thread T%d%s:\n", + addr, curr_tid, + ThreadNameWithParenthesis(curr_tid, tname, sizeof(tname))); + Printf("%s object passed to delete has wrong type:\n", d.EndWarning()); + Printf(" size of the allocated type: %zd bytes;\n" + " size of the deallocated type: %zd bytes.\n", + asan_mz_size(reinterpret_cast<void*>(addr)), delete_size); + CHECK_GT(free_stack->size, 0); + GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp); + stack.Print(); + DescribeHeapAddress(addr, 1); + ReportErrorSummary("new-delete-type-mismatch", &stack); + Report("HINT: if you don't care about these warnings you may set " + "ASAN_OPTIONS=new_delete_type_mismatch=0\n"); +} + +void ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack) { + ScopedInErrorReport in_report; + Decorator d; + Printf("%s", d.Warning()); + char tname[128]; + u32 curr_tid = GetCurrentTidOrInvalid(); + Report("ERROR: AddressSanitizer: attempting free on address " + "which was not malloc()-ed: %p in thread T%d%s\n", addr, + curr_tid, ThreadNameWithParenthesis(curr_tid, tname, sizeof(tname))); + Printf("%s", d.EndWarning()); + CHECK_GT(free_stack->size, 0); + GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp); + stack.Print(); + DescribeHeapAddress(addr, 1); + ReportErrorSummary("bad-free", &stack); +} + +void ReportAllocTypeMismatch(uptr addr, BufferedStackTrace *free_stack, + AllocType alloc_type, + AllocType dealloc_type) { + static const char *alloc_names[] = + 
{"INVALID", "malloc", "operator new", "operator new []"}; + static const char *dealloc_names[] = + {"INVALID", "free", "operator delete", "operator delete []"}; + CHECK_NE(alloc_type, dealloc_type); + ScopedInErrorReport in_report; + Decorator d; + Printf("%s", d.Warning()); + Report("ERROR: AddressSanitizer: alloc-dealloc-mismatch (%s vs %s) on %p\n", + alloc_names[alloc_type], dealloc_names[dealloc_type], addr); + Printf("%s", d.EndWarning()); + CHECK_GT(free_stack->size, 0); + GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp); + stack.Print(); + DescribeHeapAddress(addr, 1); + ReportErrorSummary("alloc-dealloc-mismatch", &stack); + Report("HINT: if you don't care about these warnings you may set " + "ASAN_OPTIONS=alloc_dealloc_mismatch=0\n"); +} + +void ReportMallocUsableSizeNotOwned(uptr addr, BufferedStackTrace *stack) { + ScopedInErrorReport in_report; + Decorator d; + Printf("%s", d.Warning()); + Report("ERROR: AddressSanitizer: attempting to call " + "malloc_usable_size() for pointer which is " + "not owned: %p\n", addr); + Printf("%s", d.EndWarning()); + stack->Print(); + DescribeHeapAddress(addr, 1); + ReportErrorSummary("bad-malloc_usable_size", stack); +} + +void ReportSanitizerGetAllocatedSizeNotOwned(uptr addr, + BufferedStackTrace *stack) { + ScopedInErrorReport in_report; + Decorator d; + Printf("%s", d.Warning()); + Report("ERROR: AddressSanitizer: attempting to call " + "__sanitizer_get_allocated_size() for pointer which is " + "not owned: %p\n", addr); + Printf("%s", d.EndWarning()); + stack->Print(); + DescribeHeapAddress(addr, 1); + ReportErrorSummary("bad-__sanitizer_get_allocated_size", stack); +} + +void ReportStringFunctionMemoryRangesOverlap(const char *function, + const char *offset1, uptr length1, + const char *offset2, uptr length2, + BufferedStackTrace *stack) { + ScopedInErrorReport in_report; + Decorator d; + char bug_type[100]; + internal_snprintf(bug_type, sizeof(bug_type), "%s-param-overlap", function); + 
Printf("%s", d.Warning()); + Report("ERROR: AddressSanitizer: %s: " + "memory ranges [%p,%p) and [%p, %p) overlap\n", \ + bug_type, offset1, offset1 + length1, offset2, offset2 + length2); + Printf("%s", d.EndWarning()); + stack->Print(); + DescribeAddress((uptr)offset1, length1); + DescribeAddress((uptr)offset2, length2); + ReportErrorSummary(bug_type, stack); +} + +void ReportStringFunctionSizeOverflow(uptr offset, uptr size, + BufferedStackTrace *stack) { + ScopedInErrorReport in_report; + Decorator d; + const char *bug_type = "negative-size-param"; + Printf("%s", d.Warning()); + Report("ERROR: AddressSanitizer: %s: (size=%zd)\n", bug_type, size); + Printf("%s", d.EndWarning()); + stack->Print(); + DescribeAddress(offset, size); + ReportErrorSummary(bug_type, stack); +} + +void ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end, + uptr old_mid, uptr new_mid, + BufferedStackTrace *stack) { + ScopedInErrorReport in_report; + Report("ERROR: AddressSanitizer: bad parameters to " + "__sanitizer_annotate_contiguous_container:\n" + " beg : %p\n" + " end : %p\n" + " old_mid : %p\n" + " new_mid : %p\n", + beg, end, old_mid, new_mid); + stack->Print(); + ReportErrorSummary("bad-__sanitizer_annotate_contiguous_container", stack); +} + +void ReportODRViolation(const __asan_global *g1, u32 stack_id1, + const __asan_global *g2, u32 stack_id2) { + ScopedInErrorReport in_report; + Decorator d; + Printf("%s", d.Warning()); + Report("ERROR: AddressSanitizer: odr-violation (%p):\n", g1->beg); + Printf("%s", d.EndWarning()); + InternalScopedString g1_loc(256), g2_loc(256); + PrintGlobalLocation(&g1_loc, *g1); + PrintGlobalLocation(&g2_loc, *g2); + Printf(" [1] size=%zd '%s' %s\n", g1->size, + MaybeDemangleGlobalName(g1->name), g1_loc.data()); + Printf(" [2] size=%zd '%s' %s\n", g2->size, + MaybeDemangleGlobalName(g2->name), g2_loc.data()); + if (stack_id1 && stack_id2) { + Printf("These globals were registered at these points:\n"); + Printf(" [1]:\n"); + 
StackDepotGet(stack_id1).Print(); + Printf(" [2]:\n"); + StackDepotGet(stack_id2).Print(); + } + Report("HINT: if you don't care about these warnings you may set " + "ASAN_OPTIONS=detect_odr_violation=0\n"); + InternalScopedString error_msg(256); + error_msg.append("odr-violation: global '%s' at %s", + MaybeDemangleGlobalName(g1->name), g1_loc.data()); + ReportErrorSummary(error_msg.data()); +} + +// ----------------------- CheckForInvalidPointerPair ----------- {{{1 +static NOINLINE void +ReportInvalidPointerPair(uptr pc, uptr bp, uptr sp, uptr a1, uptr a2) { + ScopedInErrorReport in_report; + Decorator d; + Printf("%s", d.Warning()); + Report("ERROR: AddressSanitizer: invalid-pointer-pair: %p %p\n", a1, a2); + Printf("%s", d.EndWarning()); + GET_STACK_TRACE_FATAL(pc, bp); + stack.Print(); + DescribeAddress(a1, 1); + DescribeAddress(a2, 1); + ReportErrorSummary("invalid-pointer-pair", &stack); +} + +static INLINE void CheckForInvalidPointerPair(void *p1, void *p2) { + if (!flags()->detect_invalid_pointer_pairs) return; + uptr a1 = reinterpret_cast<uptr>(p1); + uptr a2 = reinterpret_cast<uptr>(p2); + AsanChunkView chunk1 = FindHeapChunkByAddress(a1); + AsanChunkView chunk2 = FindHeapChunkByAddress(a2); + bool valid1 = chunk1.IsValid(); + bool valid2 = chunk2.IsValid(); + if ((valid1 != valid2) || (valid1 && valid2 && !chunk1.Eq(chunk2))) { + GET_CALLER_PC_BP_SP; \ + return ReportInvalidPointerPair(pc, bp, sp, a1, a2); + } +} +// ----------------------- Mac-specific reports ----------------- {{{1 + +void WarnMacFreeUnallocated(uptr addr, uptr zone_ptr, const char *zone_name, + BufferedStackTrace *stack) { + // Just print a warning here. 
+ Printf("free_common(%p) -- attempting to free unallocated memory.\n" + "AddressSanitizer is ignoring this error on Mac OS now.\n", + addr); + PrintZoneForPointer(addr, zone_ptr, zone_name); + stack->Print(); + DescribeHeapAddress(addr, 1); +} + +void ReportMacMzReallocUnknown(uptr addr, uptr zone_ptr, const char *zone_name, + BufferedStackTrace *stack) { + ScopedInErrorReport in_report; + Printf("mz_realloc(%p) -- attempting to realloc unallocated memory.\n" + "This is an unrecoverable problem, exiting now.\n", + addr); + PrintZoneForPointer(addr, zone_ptr, zone_name); + stack->Print(); + DescribeHeapAddress(addr, 1); +} + +void ReportMacCfReallocUnknown(uptr addr, uptr zone_ptr, const char *zone_name, + BufferedStackTrace *stack) { + ScopedInErrorReport in_report; + Printf("cf_realloc(%p) -- attempting to realloc unallocated memory.\n" + "This is an unrecoverable problem, exiting now.\n", + addr); + PrintZoneForPointer(addr, zone_ptr, zone_name); + stack->Print(); + DescribeHeapAddress(addr, 1); +} + +} // namespace __asan + +// --------------------------- Interface --------------------- {{{1 +using namespace __asan; // NOLINT + +void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write, + uptr access_size) { + // Determine the error type. + const char *bug_descr = "unknown-crash"; + if (AddrIsInMem(addr)) { + u8 *shadow_addr = (u8*)MemToShadow(addr); + // If we are accessing 16 bytes, look at the second shadow byte. + if (*shadow_addr == 0 && access_size > SHADOW_GRANULARITY) + shadow_addr++; + // If we are in the partial right redzone, look at the next shadow byte. 
+ if (*shadow_addr > 0 && *shadow_addr < 128) + shadow_addr++; + switch (*shadow_addr) { + case kAsanHeapLeftRedzoneMagic: + case kAsanHeapRightRedzoneMagic: + case kAsanArrayCookieMagic: + bug_descr = "heap-buffer-overflow"; + break; + case kAsanHeapFreeMagic: + bug_descr = "heap-use-after-free"; + break; + case kAsanStackLeftRedzoneMagic: + bug_descr = "stack-buffer-underflow"; + break; + case kAsanInitializationOrderMagic: + bug_descr = "initialization-order-fiasco"; + break; + case kAsanStackMidRedzoneMagic: + case kAsanStackRightRedzoneMagic: + case kAsanStackPartialRedzoneMagic: + bug_descr = "stack-buffer-overflow"; + break; + case kAsanStackAfterReturnMagic: + bug_descr = "stack-use-after-return"; + break; + case kAsanUserPoisonedMemoryMagic: + bug_descr = "use-after-poison"; + break; + case kAsanContiguousContainerOOBMagic: + bug_descr = "container-overflow"; + break; + case kAsanStackUseAfterScopeMagic: + bug_descr = "stack-use-after-scope"; + break; + case kAsanGlobalRedzoneMagic: + bug_descr = "global-buffer-overflow"; + break; + case kAsanIntraObjectRedzone: + bug_descr = "intra-object-overflow"; + break; + case kAsanAllocaLeftMagic: + case kAsanAllocaRightMagic: + bug_descr = "dynamic-stack-buffer-overflow"; + break; + } + } + + ReportData report = { pc, sp, bp, addr, (bool)is_write, access_size, + bug_descr }; + ScopedInErrorReport in_report(&report); + + Decorator d; + Printf("%s", d.Warning()); + Report("ERROR: AddressSanitizer: %s on address " + "%p at pc %p bp %p sp %p\n", + bug_descr, (void*)addr, pc, bp, sp); + Printf("%s", d.EndWarning()); + + u32 curr_tid = GetCurrentTidOrInvalid(); + char tname[128]; + Printf("%s%s of size %zu at %p thread T%d%s%s\n", + d.Access(), + access_size ? (is_write ? 
"WRITE" : "READ") : "ACCESS", + access_size, (void*)addr, curr_tid, + ThreadNameWithParenthesis(curr_tid, tname, sizeof(tname)), + d.EndAccess()); + + GET_STACK_TRACE_FATAL(pc, bp); + stack.Print(); + + DescribeAddress(addr, access_size); + ReportErrorSummary(bug_descr, &stack); + PrintShadowMemoryForAddress(addr); +} + +void NOINLINE __asan_set_error_report_callback(void (*callback)(const char*)) { + error_report_callback = callback; + if (callback) { + error_message_buffer_size = 1 << 16; + error_message_buffer = + (char*)MmapOrDie(error_message_buffer_size, __func__); + error_message_buffer_pos = 0; + } +} + +void __asan_describe_address(uptr addr) { + // Thread registry must be locked while we're describing an address. + asanThreadRegistry().Lock(); + DescribeAddress(addr, 1); + asanThreadRegistry().Unlock(); +} + +int __asan_report_present() { + return report_happened ? 1 : 0; +} + +uptr __asan_get_report_pc() { + return report_data.pc; +} + +uptr __asan_get_report_bp() { + return report_data.bp; +} + +uptr __asan_get_report_sp() { + return report_data.sp; +} + +uptr __asan_get_report_address() { + return report_data.addr; +} + +int __asan_get_report_access_type() { + return report_data.is_write ? 1 : 0; +} + +uptr __asan_get_report_access_size() { + return report_data.access_size; +} + +const char *__asan_get_report_description() { + return report_data.description; +} + +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_ptr_sub(void *a, void *b) { + CheckForInvalidPointerPair(a, b); +} +SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_ptr_cmp(void *a, void *b) { + CheckForInvalidPointerPair(a, b); +} +} // extern "C" + +#if !SANITIZER_SUPPORTS_WEAK_HOOKS +// Provide default implementation of __asan_on_error that does nothing +// and may be overriden by user. 
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE NOINLINE +void __asan_on_error() {} +#endif diff --git a/contrib/compiler-rt/lib/asan/asan_report.h b/contrib/compiler-rt/lib/asan/asan_report.h new file mode 100644 index 000000000000..029c914b8a04 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_report.h @@ -0,0 +1,95 @@ +//===-- asan_report.h -------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// ASan-private header for error reporting functions. +//===----------------------------------------------------------------------===// + +#include "asan_allocator.h" +#include "asan_internal.h" +#include "asan_thread.h" + +namespace __asan { + +struct StackVarDescr { + uptr beg; + uptr size; + const char *name_pos; + uptr name_len; +}; + +struct AddressDescription { + char *name; + uptr name_size; + uptr region_address; + uptr region_size; + const char *region_kind; +}; + +// The following functions prints address description depending +// on the memory type (shadow/heap/stack/global). +void DescribeHeapAddress(uptr addr, uptr access_size); +bool DescribeAddressIfGlobal(uptr addr, uptr access_size); +bool DescribeAddressRelativeToGlobal(uptr addr, uptr access_size, + const __asan_global &g); +bool IsAddressNearGlobal(uptr addr, const __asan_global &g); +bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr); +bool DescribeAddressIfShadow(uptr addr, AddressDescription *descr = nullptr, + bool print = true); +bool ParseFrameDescription(const char *frame_descr, + InternalMmapVector<StackVarDescr> *vars); +bool DescribeAddressIfStack(uptr addr, uptr access_size); +// Determines memory type on its own. 
+void DescribeAddress(uptr addr, uptr access_size); + +void DescribeThread(AsanThreadContext *context); + +// Different kinds of error reports. +void NORETURN ReportStackOverflow(const SignalContext &sig); +void NORETURN ReportSIGSEGV(const char *description, const SignalContext &sig); +void NORETURN ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size, + BufferedStackTrace *free_stack); +void NORETURN ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack); +void NORETURN ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack); +void NORETURN ReportAllocTypeMismatch(uptr addr, BufferedStackTrace *free_stack, + AllocType alloc_type, + AllocType dealloc_type); +void NORETURN + ReportMallocUsableSizeNotOwned(uptr addr, BufferedStackTrace *stack); +void NORETURN + ReportSanitizerGetAllocatedSizeNotOwned(uptr addr, + BufferedStackTrace *stack); +void NORETURN + ReportStringFunctionMemoryRangesOverlap(const char *function, + const char *offset1, uptr length1, + const char *offset2, uptr length2, + BufferedStackTrace *stack); +void NORETURN ReportStringFunctionSizeOverflow(uptr offset, uptr size, + BufferedStackTrace *stack); +void NORETURN + ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end, + uptr old_mid, uptr new_mid, + BufferedStackTrace *stack); + +void NORETURN +ReportODRViolation(const __asan_global *g1, u32 stack_id1, + const __asan_global *g2, u32 stack_id2); + +// Mac-specific errors and warnings. 
+void WarnMacFreeUnallocated(uptr addr, uptr zone_ptr, const char *zone_name, + BufferedStackTrace *stack); +void NORETURN ReportMacMzReallocUnknown(uptr addr, uptr zone_ptr, + const char *zone_name, + BufferedStackTrace *stack); +void NORETURN ReportMacCfReallocUnknown(uptr addr, uptr zone_ptr, + const char *zone_name, + BufferedStackTrace *stack); + +} // namespace __asan diff --git a/contrib/compiler-rt/lib/asan/asan_rtl.cc b/contrib/compiler-rt/lib/asan/asan_rtl.cc new file mode 100644 index 000000000000..34fb111d5607 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_rtl.cc @@ -0,0 +1,776 @@ +//===-- asan_rtl.cc -------------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Main file of the ASan run-time library. +//===----------------------------------------------------------------------===// +#include "asan_activation.h" +#include "asan_allocator.h" +#include "asan_interceptors.h" +#include "asan_interface_internal.h" +#include "asan_internal.h" +#include "asan_mapping.h" +#include "asan_poisoning.h" +#include "asan_report.h" +#include "asan_stack.h" +#include "asan_stats.h" +#include "asan_suppressions.h" +#include "asan_thread.h" +#include "sanitizer_common/sanitizer_atomic.h" +#include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_symbolizer.h" +#include "lsan/lsan_common.h" + +int __asan_option_detect_stack_use_after_return; // Global interface symbol. +uptr *__asan_test_only_reported_buggy_pointer; // Used only for testing asan. 
+ +namespace __asan { + +uptr AsanMappingProfile[kAsanMappingProfileSize]; + +static void AsanDie() { + static atomic_uint32_t num_calls; + if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) != 0) { + // Don't die twice - run a busy loop. + while (1) { } + } + if (flags()->sleep_before_dying) { + Report("Sleeping for %d second(s)\n", flags()->sleep_before_dying); + SleepForSeconds(flags()->sleep_before_dying); + } + if (flags()->unmap_shadow_on_exit) { + if (kMidMemBeg) { + UnmapOrDie((void*)kLowShadowBeg, kMidMemBeg - kLowShadowBeg); + UnmapOrDie((void*)kMidMemEnd, kHighShadowEnd - kMidMemEnd); + } else { + UnmapOrDie((void*)kLowShadowBeg, kHighShadowEnd - kLowShadowBeg); + } + } + if (common_flags()->coverage) + __sanitizer_cov_dump(); + if (death_callback) + death_callback(); + if (flags()->abort_on_error) + Abort(); + internal__exit(flags()->exitcode); +} + +static void AsanCheckFailed(const char *file, int line, const char *cond, + u64 v1, u64 v2) { + Report("AddressSanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n", file, + line, cond, (uptr)v1, (uptr)v2); + // FIXME: check for infinite recursion without a thread-local counter here. + PRINT_CURRENT_STACK_CHECK(); + Die(); +} + +// -------------------------- Flags ------------------------- {{{1 +static const int kDefaultMallocContextSize = 30; + +Flags asan_flags_dont_use_directly; // use via flags(). + +static const char *MaybeCallAsanDefaultOptions() { + return (&__asan_default_options) ? __asan_default_options() : ""; +} + +static const char *MaybeUseAsanDefaultOptionsCompileDefinition() { +#ifdef ASAN_DEFAULT_OPTIONS +// Stringize the macro value. 
+# define ASAN_STRINGIZE(x) #x +# define ASAN_STRINGIZE_OPTIONS(options) ASAN_STRINGIZE(options) + return ASAN_STRINGIZE_OPTIONS(ASAN_DEFAULT_OPTIONS); +#else + return ""; +#endif +} + +static void ParseFlagsFromString(Flags *f, const char *str) { + CommonFlags *cf = common_flags(); + ParseCommonFlagsFromString(cf, str); + CHECK((uptr)cf->malloc_context_size <= kStackTraceMax); + // Please write meaningful flag descriptions when adding new flags. + ParseFlag(str, &f->quarantine_size, "quarantine_size", + "Size (in bytes) of quarantine used to detect use-after-free " + "errors. Lower value may reduce memory usage but increase the " + "chance of false negatives."); + ParseFlag(str, &f->redzone, "redzone", + "Minimal size (in bytes) of redzones around heap objects. " + "Requirement: redzone >= 16, is a power of two."); + ParseFlag(str, &f->max_redzone, "max_redzone", + "Maximal size (in bytes) of redzones around heap objects."); + CHECK_GE(f->redzone, 16); + CHECK_GE(f->max_redzone, f->redzone); + CHECK_LE(f->max_redzone, 2048); + CHECK(IsPowerOfTwo(f->redzone)); + CHECK(IsPowerOfTwo(f->max_redzone)); + + ParseFlag(str, &f->debug, "debug", + "If set, prints some debugging information and does additional checks."); + ParseFlag(str, &f->report_globals, "report_globals", + "Controls the way to handle globals (0 - don't detect buffer overflow on " + "globals, 1 - detect buffer overflow, 2 - print data about registered " + "globals)."); + + ParseFlag(str, &f->check_initialization_order, + "check_initialization_order", + "If set, attempts to catch initialization order issues."); + + ParseFlag(str, &f->replace_str, "replace_str", + "If set, uses custom wrappers and replacements for libc string functions " + "to find more errors."); + + ParseFlag(str, &f->replace_intrin, "replace_intrin", + "If set, uses custom wrappers for memset/memcpy/memmove intrinsics."); + ParseFlag(str, &f->mac_ignore_invalid_free, "mac_ignore_invalid_free", + "Ignore invalid free() calls to work around 
some bugs. Used on OS X " + "only."); + ParseFlag(str, &f->detect_stack_use_after_return, + "detect_stack_use_after_return", + "Enables stack-use-after-return checking at run-time."); + ParseFlag(str, &f->min_uar_stack_size_log, "min_uar_stack_size_log", + "Minimum fake stack size log."); + ParseFlag(str, &f->max_uar_stack_size_log, "max_uar_stack_size_log", + "Maximum fake stack size log."); + ParseFlag(str, &f->uar_noreserve, "uar_noreserve", + "Use mmap with 'noreserve' flag to allocate fake stack."); + ParseFlag(str, &f->max_malloc_fill_size, "max_malloc_fill_size", + "ASan allocator flag. max_malloc_fill_size is the maximal amount of " + "bytes that will be filled with malloc_fill_byte on malloc."); + ParseFlag(str, &f->malloc_fill_byte, "malloc_fill_byte", + "Value used to fill the newly allocated memory."); + ParseFlag(str, &f->exitcode, "exitcode", + "Override the program exit status if the tool found an error."); + ParseFlag(str, &f->allow_user_poisoning, "allow_user_poisoning", + "If set, user may manually mark memory regions as poisoned or " + "unpoisoned."); + ParseFlag(str, &f->sleep_before_dying, "sleep_before_dying", + "Number of seconds to sleep between printing an error report and " + "terminating the program. Useful for debugging purposes (e.g. 
when one " + "needs to attach gdb)."); + + ParseFlag(str, &f->check_malloc_usable_size, "check_malloc_usable_size", + "Allows the users to work around the bug in Nvidia drivers prior to " + "295.*."); + + ParseFlag(str, &f->unmap_shadow_on_exit, "unmap_shadow_on_exit", + "If set, explicitly unmaps the (huge) shadow at exit."); + ParseFlag(str, &f->abort_on_error, "abort_on_error", + "If set, the tool calls abort() instead of _exit() after printing the " + "error report."); + ParseFlag(str, &f->print_stats, "print_stats", + "Print various statistics after printing an error message or if " + "atexit=1."); + ParseFlag(str, &f->print_legend, "print_legend", + "Print the legend for the shadow bytes."); + ParseFlag(str, &f->atexit, "atexit", + "If set, prints ASan exit stats even after program terminates " + "successfully."); + + ParseFlag(str, &f->allow_reexec, "allow_reexec", + "Allow the tool to re-exec the program. This may interfere badly with " + "the debugger."); + + ParseFlag(str, &f->print_full_thread_history, + "print_full_thread_history", + "If set, prints thread creation stacks for the threads involved in the " + "report and their ancestors up to the main thread."); + + ParseFlag(str, &f->poison_heap, "poison_heap", + "Poison (or not) the heap memory on [de]allocation. Zero value is useful " + "for benchmarking the allocator or instrumentator."); + + ParseFlag(str, &f->poison_array_cookie, "poison_array_cookie", + "Poison (or not) the array cookie after operator new[]."); + + ParseFlag(str, &f->poison_partial, "poison_partial", + "If true, poison partially addressable 8-byte aligned words " + "(default=true). 
This flag affects heap and global buffers, but not " + "stack buffers."); + + ParseFlag(str, &f->alloc_dealloc_mismatch, "alloc_dealloc_mismatch", + "Report errors on malloc/delete, new/free, new/delete[], etc."); + + ParseFlag(str, &f->new_delete_type_mismatch, "new_delete_type_mismatch", + "Report errors on mismatch between size of new and delete."); + + ParseFlag(str, &f->strict_memcmp, "strict_memcmp", + "If true, assume that memcmp(p1, p2, n) always reads n bytes before " + "comparing p1 and p2."); + + ParseFlag(str, &f->strict_init_order, "strict_init_order", + "If true, assume that dynamic initializers can never access globals from " + "other modules, even if the latter are already initialized."); + + ParseFlag(str, &f->start_deactivated, "start_deactivated", + "If true, ASan tweaks a bunch of other flags (quarantine, redzone, heap " + "poisoning) to reduce memory consumption as much as possible, and " + "restores them to original values when the first instrumented module is " + "loaded into the process. This is mainly intended to be used on " + "Android. "); + + ParseFlag(str, &f->detect_invalid_pointer_pairs, + "detect_invalid_pointer_pairs", + "If non-zero, try to detect operations like <, <=, >, >= and - on " + "invalid pointer pairs (e.g. when pointers belong to different objects). " + "The bigger the value the harder we try."); + + ParseFlag(str, &f->detect_container_overflow, + "detect_container_overflow", + "If true, honor the container overflow annotations. 
" + "See https://code.google.com/p/address-sanitizer/wiki/ContainerOverflow"); + + ParseFlag(str, &f->detect_odr_violation, "detect_odr_violation", + "If >=2, detect violation of One-Definition-Rule (ODR); " + "If ==1, detect ODR-violation only if the two variables " + "have different sizes"); + + ParseFlag(str, &f->dump_instruction_bytes, "dump_instruction_bytes", + "If true, dump 16 bytes starting at the instruction that caused SEGV"); +} + +void InitializeFlags(Flags *f, const char *env) { + CommonFlags *cf = common_flags(); + SetCommonFlagsDefaults(cf); + cf->detect_leaks = CAN_SANITIZE_LEAKS; + cf->external_symbolizer_path = GetEnv("ASAN_SYMBOLIZER_PATH"); + cf->malloc_context_size = kDefaultMallocContextSize; + cf->intercept_tls_get_addr = true; + cf->coverage = false; + + internal_memset(f, 0, sizeof(*f)); + f->quarantine_size = (ASAN_LOW_MEMORY) ? 1UL << 26 : 1UL << 28; + f->redzone = 16; + f->max_redzone = 2048; + f->debug = false; + f->report_globals = 1; + f->check_initialization_order = false; + f->replace_str = true; + f->replace_intrin = true; + f->mac_ignore_invalid_free = false; + f->detect_stack_use_after_return = false; // Also needs the compiler flag. + f->min_uar_stack_size_log = 16; // We can't do smaller anyway. + f->max_uar_stack_size_log = 20; // 1Mb per size class, i.e. ~11Mb per thread. + f->uar_noreserve = false; + f->max_malloc_fill_size = 0x1000; // By default, fill only the first 4K. + f->malloc_fill_byte = 0xbe; + f->exitcode = ASAN_DEFAULT_FAILURE_EXITCODE; + f->allow_user_poisoning = true; + f->sleep_before_dying = 0; + f->check_malloc_usable_size = true; + f->unmap_shadow_on_exit = false; + f->abort_on_error = false; + f->print_stats = false; + f->print_legend = true; + f->atexit = false; + f->allow_reexec = true; + f->print_full_thread_history = true; + f->poison_heap = true; + f->poison_array_cookie = true; + f->poison_partial = true; + // Turn off alloc/dealloc mismatch checker on Mac and Windows for now. 
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=131 + // https://code.google.com/p/address-sanitizer/issues/detail?id=309 + // TODO(glider,timurrrr): Fix known issues and enable this back. + f->alloc_dealloc_mismatch = (SANITIZER_MAC == 0) && (SANITIZER_WINDOWS == 0); + f->new_delete_type_mismatch = true; + f->strict_memcmp = true; + f->strict_init_order = false; + f->start_deactivated = false; + f->detect_invalid_pointer_pairs = 0; + f->detect_container_overflow = true; + f->detect_odr_violation = 2; + f->dump_instruction_bytes = false; + + // Override from compile definition. + ParseFlagsFromString(f, MaybeUseAsanDefaultOptionsCompileDefinition()); + + // Override from user-specified string. + ParseFlagsFromString(f, MaybeCallAsanDefaultOptions()); + VReport(1, "Using the defaults from __asan_default_options: %s\n", + MaybeCallAsanDefaultOptions()); + + // Override from command line. + ParseFlagsFromString(f, env); + if (common_flags()->help) { + PrintFlagDescriptions(); + } + + if (!CAN_SANITIZE_LEAKS && cf->detect_leaks) { + Report("%s: detect_leaks is not supported on this platform.\n", + SanitizerToolName); + cf->detect_leaks = false; + } + + // Make "strict_init_order" imply "check_initialization_order". + // TODO(samsonov): Use a single runtime flag for an init-order checker. + if (f->strict_init_order) { + f->check_initialization_order = true; + } +} + +// Parse flags that may change between startup and activation. +// On Android they come from a system property. +// On other platforms this is no-op. 
+void ParseExtraActivationFlags() { + char buf[100]; + GetExtraActivationFlags(buf, sizeof(buf)); + ParseFlagsFromString(flags(), buf); + if (buf[0] != '\0') + VReport(1, "Extra activation flags: %s\n", buf); +} + +// -------------------------- Globals --------------------- {{{1 +int asan_inited; +bool asan_init_is_running; +void (*death_callback)(void); + +#if !ASAN_FIXED_MAPPING +uptr kHighMemEnd, kMidMemBeg, kMidMemEnd; +#endif + +// -------------------------- Misc ---------------- {{{1 +void ShowStatsAndAbort() { + __asan_print_accumulated_stats(); + Die(); +} + +// ---------------------- mmap -------------------- {{{1 +// Reserve memory range [beg, end]. +static void ReserveShadowMemoryRange(uptr beg, uptr end) { + CHECK_EQ((beg % GetPageSizeCached()), 0); + CHECK_EQ(((end + 1) % GetPageSizeCached()), 0); + uptr size = end - beg + 1; + DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb. + void *res = MmapFixedNoReserve(beg, size); + if (res != (void*)beg) { + Report("ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. 
" + "Perhaps you're using ulimit -v\n", size); + Abort(); + } +} + +// --------------- LowLevelAllocateCallback ---------- {{{1 +static void OnLowLevelAllocate(uptr ptr, uptr size) { + PoisonShadow(ptr, size, kAsanInternalHeapMagic); +} + +// -------------------------- Run-time entry ------------------- {{{1 +// exported functions +#define ASAN_REPORT_ERROR(type, is_write, size) \ +extern "C" NOINLINE INTERFACE_ATTRIBUTE \ +void __asan_report_ ## type ## size(uptr addr); \ +void __asan_report_ ## type ## size(uptr addr) { \ + GET_CALLER_PC_BP_SP; \ + __asan_report_error(pc, bp, sp, addr, is_write, size); \ +} + +ASAN_REPORT_ERROR(load, false, 1) +ASAN_REPORT_ERROR(load, false, 2) +ASAN_REPORT_ERROR(load, false, 4) +ASAN_REPORT_ERROR(load, false, 8) +ASAN_REPORT_ERROR(load, false, 16) +ASAN_REPORT_ERROR(store, true, 1) +ASAN_REPORT_ERROR(store, true, 2) +ASAN_REPORT_ERROR(store, true, 4) +ASAN_REPORT_ERROR(store, true, 8) +ASAN_REPORT_ERROR(store, true, 16) + +#define ASAN_REPORT_ERROR_N(type, is_write) \ +extern "C" NOINLINE INTERFACE_ATTRIBUTE \ +void __asan_report_ ## type ## _n(uptr addr, uptr size); \ +void __asan_report_ ## type ## _n(uptr addr, uptr size) { \ + GET_CALLER_PC_BP_SP; \ + __asan_report_error(pc, bp, sp, addr, is_write, size); \ +} + +ASAN_REPORT_ERROR_N(load, false) +ASAN_REPORT_ERROR_N(store, true) + +#define ASAN_MEMORY_ACCESS_CALLBACK(type, is_write, size) \ + extern "C" NOINLINE INTERFACE_ATTRIBUTE void __asan_##type##size(uptr addr); \ + void __asan_##type##size(uptr addr) { \ + uptr sp = MEM_TO_SHADOW(addr); \ + uptr s = size <= SHADOW_GRANULARITY ? 
*reinterpret_cast<u8 *>(sp) \ + : *reinterpret_cast<u16 *>(sp); \ + if (UNLIKELY(s)) { \ + if (UNLIKELY(size >= SHADOW_GRANULARITY || \ + ((s8)((addr & (SHADOW_GRANULARITY - 1)) + size - 1)) >= \ + (s8)s)) { \ + if (__asan_test_only_reported_buggy_pointer) { \ + *__asan_test_only_reported_buggy_pointer = addr; \ + } else { \ + GET_CALLER_PC_BP_SP; \ + __asan_report_error(pc, bp, sp, addr, is_write, size); \ + } \ + } \ + } \ + } + +ASAN_MEMORY_ACCESS_CALLBACK(load, false, 1) +ASAN_MEMORY_ACCESS_CALLBACK(load, false, 2) +ASAN_MEMORY_ACCESS_CALLBACK(load, false, 4) +ASAN_MEMORY_ACCESS_CALLBACK(load, false, 8) +ASAN_MEMORY_ACCESS_CALLBACK(load, false, 16) +ASAN_MEMORY_ACCESS_CALLBACK(store, true, 1) +ASAN_MEMORY_ACCESS_CALLBACK(store, true, 2) +ASAN_MEMORY_ACCESS_CALLBACK(store, true, 4) +ASAN_MEMORY_ACCESS_CALLBACK(store, true, 8) +ASAN_MEMORY_ACCESS_CALLBACK(store, true, 16) + +extern "C" +NOINLINE INTERFACE_ATTRIBUTE void __asan_loadN(uptr addr, uptr size) { + if (__asan_region_is_poisoned(addr, size)) { + GET_CALLER_PC_BP_SP; + __asan_report_error(pc, bp, sp, addr, false, size); + } +} + +extern "C" +NOINLINE INTERFACE_ATTRIBUTE void __asan_storeN(uptr addr, uptr size) { + if (__asan_region_is_poisoned(addr, size)) { + GET_CALLER_PC_BP_SP; + __asan_report_error(pc, bp, sp, addr, true, size); + } +} + +// Force the linker to keep the symbols for various ASan interface functions. +// We want to keep those in the executable in order to let the instrumented +// dynamic libraries access the symbol even if it is not used by the executable +// itself. This should help if the build system is removing dead code at link +// time. +static NOINLINE void force_interface_symbols() { + volatile int fake_condition = 0; // prevent dead condition elimination. + // __asan_report_* functions are noreturn, so we need a switch to prevent + // the compiler from removing any of them. 
+ switch (fake_condition) { + case 1: __asan_report_load1(0); break; + case 2: __asan_report_load2(0); break; + case 3: __asan_report_load4(0); break; + case 4: __asan_report_load8(0); break; + case 5: __asan_report_load16(0); break; + case 6: __asan_report_store1(0); break; + case 7: __asan_report_store2(0); break; + case 8: __asan_report_store4(0); break; + case 9: __asan_report_store8(0); break; + case 10: __asan_report_store16(0); break; + case 12: __asan_register_globals(0, 0); break; + case 13: __asan_unregister_globals(0, 0); break; + case 14: __asan_set_death_callback(0); break; + case 15: __asan_set_error_report_callback(0); break; + case 16: __asan_handle_no_return(); break; + case 17: __asan_address_is_poisoned(0); break; + case 25: __asan_poison_memory_region(0, 0); break; + case 26: __asan_unpoison_memory_region(0, 0); break; + case 27: __asan_set_error_exit_code(0); break; + case 30: __asan_before_dynamic_init(0); break; + case 31: __asan_after_dynamic_init(); break; + case 32: __asan_poison_stack_memory(0, 0); break; + case 33: __asan_unpoison_stack_memory(0, 0); break; + case 34: __asan_region_is_poisoned(0, 0); break; + case 35: __asan_describe_address(0); break; + } +} + +static void asan_atexit() { + Printf("AddressSanitizer exit stats:\n"); + __asan_print_accumulated_stats(); + // Print AsanMappingProfile. 
+ for (uptr i = 0; i < kAsanMappingProfileSize; i++) { + if (AsanMappingProfile[i] == 0) continue; + Printf("asan_mapping.h:%zd -- %zd\n", i, AsanMappingProfile[i]); + } +} + +static void InitializeHighMemEnd() { +#if !ASAN_FIXED_MAPPING + kHighMemEnd = GetMaxVirtualAddress(); + // Increase kHighMemEnd to make sure it's properly + // aligned together with kHighMemBeg: + kHighMemEnd |= SHADOW_GRANULARITY * GetPageSizeCached() - 1; +#endif // !ASAN_FIXED_MAPPING + CHECK_EQ((kHighMemBeg % GetPageSizeCached()), 0); +} + +static void ProtectGap(uptr a, uptr size) { + CHECK_EQ(a, (uptr)Mprotect(a, size)); +} + +static void PrintAddressSpaceLayout() { + Printf("|| `[%p, %p]` || HighMem ||\n", + (void*)kHighMemBeg, (void*)kHighMemEnd); + Printf("|| `[%p, %p]` || HighShadow ||\n", + (void*)kHighShadowBeg, (void*)kHighShadowEnd); + if (kMidMemBeg) { + Printf("|| `[%p, %p]` || ShadowGap3 ||\n", + (void*)kShadowGap3Beg, (void*)kShadowGap3End); + Printf("|| `[%p, %p]` || MidMem ||\n", + (void*)kMidMemBeg, (void*)kMidMemEnd); + Printf("|| `[%p, %p]` || ShadowGap2 ||\n", + (void*)kShadowGap2Beg, (void*)kShadowGap2End); + Printf("|| `[%p, %p]` || MidShadow ||\n", + (void*)kMidShadowBeg, (void*)kMidShadowEnd); + } + Printf("|| `[%p, %p]` || ShadowGap ||\n", + (void*)kShadowGapBeg, (void*)kShadowGapEnd); + if (kLowShadowBeg) { + Printf("|| `[%p, %p]` || LowShadow ||\n", + (void*)kLowShadowBeg, (void*)kLowShadowEnd); + Printf("|| `[%p, %p]` || LowMem ||\n", + (void*)kLowMemBeg, (void*)kLowMemEnd); + } + Printf("MemToShadow(shadow): %p %p %p %p", + (void*)MEM_TO_SHADOW(kLowShadowBeg), + (void*)MEM_TO_SHADOW(kLowShadowEnd), + (void*)MEM_TO_SHADOW(kHighShadowBeg), + (void*)MEM_TO_SHADOW(kHighShadowEnd)); + if (kMidMemBeg) { + Printf(" %p %p", + (void*)MEM_TO_SHADOW(kMidShadowBeg), + (void*)MEM_TO_SHADOW(kMidShadowEnd)); + } + Printf("\n"); + Printf("redzone=%zu\n", (uptr)flags()->redzone); + Printf("max_redzone=%zu\n", (uptr)flags()->max_redzone); + Printf("quarantine_size=%zuM\n", 
(uptr)flags()->quarantine_size >> 20); + Printf("malloc_context_size=%zu\n", + (uptr)common_flags()->malloc_context_size); + + Printf("SHADOW_SCALE: %zx\n", (uptr)SHADOW_SCALE); + Printf("SHADOW_GRANULARITY: %zx\n", (uptr)SHADOW_GRANULARITY); + Printf("SHADOW_OFFSET: %zx\n", (uptr)SHADOW_OFFSET); + CHECK(SHADOW_SCALE >= 3 && SHADOW_SCALE <= 7); + if (kMidMemBeg) + CHECK(kMidShadowBeg > kLowShadowEnd && + kMidMemBeg > kMidShadowEnd && + kHighShadowBeg > kMidMemEnd); +} + +static void AsanInitInternal() { + if (LIKELY(asan_inited)) return; + SanitizerToolName = "AddressSanitizer"; + CHECK(!asan_init_is_running && "ASan init calls itself!"); + asan_init_is_running = true; + + // Initialize flags. This must be done early, because most of the + // initialization steps look at flags(). + const char *options = GetEnv("ASAN_OPTIONS"); + InitializeFlags(flags(), options); + + InitializeHighMemEnd(); + + // Make sure we are not statically linked. + AsanDoesNotSupportStaticLinkage(); + + // Install tool-specific callbacks in sanitizer_common. + SetDieCallback(AsanDie); + SetCheckFailedCallback(AsanCheckFailed); + SetPrintfAndReportCallback(AppendToErrorMessageBuffer); + + if (!flags()->start_deactivated) + ParseExtraActivationFlags(); + + __sanitizer_set_report_path(common_flags()->log_path); + __asan_option_detect_stack_use_after_return = + flags()->detect_stack_use_after_return; + CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log); + + if (options) { + VReport(1, "Parsed ASAN_OPTIONS: %s\n", options); + } + + if (flags()->start_deactivated) + AsanStartDeactivated(); + + // Re-exec ourselves if we need to set additional env or command line args. + MaybeReexec(); + + // Setup internal allocator callback. + SetLowLevelAllocateCallback(OnLowLevelAllocate); + + InitializeAsanInterceptors(); + + // Enable system log ("adb logcat") on Android. 
+ // Doing this before interceptors are initialized crashes in: + // AsanInitInternal -> android_log_write -> __interceptor_strcmp + AndroidLogInit(); + + ReplaceSystemMalloc(); + + uptr shadow_start = kLowShadowBeg; + if (kLowShadowBeg) + shadow_start -= GetMmapGranularity(); + bool full_shadow_is_available = + MemoryRangeIsAvailable(shadow_start, kHighShadowEnd); + +#if SANITIZER_LINUX && defined(__x86_64__) && defined(_LP64) && \ + !ASAN_FIXED_MAPPING + if (!full_shadow_is_available) { + kMidMemBeg = kLowMemEnd < 0x3000000000ULL ? 0x3000000000ULL : 0; + kMidMemEnd = kLowMemEnd < 0x3000000000ULL ? 0x4fffffffffULL : 0; + } +#endif + + if (common_flags()->verbosity) + PrintAddressSpaceLayout(); + + DisableCoreDumperIfNecessary(); + + if (full_shadow_is_available) { + // mmap the low shadow plus at least one page at the left. + if (kLowShadowBeg) + ReserveShadowMemoryRange(shadow_start, kLowShadowEnd); + // mmap the high shadow. + ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd); + // protect the gap. + ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1); + CHECK_EQ(kShadowGapEnd, kHighShadowBeg - 1); + } else if (kMidMemBeg && + MemoryRangeIsAvailable(shadow_start, kMidMemBeg - 1) && + MemoryRangeIsAvailable(kMidMemEnd + 1, kHighShadowEnd)) { + CHECK(kLowShadowBeg != kLowShadowEnd); + // mmap the low shadow plus at least one page at the left. + ReserveShadowMemoryRange(shadow_start, kLowShadowEnd); + // mmap the mid shadow. + ReserveShadowMemoryRange(kMidShadowBeg, kMidShadowEnd); + // mmap the high shadow. + ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd); + // protect the gaps. + ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1); + ProtectGap(kShadowGap2Beg, kShadowGap2End - kShadowGap2Beg + 1); + ProtectGap(kShadowGap3Beg, kShadowGap3End - kShadowGap3Beg + 1); + } else { + Report("Shadow memory range interleaves with an existing memory mapping. " + "ASan cannot proceed correctly. 
ABORTING.\n"); + DumpProcessMap(); + Die(); + } + + AsanTSDInit(PlatformTSDDtor); + InstallDeadlySignalHandlers(AsanOnSIGSEGV); + + InitializeAllocator(); + + // On Linux AsanThread::ThreadStart() calls malloc() that's why asan_inited + // should be set to 1 prior to initializing the threads. + asan_inited = 1; + asan_init_is_running = false; + + if (flags()->atexit) + Atexit(asan_atexit); + + if (common_flags()->coverage) { + __sanitizer_cov_init(); + Atexit(__sanitizer_cov_dump); + } + + // interceptors + InitTlsSize(); + + // Create main thread. + AsanThread *main_thread = AsanThread::Create( + /* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ 0, + /* stack */ nullptr, /* detached */ true); + CHECK_EQ(0, main_thread->tid()); + SetCurrentThread(main_thread); + main_thread->ThreadStart(internal_getpid(), + /* signal_thread_is_registered */ nullptr); + force_interface_symbols(); // no-op. + SanitizerInitializeUnwinder(); + +#if CAN_SANITIZE_LEAKS + __lsan::InitCommonLsan(false); + if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) { + Atexit(__lsan::DoLeakCheck); + } +#endif // CAN_SANITIZE_LEAKS + + InitializeSuppressions(); + + VReport(1, "AddressSanitizer Init done\n"); +} + +// Initialize as requested from some part of ASan runtime library (interceptors, +// allocator, etc). +void AsanInitFromRtl() { + AsanInitInternal(); +} + +#if ASAN_DYNAMIC +// Initialize runtime in case it's LD_PRELOAD-ed into unsanitized executable +// (and thus normal initializer from .preinit_array haven't run). 
+ +class AsanInitializer { +public: // NOLINT + AsanInitializer() { + AsanCheckIncompatibleRT(); + AsanCheckDynamicRTPrereqs(); + AsanInitFromRtl(); + } +}; + +static AsanInitializer asan_initializer; +#endif // ASAN_DYNAMIC + +} // namespace __asan + +// ---------------------- Interface ---------------- {{{1 +using namespace __asan; // NOLINT + +#if !SANITIZER_SUPPORTS_WEAK_HOOKS +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE +const char* __asan_default_options() { return ""; } +} // extern "C" +#endif + +int NOINLINE __asan_set_error_exit_code(int exit_code) { + int old = flags()->exitcode; + flags()->exitcode = exit_code; + return old; +} + +void NOINLINE __asan_handle_no_return() { + int local_stack; + AsanThread *curr_thread = GetCurrentThread(); + CHECK(curr_thread); + uptr PageSize = GetPageSizeCached(); + uptr top = curr_thread->stack_top(); + uptr bottom = ((uptr)&local_stack - PageSize) & ~(PageSize-1); + static const uptr kMaxExpectedCleanupSize = 64 << 20; // 64M + if (top - bottom > kMaxExpectedCleanupSize) { + static bool reported_warning = false; + if (reported_warning) + return; + reported_warning = true; + Report("WARNING: ASan is ignoring requested __asan_handle_no_return: " + "stack top: %p; bottom %p; size: %p (%zd)\n" + "False positive error reports may follow\n" + "For details see " + "http://code.google.com/p/address-sanitizer/issues/detail?id=189\n", + top, bottom, top - bottom, top - bottom); + return; + } + PoisonShadow(bottom, top - bottom, 0); + if (curr_thread->has_fake_stack()) + curr_thread->fake_stack()->HandleNoReturn(); +} + +void NOINLINE __asan_set_death_callback(void (*callback)(void)) { + death_callback = callback; +} + +// Initialize as requested from instrumented application code. +// We use this call as a trigger to wake up ASan from deactivated state. 
+void __asan_init() { + AsanCheckIncompatibleRT(); + AsanActivate(); + AsanInitInternal(); +} diff --git a/contrib/compiler-rt/lib/asan/asan_stack.cc b/contrib/compiler-rt/lib/asan/asan_stack.cc new file mode 100644 index 000000000000..8188f3b5b6e9 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_stack.cc @@ -0,0 +1,25 @@ +//===-- asan_stack.cc -----------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Code for ASan stack trace. +//===----------------------------------------------------------------------===// +#include "asan_internal.h" +#include "asan_stack.h" + +// ------------------ Interface -------------- {{{1 + +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_print_stack_trace() { + using namespace __asan; + PRINT_CURRENT_STACK(); +} +} // extern "C" diff --git a/contrib/compiler-rt/lib/asan/asan_stack.h b/contrib/compiler-rt/lib/asan/asan_stack.h new file mode 100644 index 000000000000..a995256212e1 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_stack.h @@ -0,0 +1,114 @@ +//===-- asan_stack.h --------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// ASan-private header for asan_stack.cc. 
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_STACK_H
+#define ASAN_STACK_H
+
+#include "asan_flags.h"
+#include "asan_thread.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+
+namespace __asan {
+
+// Get the stack trace with the given pc and bp.
+// The pc will be in the position 0 of the resulting stack trace.
+// The bp may refer to the current frame or to the caller's frame.
+// Unwinds at most |max_depth| frames into |stack|; |fast| selects the
+// frame-pointer-based unwinder over the slow (_Unwind_Backtrace-style) one.
+ALWAYS_INLINE
+void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth,
+                                     uptr pc, uptr bp, void *context,
+                                     bool fast) {
+#if SANITIZER_WINDOWS
+  // On Windows there are no per-thread stack bounds to pass; unwind directly.
+  stack->Unwind(max_depth, pc, bp, context, 0, 0, fast);
+#else
+  AsanThread *t;
+  // Default to an empty trace; it is filled in only when unwinding is safe.
+  stack->size = 0;
+  // Do not unwind before ASan has finished initializing -- presumably the
+  // thread bookkeeping needed below is not ready yet (NOTE(review): confirm).
+  if (LIKELY(asan_inited)) {
+    if ((t = GetCurrentThread()) && !t->isUnwinding()) {
+      // On FreeBSD the slow unwinding that leverages _Unwind_Backtrace()
+      // yields the call stack of the signal's handler and not of the code
+      // that raised the signal (as it does on Linux).
+      if (SANITIZER_FREEBSD && t->isInDeadlySignal()) fast = true;
+      uptr stack_top = t->stack_top();
+      uptr stack_bottom = t->stack_bottom();
+      // Guards against reentrant unwinds: pairs with the isUnwinding() check
+      // above for the duration of this scope.
+      ScopedUnwinding unwind_scope(t);
+      stack->Unwind(max_depth, pc, bp, context, stack_top, stack_bottom, fast);
+    } else if (t == 0 && !fast) {
+      /* If GetCurrentThread() has failed, try to do slow unwind anyways. */
+      stack->Unwind(max_depth, pc, bp, context, 0, 0, false);
+    }
+    // Otherwise (thread known but already unwinding) the trace stays empty.
+  }
+#endif // SANITIZER_WINDOWS
+}
+
+} // namespace __asan
+
+// NOTE: A Rule of thumb is to retrieve stack trace in the interceptors
+// as early as possible (in functions exposed to the user), as we generally
+// don't want stack trace to contain functions from ASan internals.
+
+// Captures a stack trace into a freshly declared local named |stack|.
+// For max_size <= 2 the trace is filled inline from the current pc and the
+// caller's pc without invoking the unwinder; otherwise the full unwinder
+// (GetStackTraceWithPcBpAndContext) is used.
+#define GET_STACK_TRACE(max_size, fast)                          \
+  BufferedStackTrace stack;                                      \
+  if (max_size <= 2) {                                           \
+    stack.size = max_size;                                       \
+    if (max_size > 0) {                                          \
+      stack.top_frame_bp = GET_CURRENT_FRAME();                  \
+      stack.trace_buffer[0] = StackTrace::GetCurrentPc();        \
+      if (max_size > 1)                                          \
+        stack.trace_buffer[1] = GET_CALLER_PC();                 \
+    }                                                            \
+  } else {                                                       \
+    GetStackTraceWithPcBpAndContext(&stack, max_size,            \
+                                    StackTrace::GetCurrentPc(),  \
+                                    GET_CURRENT_FRAME(), 0, fast); \
+  }
+
+// Full-depth trace from an explicit pc/bp (e.g. the faulting frame),
+// honoring the fast_unwind_on_fatal flag.
+#define GET_STACK_TRACE_FATAL(pc, bp)                                \
+  BufferedStackTrace stack;                                          \
+  GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, 0, \
+                                  common_flags()->fast_unwind_on_fatal)
+
+// Like GET_STACK_TRACE_FATAL, but takes pc/bp/context from a signal
+// descriptor |sig|.
+#define GET_STACK_TRACE_SIGNAL(sig)                                  \
+  BufferedStackTrace stack;                                          \
+  GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax,            \
+                                  (sig).pc, (sig).bp, (sig).context, \
+                                  common_flags()->fast_unwind_on_fatal)
+
+#define GET_STACK_TRACE_FATAL_HERE \
+  GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
+
+#define GET_STACK_TRACE_CHECK_HERE \
+  GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_check)
+
+// Thread-creation traces always use the fast unwinder.
+#define GET_STACK_TRACE_THREAD \
+  GET_STACK_TRACE(kStackTraceMax, true)
+
+// Allocation traces: depth is capped by malloc_context_size.
+#define GET_STACK_TRACE_MALLOC                           \
+  GET_STACK_TRACE(common_flags()->malloc_context_size,   \
+                  common_flags()->fast_unwind_on_malloc)
+
+#define GET_STACK_TRACE_FREE GET_STACK_TRACE_MALLOC
+
+// Captures the current trace and prints it; the extra braces scope the
+// |stack| local declared by GET_STACK_TRACE.
+#define PRINT_CURRENT_STACK()   \
+  {                             \
+    GET_STACK_TRACE_FATAL_HERE; \
+    stack.Print();              \
+  }
+
+#define PRINT_CURRENT_STACK_CHECK() \
+  {                                 \
+    GET_STACK_TRACE_CHECK_HERE;     \
+    stack.Print();                  \
+  }
+
+#endif // ASAN_STACK_H
diff --git a/contrib/compiler-rt/lib/asan/asan_stats.cc b/contrib/compiler-rt/lib/asan/asan_stats.cc
new file mode 100644
index 000000000000..a78b7b1e926d
--- /dev/null
+++ b/contrib/compiler-rt/lib/asan/asan_stats.cc
@@ -0,0 +1,179 @@
+//===-- asan_stats.cc -----------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois
Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Code related to statistics collected by AddressSanitizer. +//===----------------------------------------------------------------------===// +#include "asan_interceptors.h" +#include "asan_internal.h" +#include "asan_stats.h" +#include "asan_thread.h" +#include "sanitizer_common/sanitizer_allocator_interface.h" +#include "sanitizer_common/sanitizer_mutex.h" +#include "sanitizer_common/sanitizer_stackdepot.h" + +namespace __asan { + +AsanStats::AsanStats() { + Clear(); +} + +void AsanStats::Clear() { + CHECK(REAL(memset)); + REAL(memset)(this, 0, sizeof(AsanStats)); +} + +static void PrintMallocStatsArray(const char *prefix, + uptr (&array)[kNumberOfSizeClasses]) { + Printf("%s", prefix); + for (uptr i = 0; i < kNumberOfSizeClasses; i++) { + if (!array[i]) continue; + Printf("%zu:%zu; ", i, array[i]); + } + Printf("\n"); +} + +void AsanStats::Print() { + Printf("Stats: %zuM malloced (%zuM for red zones) by %zu calls\n", + malloced>>20, malloced_redzones>>20, mallocs); + Printf("Stats: %zuM realloced by %zu calls\n", realloced>>20, reallocs); + Printf("Stats: %zuM freed by %zu calls\n", freed>>20, frees); + Printf("Stats: %zuM really freed by %zu calls\n", + really_freed>>20, real_frees); + Printf("Stats: %zuM (%zuM-%zuM) mmaped; %zu maps, %zu unmaps\n", + (mmaped-munmaped)>>20, mmaped>>20, munmaped>>20, + mmaps, munmaps); + + PrintMallocStatsArray(" mmaps by size class: ", mmaped_by_size); + PrintMallocStatsArray(" mallocs by size class: ", malloced_by_size); + PrintMallocStatsArray(" frees by size class: ", freed_by_size); + PrintMallocStatsArray(" rfrees by size class: ", really_freed_by_size); + Printf("Stats: malloc large: %zu small slow: %zu\n", + malloc_large, malloc_small_slow); +} + +void AsanStats::MergeFrom(const AsanStats *stats) { + 
uptr *dst_ptr = reinterpret_cast<uptr*>(this); + const uptr *src_ptr = reinterpret_cast<const uptr*>(stats); + uptr num_fields = sizeof(*this) / sizeof(uptr); + for (uptr i = 0; i < num_fields; i++) + dst_ptr[i] += src_ptr[i]; +} + +static BlockingMutex print_lock(LINKER_INITIALIZED); + +static AsanStats unknown_thread_stats(LINKER_INITIALIZED); +static AsanStats dead_threads_stats(LINKER_INITIALIZED); +static BlockingMutex dead_threads_stats_lock(LINKER_INITIALIZED); +// Required for malloc_zone_statistics() on OS X. This can't be stored in +// per-thread AsanStats. +static uptr max_malloced_memory; + +static void MergeThreadStats(ThreadContextBase *tctx_base, void *arg) { + AsanStats *accumulated_stats = reinterpret_cast<AsanStats*>(arg); + AsanThreadContext *tctx = static_cast<AsanThreadContext*>(tctx_base); + if (AsanThread *t = tctx->thread) + accumulated_stats->MergeFrom(&t->stats()); +} + +static void GetAccumulatedStats(AsanStats *stats) { + stats->Clear(); + { + ThreadRegistryLock l(&asanThreadRegistry()); + asanThreadRegistry() + .RunCallbackForEachThreadLocked(MergeThreadStats, stats); + } + stats->MergeFrom(&unknown_thread_stats); + { + BlockingMutexLock lock(&dead_threads_stats_lock); + stats->MergeFrom(&dead_threads_stats); + } + // This is not very accurate: we may miss allocation peaks that happen + // between two updates of accumulated_stats_. For more accurate bookkeeping + // the maximum should be updated on every malloc(), which is unacceptable. 
+ if (max_malloced_memory < stats->malloced) { + max_malloced_memory = stats->malloced; + } +} + +void FlushToDeadThreadStats(AsanStats *stats) { + BlockingMutexLock lock(&dead_threads_stats_lock); + dead_threads_stats.MergeFrom(stats); + stats->Clear(); +} + +void FillMallocStatistics(AsanMallocStats *malloc_stats) { + AsanStats stats; + GetAccumulatedStats(&stats); + malloc_stats->blocks_in_use = stats.mallocs; + malloc_stats->size_in_use = stats.malloced; + malloc_stats->max_size_in_use = max_malloced_memory; + malloc_stats->size_allocated = stats.mmaped; +} + +AsanStats &GetCurrentThreadStats() { + AsanThread *t = GetCurrentThread(); + return (t) ? t->stats() : unknown_thread_stats; +} + +static void PrintAccumulatedStats() { + AsanStats stats; + GetAccumulatedStats(&stats); + // Use lock to keep reports from mixing up. + BlockingMutexLock lock(&print_lock); + stats.Print(); + StackDepotStats *stack_depot_stats = StackDepotGetStats(); + Printf("Stats: StackDepot: %zd ids; %zdM allocated\n", + stack_depot_stats->n_uniq_ids, stack_depot_stats->allocated >> 20); + PrintInternalAllocatorStats(); +} + +} // namespace __asan + +// ---------------------- Interface ---------------- {{{1 +using namespace __asan; // NOLINT + +uptr __sanitizer_get_current_allocated_bytes() { + AsanStats stats; + GetAccumulatedStats(&stats); + uptr malloced = stats.malloced; + uptr freed = stats.freed; + // Return sane value if malloced < freed due to racy + // way we update accumulated stats. + return (malloced > freed) ? 
malloced - freed : 1; +} + +uptr __sanitizer_get_heap_size() { + AsanStats stats; + GetAccumulatedStats(&stats); + return stats.mmaped - stats.munmaped; +} + +uptr __sanitizer_get_free_bytes() { + AsanStats stats; + GetAccumulatedStats(&stats); + uptr total_free = stats.mmaped + - stats.munmaped + + stats.really_freed + + stats.really_freed_redzones; + uptr total_used = stats.malloced + + stats.malloced_redzones; + // Return sane value if total_free < total_used due to racy + // way we update accumulated stats. + return (total_free > total_used) ? total_free - total_used : 1; +} + +uptr __sanitizer_get_unmapped_bytes() { + return 0; +} + +void __asan_print_accumulated_stats() { + PrintAccumulatedStats(); +} diff --git a/contrib/compiler-rt/lib/asan/asan_stats.h b/contrib/compiler-rt/lib/asan/asan_stats.h new file mode 100644 index 000000000000..c66848dc5713 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_stats.h @@ -0,0 +1,78 @@ +//===-- asan_stats.h --------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// ASan-private header for statistics. +//===----------------------------------------------------------------------===// +#ifndef ASAN_STATS_H +#define ASAN_STATS_H + +#include "asan_allocator.h" +#include "asan_internal.h" + +namespace __asan { + +// AsanStats struct is NOT thread-safe. +// Each AsanThread has its own AsanStats, which are sometimes flushed +// to the accumulated AsanStats. +struct AsanStats { + // AsanStats must be a struct consisting of uptr fields only. + // When merging two AsanStats structs, we treat them as arrays of uptr. 
+ uptr mallocs; + uptr malloced; + uptr malloced_redzones; + uptr frees; + uptr freed; + uptr real_frees; + uptr really_freed; + uptr really_freed_redzones; + uptr reallocs; + uptr realloced; + uptr mmaps; + uptr mmaped; + uptr munmaps; + uptr munmaped; + uptr mmaped_by_size[kNumberOfSizeClasses]; + uptr malloced_by_size[kNumberOfSizeClasses]; + uptr freed_by_size[kNumberOfSizeClasses]; + uptr really_freed_by_size[kNumberOfSizeClasses]; + + uptr malloc_large; + uptr malloc_small_slow; + + // Ctor for global AsanStats (accumulated stats for dead threads). + explicit AsanStats(LinkerInitialized) { } + // Creates empty stats. + AsanStats(); + + void Print(); // Prints formatted stats to stderr. + void Clear(); + void MergeFrom(const AsanStats *stats); +}; + +// Returns stats for GetCurrentThread(), or stats for fake "unknown thread" +// if GetCurrentThread() returns 0. +AsanStats &GetCurrentThreadStats(); +// Flushes a given stats into accumulated stats of dead threads. +void FlushToDeadThreadStats(AsanStats *stats); + +// A cross-platform equivalent of malloc_statistics_t on Mac OS. +struct AsanMallocStats { + uptr blocks_in_use; + uptr size_in_use; + uptr max_size_in_use; + uptr size_allocated; +}; + +void FillMallocStatistics(AsanMallocStats *malloc_stats); + +} // namespace __asan + +#endif // ASAN_STATS_H diff --git a/contrib/compiler-rt/lib/asan/asan_suppressions.cc b/contrib/compiler-rt/lib/asan/asan_suppressions.cc new file mode 100644 index 000000000000..ef554716faa0 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_suppressions.cc @@ -0,0 +1,87 @@ +//===-- asan_suppressions.cc ----------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. 
+// +// Issue suppression and suppression-related functions. +//===----------------------------------------------------------------------===// + +#include "asan_suppressions.h" + +#include "asan_stack.h" +#include "sanitizer_common/sanitizer_suppressions.h" +#include "sanitizer_common/sanitizer_symbolizer.h" + +namespace __asan { + +static bool suppressions_inited = false; + +void InitializeSuppressions() { + CHECK(!suppressions_inited); + SuppressionContext::InitIfNecessary(); + suppressions_inited = true; +} + +bool IsInterceptorSuppressed(const char *interceptor_name) { + CHECK(suppressions_inited); + SuppressionContext *ctx = SuppressionContext::Get(); + Suppression *s; + // Match "interceptor_name" suppressions. + return ctx->Match(interceptor_name, SuppressionInterceptorName, &s); +} + +bool HaveStackTraceBasedSuppressions() { + CHECK(suppressions_inited); + SuppressionContext *ctx = SuppressionContext::Get(); + return ctx->HasSuppressionType(SuppressionInterceptorViaFunction) || + ctx->HasSuppressionType(SuppressionInterceptorViaLibrary); +} + +bool IsStackTraceSuppressed(const StackTrace *stack) { + CHECK(suppressions_inited); + if (!HaveStackTraceBasedSuppressions()) + return false; + + SuppressionContext *ctx = SuppressionContext::Get(); + Symbolizer *symbolizer = Symbolizer::GetOrInit(); + Suppression *s; + for (uptr i = 0; i < stack->size && stack->trace[i]; i++) { + uptr addr = stack->trace[i]; + + if (ctx->HasSuppressionType(SuppressionInterceptorViaLibrary)) { + const char *module_name; + uptr module_offset; + // Match "interceptor_via_lib" suppressions. 
+ if (symbolizer->GetModuleNameAndOffsetForPC(addr, &module_name, + &module_offset) && + ctx->Match(module_name, SuppressionInterceptorViaLibrary, &s)) { + return true; + } + } + + if (ctx->HasSuppressionType(SuppressionInterceptorViaFunction)) { + SymbolizedStack *frames = symbolizer->SymbolizePC(addr); + for (SymbolizedStack *cur = frames; cur; cur = cur->next) { + const char *function_name = cur->info.function; + if (!function_name) { + continue; + } + // Match "interceptor_via_fun" suppressions. + if (ctx->Match(function_name, SuppressionInterceptorViaFunction, &s)) { + frames->ClearAll(); + return true; + } + } + frames->ClearAll(); + } + } + return false; +} + +} // namespace __asan diff --git a/contrib/compiler-rt/lib/asan/asan_suppressions.h b/contrib/compiler-rt/lib/asan/asan_suppressions.h new file mode 100644 index 000000000000..cd7ba2ef0ae7 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_suppressions.h @@ -0,0 +1,29 @@ +//===-- asan_suppressions.h -------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// ASan-private header for asan_suppressions.cc. 
+//===----------------------------------------------------------------------===// +#ifndef ASAN_SUPPRESSIONS_H +#define ASAN_SUPPRESSIONS_H + +#include "asan_internal.h" +#include "sanitizer_common/sanitizer_stacktrace.h" + +namespace __asan { + +void InitializeSuppressions(); +bool IsInterceptorSuppressed(const char *interceptor_name); +bool HaveStackTraceBasedSuppressions(); +bool IsStackTraceSuppressed(const StackTrace *stack); + +} // namespace __asan + +#endif // ASAN_SUPPRESSIONS_H diff --git a/contrib/compiler-rt/lib/asan/asan_thread.cc b/contrib/compiler-rt/lib/asan/asan_thread.cc new file mode 100644 index 000000000000..9af5706d86d0 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_thread.cc @@ -0,0 +1,358 @@ +//===-- asan_thread.cc ----------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Thread-related code. +//===----------------------------------------------------------------------===// +#include "asan_allocator.h" +#include "asan_interceptors.h" +#include "asan_poisoning.h" +#include "asan_stack.h" +#include "asan_thread.h" +#include "asan_mapping.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_placement_new.h" +#include "sanitizer_common/sanitizer_stackdepot.h" +#include "sanitizer_common/sanitizer_tls_get_addr.h" +#include "lsan/lsan_common.h" + +namespace __asan { + +// AsanThreadContext implementation. 
+
+// Bundle passed as the opaque |arg| through ThreadRegistry::CreateThread
+// down to AsanThreadContext::OnCreated below.
+struct CreateThreadContextArgs {
+  AsanThread *thread;
+  StackTrace *stack;
+};
+
+// Records the creation stack in the stack depot and links the registry
+// context with its AsanThread (in both directions).
+void AsanThreadContext::OnCreated(void *arg) {
+  CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs*>(arg);
+  if (args->stack)
+    stack_id = StackDepotPut(*args->stack);
+  thread = args->thread;
+  thread->set_context(this);
+}
+
+void AsanThreadContext::OnFinished() {
+  // Drop the link to the AsanThread object.
+  thread = 0;
+}
+
+// MIPS requires aligned address
+static ALIGNED(16) char thread_registry_placeholder[sizeof(ThreadRegistry)];
+static ThreadRegistry *asan_thread_registry;
+
+static BlockingMutex mu_for_thread_context(LINKER_INITIALIZED);
+static LowLevelAllocator allocator_for_thread_context;
+
+// Factory handed to ThreadRegistry: allocates contexts from a never-freed
+// LowLevelAllocator, serialized by its own mutex.
+static ThreadContextBase *GetAsanThreadContext(u32 tid) {
+  BlockingMutexLock lock(&mu_for_thread_context);
+  return new(allocator_for_thread_context) AsanThreadContext(tid);
+}
+
+// Returns the global registry, lazily constructing it in static storage
+// (placement new into the aligned placeholder above) on first use.
+ThreadRegistry &asanThreadRegistry() {
+  static bool initialized;
+  // Don't worry about thread_safety - this should be called when there is
+  // a single thread.
+  if (!initialized) {
+    // Never reuse ASan threads: we store pointer to AsanThreadContext
+    // in TSD and can't reliably tell when no more TSD destructors will
+    // be called. It would be wrong to reuse AsanThreadContext for another
+    // thread before all TSD destructors will be called for it.
+    asan_thread_registry = new(thread_registry_placeholder) ThreadRegistry(
+        GetAsanThreadContext, kMaxNumberOfThreads, kMaxNumberOfThreads);
+    initialized = true;
+  }
+  return *asan_thread_registry;
+}
+
+// The *Locked suffix indicates the caller is expected to hold the registry
+// lock (NOTE(review): confirm at call sites).
+AsanThreadContext *GetThreadContextByTidLocked(u32 tid) {
+  return static_cast<AsanThreadContext *>(
+      asanThreadRegistry().GetThreadLocked(tid));
+}
+
+// AsanThread implementation.
+
+AsanThread *AsanThread::Create(thread_callback_t start_routine, void *arg,
+                               u32 parent_tid, StackTrace *stack,
+                               bool detached) {
+  uptr PageSize = GetPageSizeCached();
+  uptr size = RoundUpTo(sizeof(AsanThread), PageSize);
+  AsanThread *thread = (AsanThread*)MmapOrDie(size, __func__);
+  thread->start_routine_ = start_routine;
+  thread->arg_ = arg;
+  CreateThreadContextArgs args = { thread, stack };
+  asanThreadRegistry().CreateThread(*reinterpret_cast<uptr *>(thread), detached,
+                                    parent_tid, &args);
+
+  return thread;
+}
+
+void AsanThread::TSDDtor(void *tsd) {
+  AsanThreadContext *context = (AsanThreadContext*)tsd;
+  VReport(1, "T%d TSDDtor\n", context->tid);
+  if (context->thread)
+    context->thread->Destroy();
+}
+
+void AsanThread::Destroy() {
+  int tid = this->tid();
+  VReport(1, "T%d exited\n", tid);
+
+  malloc_storage().CommitBack();
+  if (common_flags()->use_sigaltstack) UnsetAlternateSignalStack();
+  asanThreadRegistry().FinishThread(tid);
+  FlushToDeadThreadStats(&stats_);
+  // We also clear the shadow on thread destruction because
+  // some code may still be executing in later TSD destructors
+  // and we don't want it to have any poisoned stack.
+  ClearShadowForThreadStackAndTLS();
+  DeleteFakeStack(tid);
+  uptr size = RoundUpTo(sizeof(AsanThread), GetPageSizeCached());
+  UnmapOrDie(this, size);
+  DTLS_Destroy();
+}
+
+// We want to create the FakeStack lazily on the first use, but not earlier
+// than the stack size is known and the procedure has to be async-signal safe.
+FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
+  uptr stack_size = this->stack_size();
+  if (stack_size == 0)  // stack_size is not yet available, don't use FakeStack.
+    return 0;
+  uptr old_val = 0;
+  // fake_stack_ has 3 states:
+  // 0   -- not initialized
+  // 1   -- being initialized
+  // ptr -- initialized
+  // This CAS checks if the state was 0 and if so changes it to state 1,
+  // if that was successful, it initializes the pointer.
+ if (atomic_compare_exchange_strong( + reinterpret_cast<atomic_uintptr_t *>(&fake_stack_), &old_val, 1UL, + memory_order_relaxed)) { + uptr stack_size_log = Log2(RoundUpToPowerOfTwo(stack_size)); + CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log); + stack_size_log = + Min(stack_size_log, static_cast<uptr>(flags()->max_uar_stack_size_log)); + stack_size_log = + Max(stack_size_log, static_cast<uptr>(flags()->min_uar_stack_size_log)); + fake_stack_ = FakeStack::Create(stack_size_log); + SetTLSFakeStack(fake_stack_); + return fake_stack_; + } + return 0; +} + +void AsanThread::Init() { + fake_stack_ = 0; // Will be initialized lazily if needed. + CHECK_EQ(this->stack_size(), 0U); + SetThreadStackAndTls(); + CHECK_GT(this->stack_size(), 0U); + CHECK(AddrIsInMem(stack_bottom_)); + CHECK(AddrIsInMem(stack_top_ - 1)); + ClearShadowForThreadStackAndTLS(); + int local = 0; + VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(), + (void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_, + &local); + AsanPlatformThreadInit(); +} + +thread_return_t AsanThread::ThreadStart( + uptr os_id, atomic_uintptr_t *signal_thread_is_registered) { + Init(); + asanThreadRegistry().StartThread(tid(), os_id, 0); + if (signal_thread_is_registered) + atomic_store(signal_thread_is_registered, 1, memory_order_release); + + if (common_flags()->use_sigaltstack) SetAlternateSignalStack(); + + if (!start_routine_) { + // start_routine_ == 0 if we're on the main thread or on one of the + // OS X libdispatch worker threads. But nobody is supposed to call + // ThreadStart() for the worker threads. + CHECK_EQ(tid(), 0); + return 0; + } + + thread_return_t res = start_routine_(arg_); + + // On POSIX systems we defer this to the TSD destructor. 
LSan will consider + // the thread's memory as non-live from the moment we call Destroy(), even + // though that memory might contain pointers to heap objects which will be + // cleaned up by a user-defined TSD destructor. Thus, calling Destroy() before + // the TSD destructors have run might cause false positives in LSan. + if (!SANITIZER_POSIX) + this->Destroy(); + + return res; +} + +void AsanThread::SetThreadStackAndTls() { + uptr tls_size = 0; + GetThreadStackAndTls(tid() == 0, &stack_bottom_, &stack_size_, &tls_begin_, + &tls_size); + stack_top_ = stack_bottom_ + stack_size_; + tls_end_ = tls_begin_ + tls_size; + + int local; + CHECK(AddrIsInStack((uptr)&local)); +} + +void AsanThread::ClearShadowForThreadStackAndTLS() { + PoisonShadow(stack_bottom_, stack_top_ - stack_bottom_, 0); + if (tls_begin_ != tls_end_) + PoisonShadow(tls_begin_, tls_end_ - tls_begin_, 0); +} + +bool AsanThread::GetStackFrameAccessByAddr(uptr addr, + StackFrameAccess *access) { + uptr bottom = 0; + if (AddrIsInStack(addr)) { + bottom = stack_bottom(); + } else if (has_fake_stack()) { + bottom = fake_stack()->AddrIsInFakeStack(addr); + CHECK(bottom); + access->offset = addr - bottom; + access->frame_pc = ((uptr*)bottom)[2]; + access->frame_descr = (const char *)((uptr*)bottom)[1]; + return true; + } + uptr aligned_addr = addr & ~(SANITIZER_WORDSIZE/8 - 1); // align addr. 
+ u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr); + u8 *shadow_bottom = (u8*)MemToShadow(bottom); + + while (shadow_ptr >= shadow_bottom && + *shadow_ptr != kAsanStackLeftRedzoneMagic) { + shadow_ptr--; + } + + while (shadow_ptr >= shadow_bottom && + *shadow_ptr == kAsanStackLeftRedzoneMagic) { + shadow_ptr--; + } + + if (shadow_ptr < shadow_bottom) { + return false; + } + + uptr* ptr = (uptr*)SHADOW_TO_MEM((uptr)(shadow_ptr + 1)); + CHECK(ptr[0] == kCurrentStackFrameMagic); + access->offset = addr - (uptr)ptr; + access->frame_pc = ptr[2]; + access->frame_descr = (const char*)ptr[1]; + return true; +} + +static bool ThreadStackContainsAddress(ThreadContextBase *tctx_base, + void *addr) { + AsanThreadContext *tctx = static_cast<AsanThreadContext*>(tctx_base); + AsanThread *t = tctx->thread; + if (!t) return false; + if (t->AddrIsInStack((uptr)addr)) return true; + if (t->has_fake_stack() && t->fake_stack()->AddrIsInFakeStack((uptr)addr)) + return true; + return false; +} + +AsanThread *GetCurrentThread() { + AsanThreadContext *context = + reinterpret_cast<AsanThreadContext *>(AsanTSDGet()); + if (!context) { + if (SANITIZER_ANDROID) { + // On Android, libc constructor is called _after_ asan_init, and cleans up + // TSD. Try to figure out if this is still the main thread by the stack + // address. We are not entirely sure that we have correct main thread + // limits, so only do this magic on Android, and only if the found thread + // is the main thread. + AsanThreadContext *tctx = GetThreadContextByTidLocked(0); + if (ThreadStackContainsAddress(tctx, &context)) { + SetCurrentThread(tctx->thread); + return tctx->thread; + } + } + return 0; + } + return context->thread; +} + +void SetCurrentThread(AsanThread *t) { + CHECK(t->context()); + VReport(2, "SetCurrentThread: %p for thread %p\n", t->context(), + (void *)GetThreadSelf()); + // Make sure we do not reset the current AsanThread. 
+ CHECK_EQ(0, AsanTSDGet()); + AsanTSDSet(t->context()); + CHECK_EQ(t->context(), AsanTSDGet()); +} + +u32 GetCurrentTidOrInvalid() { + AsanThread *t = GetCurrentThread(); + return t ? t->tid() : kInvalidTid; +} + +AsanThread *FindThreadByStackAddress(uptr addr) { + asanThreadRegistry().CheckLocked(); + AsanThreadContext *tctx = static_cast<AsanThreadContext *>( + asanThreadRegistry().FindThreadContextLocked(ThreadStackContainsAddress, + (void *)addr)); + return tctx ? tctx->thread : 0; +} + +void EnsureMainThreadIDIsCorrect() { + AsanThreadContext *context = + reinterpret_cast<AsanThreadContext *>(AsanTSDGet()); + if (context && (context->tid == 0)) + context->os_id = GetTid(); +} + +__asan::AsanThread *GetAsanThreadByOsIDLocked(uptr os_id) { + __asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>( + __asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id)); + if (!context) return 0; + return context->thread; +} +} // namespace __asan + +// --- Implementation of LSan-specific functions --- {{{1 +namespace __lsan { +bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end, + uptr *tls_begin, uptr *tls_end, + uptr *cache_begin, uptr *cache_end) { + __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id); + if (!t) return false; + *stack_begin = t->stack_bottom(); + *stack_end = t->stack_top(); + *tls_begin = t->tls_begin(); + *tls_end = t->tls_end(); + // ASan doesn't keep allocator caches in TLS, so these are unused. 
+ *cache_begin = 0; + *cache_end = 0; + return true; +} + +void ForEachExtraStackRange(uptr os_id, RangeIteratorCallback callback, + void *arg) { + __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id); + if (t && t->has_fake_stack()) + t->fake_stack()->ForEachFakeFrame(callback, arg); +} + +void LockThreadRegistry() { + __asan::asanThreadRegistry().Lock(); +} + +void UnlockThreadRegistry() { + __asan::asanThreadRegistry().Unlock(); +} + +void EnsureMainThreadIDIsCorrect() { + __asan::EnsureMainThreadIDIsCorrect(); +} +} // namespace __lsan diff --git a/contrib/compiler-rt/lib/asan/asan_thread.h b/contrib/compiler-rt/lib/asan/asan_thread.h new file mode 100644 index 000000000000..9da136c87ef1 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_thread.h @@ -0,0 +1,187 @@ +//===-- asan_thread.h -------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// ASan-private header for asan_thread.cc. +//===----------------------------------------------------------------------===// +#ifndef ASAN_THREAD_H +#define ASAN_THREAD_H + +#include "asan_allocator.h" +#include "asan_internal.h" +#include "asan_fake_stack.h" +#include "asan_stats.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_thread_registry.h" + +namespace __asan { + +const u32 kInvalidTid = 0xffffff; // Must fit into 24 bits. +const u32 kMaxNumberOfThreads = (1 << 22); // 4M + +class AsanThread; + +// These objects are created for every thread and are never deleted, +// so we can find them by tid even if the thread is long dead. 
+class AsanThreadContext : public ThreadContextBase { + public: + explicit AsanThreadContext(int tid) + : ThreadContextBase(tid), + announced(false), + destructor_iterations(kPthreadDestructorIterations), + stack_id(0), + thread(0) { + } + bool announced; + u8 destructor_iterations; + u32 stack_id; + AsanThread *thread; + + void OnCreated(void *arg); + void OnFinished(); +}; + +// AsanThreadContext objects are never freed, so we need many of them. +COMPILER_CHECK(sizeof(AsanThreadContext) <= 256); + +// AsanThread are stored in TSD and destroyed when the thread dies. +class AsanThread { + public: + static AsanThread *Create(thread_callback_t start_routine, void *arg, + u32 parent_tid, StackTrace *stack, bool detached); + static void TSDDtor(void *tsd); + void Destroy(); + + void Init(); // Should be called from the thread itself. + thread_return_t ThreadStart(uptr os_id, + atomic_uintptr_t *signal_thread_is_registered); + + uptr stack_top() { return stack_top_; } + uptr stack_bottom() { return stack_bottom_; } + uptr stack_size() { return stack_size_; } + uptr tls_begin() { return tls_begin_; } + uptr tls_end() { return tls_end_; } + u32 tid() { return context_->tid; } + AsanThreadContext *context() { return context_; } + void set_context(AsanThreadContext *context) { context_ = context; } + + struct StackFrameAccess { + uptr offset; + uptr frame_pc; + const char *frame_descr; + }; + bool GetStackFrameAccessByAddr(uptr addr, StackFrameAccess *access); + + bool AddrIsInStack(uptr addr) { + return addr >= stack_bottom_ && addr < stack_top_; + } + + void DeleteFakeStack(int tid) { + if (!fake_stack_) return; + FakeStack *t = fake_stack_; + fake_stack_ = 0; + SetTLSFakeStack(0); + t->Destroy(tid); + } + + bool has_fake_stack() { + return (reinterpret_cast<uptr>(fake_stack_) > 1); + } + + FakeStack *fake_stack() { + if (!__asan_option_detect_stack_use_after_return) + return 0; + if (!has_fake_stack()) + return AsyncSignalSafeLazyInitFakeStack(); + return fake_stack_; + 
+  }
+
+  // True if this thread is currently unwinding stack (i.e. collecting a stack
+  // trace). Used to prevent deadlocks on platforms where libc unwinder calls
+  // malloc internally. See PR17116 for more details.
+  bool isUnwinding() const { return unwinding_; }
+  void setUnwinding(bool b) { unwinding_ = b; }
+
+  // True if we are in a deadly signal handler.
+  bool isInDeadlySignal() const { return in_deadly_signal_; }
+  void setInDeadlySignal(bool b) { in_deadly_signal_ = b; }
+
+  AsanThreadLocalMallocStorage &malloc_storage() { return malloc_storage_; }
+  AsanStats &stats() { return stats_; }
+
+ private:
+  // NOTE: There is no AsanThread constructor. It is allocated
+  // via mmap() and *must* be valid in zero-initialized state.
+  void SetThreadStackAndTls();
+  void ClearShadowForThreadStackAndTLS();
+  FakeStack *AsyncSignalSafeLazyInitFakeStack();
+
+  AsanThreadContext *context_;
+  thread_callback_t start_routine_;
+  void *arg_;
+  uptr stack_top_;
+  uptr stack_bottom_;
+  // stack_size_ == stack_top_ - stack_bottom_;
+  // It needs to be set in an async-signal-safe manner.
+  uptr stack_size_;
+  uptr tls_begin_;
+  uptr tls_end_;
+
+  FakeStack *fake_stack_;
+  AsanThreadLocalMallocStorage malloc_storage_;
+  AsanStats stats_;
+  bool unwinding_;
+  bool in_deadly_signal_;
+};
+
+// ScopedUnwinding is a scope for stacktracing member of a context
+class ScopedUnwinding {
+ public:
+  explicit ScopedUnwinding(AsanThread *t) : thread(t) {
+    t->setUnwinding(true);
+  }
+  ~ScopedUnwinding() { thread->setUnwinding(false); }
+
+ private:
+  AsanThread *thread;
+};
+
+// ScopedDeadlySignal is a scope for handling deadly signals.
+class ScopedDeadlySignal {
+ public:
+  explicit ScopedDeadlySignal(AsanThread *t) : thread(t) {
+    if (thread) thread->setInDeadlySignal(true);
+  }
+  ~ScopedDeadlySignal() {
+    if (thread) thread->setInDeadlySignal(false);
+  }
+
+ private:
+  AsanThread *thread;
+};
+
+// Returns a single instance of registry.
+ThreadRegistry &asanThreadRegistry(); + +// Must be called under ThreadRegistryLock. +AsanThreadContext *GetThreadContextByTidLocked(u32 tid); + +// Get the current thread. May return 0. +AsanThread *GetCurrentThread(); +void SetCurrentThread(AsanThread *t); +u32 GetCurrentTidOrInvalid(); +AsanThread *FindThreadByStackAddress(uptr addr); + +// Used to handle fork(). +void EnsureMainThreadIDIsCorrect(); +} // namespace __asan + +#endif // ASAN_THREAD_H diff --git a/contrib/compiler-rt/lib/asan/asan_win.cc b/contrib/compiler-rt/lib/asan/asan_win.cc new file mode 100644 index 000000000000..4f02b022fe79 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_win.cc @@ -0,0 +1,164 @@ +//===-- asan_win.cc -------------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Windows-specific details. 
+//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_WINDOWS +#include <windows.h> + +#include <dbghelp.h> +#include <stdlib.h> + +#include "asan_interceptors.h" +#include "asan_internal.h" +#include "asan_report.h" +#include "asan_thread.h" +#include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_mutex.h" + +extern "C" { + SANITIZER_INTERFACE_ATTRIBUTE + int __asan_should_detect_stack_use_after_return() { + __asan_init(); + return __asan_option_detect_stack_use_after_return; + } +} + +namespace __asan { + +// ---------------------- TSD ---------------- {{{1 +static bool tsd_key_inited = false; + +static __declspec(thread) void *fake_tsd = 0; + +void AsanTSDInit(void (*destructor)(void *tsd)) { + // FIXME: we're ignoring the destructor for now. + tsd_key_inited = true; +} + +void *AsanTSDGet() { + CHECK(tsd_key_inited); + return fake_tsd; +} + +void AsanTSDSet(void *tsd) { + CHECK(tsd_key_inited); + fake_tsd = tsd; +} + +void PlatformTSDDtor(void *tsd) { + AsanThread::TSDDtor(tsd); +} +// ---------------------- Various stuff ---------------- {{{1 +void MaybeReexec() { + // No need to re-exec on Windows. +} + +void *AsanDoesNotSupportStaticLinkage() { +#if defined(_DEBUG) +#error Please build the runtime with a non-debug CRT: /MD or /MT +#endif + return 0; +} + +void AsanCheckDynamicRTPrereqs() {} + +void AsanCheckIncompatibleRT() {} + +void AsanPlatformThreadInit() { + // Nothing here for now. 
+} + +void ReadContextStack(void *context, uptr *stack, uptr *ssize) { + UNIMPLEMENTED(); +} + +void AsanOnSIGSEGV(int, void *siginfo, void *context) { + UNIMPLEMENTED(); +} + +static LPTOP_LEVEL_EXCEPTION_FILTER default_seh_handler; + +SignalContext SignalContext::Create(void *siginfo, void *context) { + EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD*)siginfo; + CONTEXT *context_record = (CONTEXT*)context; + + uptr pc = (uptr)exception_record->ExceptionAddress; +#ifdef _WIN64 + uptr bp = (uptr)context_record->Rbp; + uptr sp = (uptr)context_record->Rsp; +#else + uptr bp = (uptr)context_record->Ebp; + uptr sp = (uptr)context_record->Esp; +#endif + uptr access_addr = exception_record->ExceptionInformation[1]; + + return SignalContext(context, access_addr, pc, sp, bp); +} + +static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) { + EXCEPTION_RECORD *exception_record = info->ExceptionRecord; + CONTEXT *context = info->ContextRecord; + + if (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION || + exception_record->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) { + const char *description = + (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) + ? "access-violation" + : "in-page-error"; + SignalContext sig = SignalContext::Create(exception_record, context); + ReportSIGSEGV(description, sig); + } + + // FIXME: Handle EXCEPTION_STACK_OVERFLOW here. + + return default_seh_handler(info); +} + +// We want to install our own exception handler (EH) to print helpful reports +// on access violations and whatnot. Unfortunately, the CRT initializers assume +// they are run before any user code and drop any previously-installed EHs on +// the floor, so we can't install our handler inside __asan_init. +// (See crt0dat.c in the CRT sources for the details) +// +// Things get even more complicated with the dynamic runtime, as it finishes its +// initialization before the .exe module CRT begins to initialize. 
+// +// For the static runtime (-MT), it's enough to put a callback to +// __asan_set_seh_filter in the last section for C initializers. +// +// For the dynamic runtime (-MD), we want link the same +// asan_dynamic_runtime_thunk.lib to all the modules, thus __asan_set_seh_filter +// will be called for each instrumented module. This ensures that at least one +// __asan_set_seh_filter call happens after the .exe module CRT is initialized. +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +int __asan_set_seh_filter() { + // We should only store the previous handler if it's not our own handler in + // order to avoid loops in the EH chain. + auto prev_seh_handler = SetUnhandledExceptionFilter(SEHHandler); + if (prev_seh_handler != &SEHHandler) + default_seh_handler = prev_seh_handler; + return 0; +} + +#if !ASAN_DYNAMIC +// Put a pointer to __asan_set_seh_filter at the end of the global list +// of C initializers, after the default EH is set by the CRT. +#pragma section(".CRT$XIZ", long, read) // NOLINT +static __declspec(allocate(".CRT$XIZ")) + int (*__intercept_seh)() = __asan_set_seh_filter; +#endif + +} // namespace __asan + +#endif // _WIN32 diff --git a/contrib/compiler-rt/lib/asan/asan_win_dll_thunk.cc b/contrib/compiler-rt/lib/asan/asan_win_dll_thunk.cc new file mode 100644 index 000000000000..b38a2d16087f --- /dev/null +++ b/contrib/compiler-rt/lib/asan/asan_win_dll_thunk.cc @@ -0,0 +1,376 @@ +//===-- asan_win_dll_thunk.cc ---------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. 
+//
+// This file defines a family of thunks that should be statically linked into
+// the DLLs that have ASan instrumentation in order to delegate the calls to the
+// shared runtime that lives in the main binary.
+// See https://code.google.com/p/address-sanitizer/issues/detail?id=209 for the
+// details.
+//===----------------------------------------------------------------------===//
+
+// Only compile this code when building asan_dll_thunk.lib
+// Using #ifdef rather than relying on Makefiles etc.
+// simplifies the build procedure.
+#ifdef ASAN_DLL_THUNK
+#include "asan_init_version.h"
+#include "interception/interception.h"
+
+// ---------- Function interception helper functions and macros ----------- {{{1
+extern "C" {
+void *__stdcall GetModuleHandleA(const char *module_name);
+void *__stdcall GetProcAddress(void *module, const char *proc_name);
+void abort();
+}
+
+static void *getRealProcAddressOrDie(const char *name) {
+  void *ret = GetProcAddress(GetModuleHandleA(0), name);
+  if (!ret)
+    abort();
+  return ret;
+}
+
+// We need to intercept some functions (e.g. ASan interface, memory allocator --
+// let's call them "hooks") exported by the DLL thunk and forward the hooks to
+// the runtime in the main module.
+// However, we don't want to keep two lists of these hooks.
+// To avoid that, the list of hooks should be defined using the
+// INTERCEPT_WHEN_POSSIBLE macro. Then, all these hooks can be intercepted
+// at once by calling INTERCEPT_HOOKS().
+
+// Use macro+template magic to automatically generate the list of hooks.
+// Each hook at line LINE defines a template class with a static
+// FunctionInterceptor<LINE>::Execute() method intercepting the hook.
+// The default implementation of FunctionInterceptor<LINE> is to call
+// the Execute() method corresponding to the previous line.
+template<int LINE>
+struct FunctionInterceptor {
+  static void Execute() { FunctionInterceptor<LINE-1>::Execute(); }
+};
+
+// There shouldn't be any hooks with negative definition line number.
+template<>
+struct FunctionInterceptor<0> {
+  static void Execute() {}
+};
+
+#define INTERCEPT_WHEN_POSSIBLE(main_function, dll_function)       \
+  template<> struct FunctionInterceptor<__LINE__> {                \
+    static void Execute() {                                        \
+      void *wrapper = getRealProcAddressOrDie(main_function);      \
+      if (!__interception::OverrideFunction((uptr)dll_function,    \
+                                            (uptr)wrapper, 0))     \
+        abort();                                                   \
+      FunctionInterceptor<__LINE__-1>::Execute();                  \
+    }                                                              \
+  };
+
+// Special case of hooks -- ASan own interface functions. Those are only called
+// after __asan_init, thus an empty implementation is sufficient.
+#define INTERFACE_FUNCTION(name)                                   \
+  extern "C" __declspec(noinline) void name() {                    \
+    volatile int prevent_icf = (__LINE__ << 8); (void)prevent_icf; \
+    __debugbreak();                                                \
+  }                                                                \
+  INTERCEPT_WHEN_POSSIBLE(#name, name)
+
+// INTERCEPT_HOOKS must be used after the last INTERCEPT_WHEN_POSSIBLE.
+#define INTERCEPT_HOOKS FunctionInterceptor<__LINE__>::Execute
+
+// We can't define our own version of strlen etc. because that would lead to
+// link-time or even type mismatch errors. Instead, we can declare a function
+// just to be able to get its address. We may miss the first few calls to the
+// functions since it can be called before __asan_init, but that would lead to
+// false negatives in the startup code before user's global initializers, which
+// isn't a big deal.
+#define INTERCEPT_LIBRARY_FUNCTION(name)   \
+  extern "C" void name();                  \
+  INTERCEPT_WHEN_POSSIBLE(WRAPPER_NAME(name), name)
+
+// Disable compiler warnings that show up if we declare our own version
+// of a compiler intrinsic (e.g. strlen).
+#pragma warning(disable: 4391) +#pragma warning(disable: 4392) + +static void InterceptHooks(); +// }}} + +// ---------- Function wrapping helpers ----------------------------------- {{{1 +#define WRAP_V_V(name) \ + extern "C" void name() { \ + typedef void (*fntype)(); \ + static fntype fn = (fntype)getRealProcAddressOrDie(#name); \ + fn(); \ + } \ + INTERCEPT_WHEN_POSSIBLE(#name, name); + +#define WRAP_V_W(name) \ + extern "C" void name(void *arg) { \ + typedef void (*fntype)(void *arg); \ + static fntype fn = (fntype)getRealProcAddressOrDie(#name); \ + fn(arg); \ + } \ + INTERCEPT_WHEN_POSSIBLE(#name, name); + +#define WRAP_V_WW(name) \ + extern "C" void name(void *arg1, void *arg2) { \ + typedef void (*fntype)(void *, void *); \ + static fntype fn = (fntype)getRealProcAddressOrDie(#name); \ + fn(arg1, arg2); \ + } \ + INTERCEPT_WHEN_POSSIBLE(#name, name); + +#define WRAP_V_WWW(name) \ + extern "C" void name(void *arg1, void *arg2, void *arg3) { \ + typedef void *(*fntype)(void *, void *, void *); \ + static fntype fn = (fntype)getRealProcAddressOrDie(#name); \ + fn(arg1, arg2, arg3); \ + } \ + INTERCEPT_WHEN_POSSIBLE(#name, name); + +#define WRAP_W_V(name) \ + extern "C" void *name() { \ + typedef void *(*fntype)(); \ + static fntype fn = (fntype)getRealProcAddressOrDie(#name); \ + return fn(); \ + } \ + INTERCEPT_WHEN_POSSIBLE(#name, name); + +#define WRAP_W_W(name) \ + extern "C" void *name(void *arg) { \ + typedef void *(*fntype)(void *arg); \ + static fntype fn = (fntype)getRealProcAddressOrDie(#name); \ + return fn(arg); \ + } \ + INTERCEPT_WHEN_POSSIBLE(#name, name); + +#define WRAP_W_WW(name) \ + extern "C" void *name(void *arg1, void *arg2) { \ + typedef void *(*fntype)(void *, void *); \ + static fntype fn = (fntype)getRealProcAddressOrDie(#name); \ + return fn(arg1, arg2); \ + } \ + INTERCEPT_WHEN_POSSIBLE(#name, name); + +#define WRAP_W_WWW(name) \ + extern "C" void *name(void *arg1, void *arg2, void *arg3) { \ + typedef void *(*fntype)(void *, void 
*, void *); \ + static fntype fn = (fntype)getRealProcAddressOrDie(#name); \ + return fn(arg1, arg2, arg3); \ + } \ + INTERCEPT_WHEN_POSSIBLE(#name, name); + +#define WRAP_W_WWWW(name) \ + extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4) { \ + typedef void *(*fntype)(void *, void *, void *, void *); \ + static fntype fn = (fntype)getRealProcAddressOrDie(#name); \ + return fn(arg1, arg2, arg3, arg4); \ + } \ + INTERCEPT_WHEN_POSSIBLE(#name, name); + +#define WRAP_W_WWWWW(name) \ + extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \ + void *arg5) { \ + typedef void *(*fntype)(void *, void *, void *, void *, void *); \ + static fntype fn = (fntype)getRealProcAddressOrDie(#name); \ + return fn(arg1, arg2, arg3, arg4, arg5); \ + } \ + INTERCEPT_WHEN_POSSIBLE(#name, name); + +#define WRAP_W_WWWWWW(name) \ + extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \ + void *arg5, void *arg6) { \ + typedef void *(*fntype)(void *, void *, void *, void *, void *, void *); \ + static fntype fn = (fntype)getRealProcAddressOrDie(#name); \ + return fn(arg1, arg2, arg3, arg4, arg5, arg6); \ + } \ + INTERCEPT_WHEN_POSSIBLE(#name, name); +// }}} + +// ----------------- ASan own interface functions -------------------- +// Don't use the INTERFACE_FUNCTION machinery for this function as we actually +// want to call it in the __asan_init interceptor. +WRAP_W_V(__asan_should_detect_stack_use_after_return) + +extern "C" { + int __asan_option_detect_stack_use_after_return; + + // Manually wrap __asan_init as we need to initialize + // __asan_option_detect_stack_use_after_return afterwards. + void __asan_init() { + typedef void (*fntype)(); + static fntype fn = 0; + // __asan_init is expected to be called by only one thread. 
+ if (fn) return; + + fn = (fntype)getRealProcAddressOrDie(__asan_init_name); + fn(); + __asan_option_detect_stack_use_after_return = + (__asan_should_detect_stack_use_after_return() != 0); + + InterceptHooks(); + } +} + +INTERFACE_FUNCTION(__asan_handle_no_return) + +INTERFACE_FUNCTION(__asan_report_store1) +INTERFACE_FUNCTION(__asan_report_store2) +INTERFACE_FUNCTION(__asan_report_store4) +INTERFACE_FUNCTION(__asan_report_store8) +INTERFACE_FUNCTION(__asan_report_store16) +INTERFACE_FUNCTION(__asan_report_store_n) + +INTERFACE_FUNCTION(__asan_report_load1) +INTERFACE_FUNCTION(__asan_report_load2) +INTERFACE_FUNCTION(__asan_report_load4) +INTERFACE_FUNCTION(__asan_report_load8) +INTERFACE_FUNCTION(__asan_report_load16) +INTERFACE_FUNCTION(__asan_report_load_n) + +INTERFACE_FUNCTION(__asan_store1) +INTERFACE_FUNCTION(__asan_store2) +INTERFACE_FUNCTION(__asan_store4) +INTERFACE_FUNCTION(__asan_store8) +INTERFACE_FUNCTION(__asan_store16) +INTERFACE_FUNCTION(__asan_storeN) + +INTERFACE_FUNCTION(__asan_load1) +INTERFACE_FUNCTION(__asan_load2) +INTERFACE_FUNCTION(__asan_load4) +INTERFACE_FUNCTION(__asan_load8) +INTERFACE_FUNCTION(__asan_load16) +INTERFACE_FUNCTION(__asan_loadN) + +INTERFACE_FUNCTION(__asan_memcpy); +INTERFACE_FUNCTION(__asan_memset); +INTERFACE_FUNCTION(__asan_memmove); + +INTERFACE_FUNCTION(__asan_register_globals) +INTERFACE_FUNCTION(__asan_unregister_globals) + +INTERFACE_FUNCTION(__asan_before_dynamic_init) +INTERFACE_FUNCTION(__asan_after_dynamic_init) + +INTERFACE_FUNCTION(__asan_poison_stack_memory) +INTERFACE_FUNCTION(__asan_unpoison_stack_memory) + +INTERFACE_FUNCTION(__asan_poison_memory_region) +INTERFACE_FUNCTION(__asan_unpoison_memory_region) + +INTERFACE_FUNCTION(__asan_address_is_poisoned) +INTERFACE_FUNCTION(__asan_region_is_poisoned) + +INTERFACE_FUNCTION(__asan_get_current_fake_stack) +INTERFACE_FUNCTION(__asan_addr_is_in_fake_stack) + +INTERFACE_FUNCTION(__asan_stack_malloc_0) +INTERFACE_FUNCTION(__asan_stack_malloc_1) 
+INTERFACE_FUNCTION(__asan_stack_malloc_2)
+INTERFACE_FUNCTION(__asan_stack_malloc_3)
+INTERFACE_FUNCTION(__asan_stack_malloc_4)
+INTERFACE_FUNCTION(__asan_stack_malloc_5)
+INTERFACE_FUNCTION(__asan_stack_malloc_6)
+INTERFACE_FUNCTION(__asan_stack_malloc_7)
+INTERFACE_FUNCTION(__asan_stack_malloc_8)
+INTERFACE_FUNCTION(__asan_stack_malloc_9)
+INTERFACE_FUNCTION(__asan_stack_malloc_10)
+
+// Forward the fake-stack free hooks for every size class the malloc list
+// above covers (0..10); __asan_stack_free_3 was previously missing here,
+// which would leave that hook unforwarded in instrumented DLLs.
+INTERFACE_FUNCTION(__asan_stack_free_0)
+INTERFACE_FUNCTION(__asan_stack_free_1)
+INTERFACE_FUNCTION(__asan_stack_free_2)
+INTERFACE_FUNCTION(__asan_stack_free_3)
+INTERFACE_FUNCTION(__asan_stack_free_4)
+INTERFACE_FUNCTION(__asan_stack_free_5)
+INTERFACE_FUNCTION(__asan_stack_free_6)
+INTERFACE_FUNCTION(__asan_stack_free_7)
+INTERFACE_FUNCTION(__asan_stack_free_8)
+INTERFACE_FUNCTION(__asan_stack_free_9)
+INTERFACE_FUNCTION(__asan_stack_free_10)
+
+INTERFACE_FUNCTION(__sanitizer_cov_module_init)
+
+// TODO(timurrrr): Add more interface functions on the as-needed basis.
+
+// ----------------- Memory allocation functions ---------------------
+WRAP_V_W(free)
+WRAP_V_WW(_free_dbg)
+
+WRAP_W_W(malloc)
+WRAP_W_WWWW(_malloc_dbg)
+
+WRAP_W_WW(calloc)
+WRAP_W_WWWWW(_calloc_dbg)
+WRAP_W_WWW(_calloc_impl)
+
+WRAP_W_WW(realloc)
+WRAP_W_WWW(_realloc_dbg)
+WRAP_W_WWW(_recalloc)
+
+WRAP_W_W(_msize)
+WRAP_W_W(_expand)
+WRAP_W_W(_expand_dbg)
+
+// TODO(timurrrr): Might want to add support for _aligned_* allocation
+// functions to detect a bit more bugs. Those functions seem to wrap malloc().
+
+// TODO(timurrrr): Do we need to add _Crt* stuff here? (see asan_malloc_win.cc).
+
+INTERCEPT_LIBRARY_FUNCTION(atoi);
+INTERCEPT_LIBRARY_FUNCTION(atol);
+INTERCEPT_LIBRARY_FUNCTION(_except_handler3);
+
+// _except_handler4 checks -GS cookie which is different for each module, so we
+// can't use INTERCEPT_LIBRARY_FUNCTION(_except_handler4).
+INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) { + __asan_handle_no_return(); + return REAL(_except_handler4)(a, b, c, d); +} + +INTERCEPT_LIBRARY_FUNCTION(frexp); +INTERCEPT_LIBRARY_FUNCTION(longjmp); +INTERCEPT_LIBRARY_FUNCTION(memchr); +INTERCEPT_LIBRARY_FUNCTION(memcmp); +INTERCEPT_LIBRARY_FUNCTION(memcpy); +INTERCEPT_LIBRARY_FUNCTION(memmove); +INTERCEPT_LIBRARY_FUNCTION(memset); +INTERCEPT_LIBRARY_FUNCTION(strcat); // NOLINT +INTERCEPT_LIBRARY_FUNCTION(strchr); +INTERCEPT_LIBRARY_FUNCTION(strcmp); +INTERCEPT_LIBRARY_FUNCTION(strcpy); // NOLINT +INTERCEPT_LIBRARY_FUNCTION(strlen); +INTERCEPT_LIBRARY_FUNCTION(strncat); +INTERCEPT_LIBRARY_FUNCTION(strncmp); +INTERCEPT_LIBRARY_FUNCTION(strncpy); +INTERCEPT_LIBRARY_FUNCTION(strnlen); +INTERCEPT_LIBRARY_FUNCTION(strtol); +INTERCEPT_LIBRARY_FUNCTION(wcslen); + +// Must be after all the interceptor declarations due to the way INTERCEPT_HOOKS +// is defined. +void InterceptHooks() { + INTERCEPT_HOOKS(); + INTERCEPT_FUNCTION(_except_handler4); +} + +// We want to call __asan_init before C/C++ initializers/constructors are +// executed, otherwise functions like memset might be invoked. +// For some strange reason, merely linking in asan_preinit.cc doesn't work +// as the callback is never called... Is link.exe doing something too smart? + +// In DLLs, the callbacks are expected to return 0, +// otherwise CRT initialization fails. 
+static int call_asan_init() {
+  __asan_init();
+  return 0;
+}
+#pragma section(".CRT$XIB", long, read)  // NOLINT
+__declspec(allocate(".CRT$XIB")) int (*__asan_preinit)() = call_asan_init;
+
+#endif // ASAN_DLL_THUNK
diff --git a/contrib/compiler-rt/lib/asan/asan_win_dynamic_runtime_thunk.cc b/contrib/compiler-rt/lib/asan/asan_win_dynamic_runtime_thunk.cc
new file mode 100644
index 000000000000..3a4de7dbf1fb
--- /dev/null
+++ b/contrib/compiler-rt/lib/asan/asan_win_dynamic_runtime_thunk.cc
@@ -0,0 +1,52 @@
+//===-- asan_win_uar_thunk.cc ---------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// This file defines things that need to be present in the application modules
+// to interact with the ASan DLL runtime correctly and can't be implemented
+// using the default "import library" generated when linking the DLL RTL.
+//
+// This includes:
+//  - forwarding the detect_stack_use_after_return runtime option
+//  - installing a custom SEH handler
+//
+//===----------------------------------------------------------------------===//
+
+// Only compile this code when building asan_dynamic_runtime_thunk.lib
+// Using #ifdef rather than relying on Makefiles etc.
+// simplifies the build procedure.
+#ifdef ASAN_DYNAMIC_RUNTIME_THUNK
+extern "C" {
+__declspec(dllimport) int __asan_set_seh_filter();
+__declspec(dllimport) int __asan_should_detect_stack_use_after_return();
+
+// Define a copy of __asan_option_detect_stack_use_after_return that should be
+// used when linking an MD runtime with a set of object files on Windows.
+//
+// The ASan MD runtime dllexports '__asan_option_detect_stack_use_after_return',
+// so normally we would just dllimport it.
Unfortunately, the dllimport +// attribute adds __imp_ prefix to the symbol name of a variable. +// Since in general we don't know if a given TU is going to be used +// with a MT or MD runtime and we don't want to use ugly __imp_ names on Windows +// just to work around this issue, let's clone the a variable that is +// constant after initialization anyways. +int __asan_option_detect_stack_use_after_return = + __asan_should_detect_stack_use_after_return(); + +// Set the ASan-specific SEH handler at the end of CRT initialization of each +// module (see asan_win.cc for the details). +// +// Unfortunately, putting a pointer to __asan_set_seh_filter into +// __asan_intercept_seh gets optimized out, so we have to use an extra function. +static int SetSEHFilter() { return __asan_set_seh_filter(); } +#pragma section(".CRT$XIZ", long, read) // NOLINT +__declspec(allocate(".CRT$XIZ")) int (*__asan_seh_interceptor)() = SetSEHFilter; +} +#endif // ASAN_DYNAMIC_RUNTIME_THUNK diff --git a/contrib/compiler-rt/lib/asan/scripts/asan_device_setup b/contrib/compiler-rt/lib/asan/scripts/asan_device_setup new file mode 100755 index 000000000000..a620f51b0836 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/scripts/asan_device_setup @@ -0,0 +1,267 @@ +#!/bin/bash +#===- lib/asan/scripts/asan_device_setup -----------------------------------===# +# +# The LLVM Compiler Infrastructure +# +# This file is distributed under the University of Illinois Open Source +# License. See LICENSE.TXT for details. +# +# Prepare Android device to run ASan applications. +# +#===------------------------------------------------------------------------===# + +set -e + +HERE="$(cd "$(dirname "$0")" && pwd)" + +revert=no +extra_options= +device= +lib= + +function usage { + echo "usage: $0 [--revert] [--device device-id] [--lib path] [--extra-options options]" + echo " --revert: Uninstall ASan from the device." + echo " --lib: Path to ASan runtime library." + echo " --extra-options: Extra ASAN_OPTIONS." 
+ echo " --device: Install to the given device. Use 'adb devices' to find" + echo " device-id." + echo + exit 1 +} + +function get_device_arch { # OUTVAR + local _outvar=$1 + local _ABI=$($ADB shell getprop ro.product.cpu.abi) + local _ARCH= + if [[ $_ABI == x86* ]]; then + _ARCH=i686 + elif [[ $_ABI == armeabi* ]]; then + _ARCH=arm + else + echo "Unrecognized device ABI: $_ABI" + exit 1 + fi + eval $_outvar=\$_ARCH +} + +while [[ $# > 0 ]]; do + case $1 in + --revert) + revert=yes + ;; + --extra-options) + shift + if [[ $# == 0 ]]; then + echo "--extra-options requires an argument." + exit 1 + fi + extra_options="$1" + ;; + --lib) + shift + if [[ $# == 0 ]]; then + echo "--lib requires an argument." + exit 1 + fi + lib="$1" + ;; + --device) + shift + if [[ $# == 0 ]]; then + echo "--device requires an argument." + exit 1 + fi + device="$1" + ;; + *) + usage + ;; + esac + shift +done + +ADB=${ADB:-adb} +if [[ x$device != x ]]; then + ADB="$ADB -s $device" +fi + +echo '>> Remounting /system rw' +$ADB root +$ADB wait-for-device +$ADB remount +$ADB wait-for-device + +get_device_arch ARCH +echo "Target architecture: $ARCH" +ASAN_RT="libclang_rt.asan-$ARCH-android.so" + +if [[ x$revert == xyes ]]; then + echo '>> Uninstalling ASan' + + if ! $ADB shell readlink /system/bin/app_process | grep 'app_process' >&/dev/null; then + echo '>> Pre-L device detected.' 
+ $ADB shell mv /system/bin/app_process.real /system/bin/app_process + $ADB shell rm /system/bin/asanwrapper + $ADB shell rm /system/lib/$ASAN_RT + else + $ADB shell rm /system/bin/app_process.wrap + $ADB shell rm /system/bin/asanwrapper + $ADB shell rm /system/lib/$ASAN_RT + $ADB shell rm /system/bin/app_process + $ADB shell ln -s /system/bin/app_process32 /system/bin/app_process + fi + + echo '>> Restarting shell' + $ADB shell stop + $ADB shell start + + echo '>> Done' + exit 0 +fi + +if [[ -d "$lib" ]]; then + ASAN_RT_PATH="$lib" +elif [[ -f "$lib" && "$lib" == *"$ASAN_RT" ]]; then + ASAN_RT_PATH=$(dirname "$lib") +elif [[ -f "$HERE/$ASAN_RT" ]]; then + ASAN_RT_PATH="$HERE" +elif [[ $(basename "$HERE") == "bin" ]]; then + # We could be in the toolchain's base directory. + # Consider ../lib, ../lib/asan, ../lib/linux and ../lib/clang/$VERSION/lib/linux. + P=$(ls "$HERE"/../lib/"$ASAN_RT" "$HERE"/../lib/asan/"$ASAN_RT" "$HERE"/../lib/linux/"$ASAN_RT" "$HERE"/../lib/clang/*/lib/linux/"$ASAN_RT" 2>/dev/null | sort | tail -1) + if [[ -n "$P" ]]; then + ASAN_RT_PATH="$(dirname "$P")" + fi +fi + +if [[ -z "$ASAN_RT_PATH" || ! -f "$ASAN_RT_PATH/$ASAN_RT" ]]; then + echo ">> ASan runtime library not found" + exit 1 +fi + +TMPDIRBASE=$(mktemp -d) +TMPDIROLD="$TMPDIRBASE/old" +TMPDIR="$TMPDIRBASE/new" +mkdir "$TMPDIROLD" + +RELEASE=$($ADB shell getprop ro.build.version.release) +PRE_L=0 +if echo "$RELEASE" | grep '^4\.' >&/dev/null; then + PRE_L=1 +fi + +if ! $ADB shell readlink /system/bin/app_process | grep 'app_process' >&/dev/null; then + + if $ADB pull /system/bin/app_process.real /dev/null >&/dev/null; then + echo '>> Old-style ASan installation detected. Reverting.' + $ADB shell mv /system/bin/app_process.real /system/bin/app_process + fi + + echo '>> Pre-L device detected. Setting up app_process symlink.' 
+ $ADB shell mv /system/bin/app_process /system/bin/app_process32 + $ADB shell ln -s /system/bin/app_process32 /system/bin/app_process +fi + +echo '>> Copying files from the device' +$ADB pull /system/bin/app_process.wrap "$TMPDIROLD" || true +$ADB pull /system/bin/asanwrapper "$TMPDIROLD" || true +$ADB pull /system/lib/"$ASAN_RT" "$TMPDIROLD" || true +cp -r "$TMPDIROLD" "$TMPDIR" + +if [[ -f "$TMPDIR/app_process.wrap" ]]; then + echo ">> Previous installation detected" +else + echo ">> New installation" +fi + +echo '>> Generating wrappers' + +cp "$ASAN_RT_PATH/$ASAN_RT" "$TMPDIR/" + +# FIXME: alloc_dealloc_mismatch=0 prevents a failure in libdvm startup, +# which may or may not be a real bug (probably not). +ASAN_OPTIONS=start_deactivated=1,alloc_dealloc_mismatch=0 + +# On Android-L not allowing user segv handler breaks some applications. +if $ADB shell 'echo $LD_PRELOAD' | grep libsigchain.so >&/dev/null; then + ASAN_OPTIONS="$ASAN_OPTIONS,allow_user_segv_handler=1" +fi + +if [[ x$extra_options != x ]] ; then + ASAN_OPTIONS="$ASAN_OPTIONS,$extra_options" +fi + +# Zygote wrapper. +cat <<EOF >"$TMPDIR/app_process.wrap" +#!/system/bin/sh-from-zygote +ASAN_OPTIONS=$ASAN_OPTIONS \\ +LD_PRELOAD=\$LD_PRELOAD:$ASAN_RT \\ +exec /system/bin/app_process32 \$@ + +EOF + +# General command-line tool wrapper (use for anything that's not started as +# zygote). +cat <<EOF >"$TMPDIR/asanwrapper" +#!/system/bin/sh +LD_PRELOAD=$ASAN_RT \\ +exec \$@ + +EOF + +if ! 
( cd "$TMPDIRBASE" && diff -qr old/ new/ ) ; then + echo '>> Pushing files to the device' + $ADB push "$TMPDIR/$ASAN_RT" /system/lib/ + $ADB push "$TMPDIR/app_process.wrap" /system/bin/app_process.wrap + $ADB push "$TMPDIR/asanwrapper" /system/bin/asanwrapper + + $ADB shell rm /system/bin/app_process + $ADB shell ln -s /system/bin/app_process.wrap /system/bin/app_process + + $ADB shell chown root.shell \ + /system/lib/"$ASAN_RT" \ + /system/bin/app_process.wrap \ + /system/bin/asanwrapper + $ADB shell chmod 644 \ + /system/lib/"$ASAN_RT" + $ADB shell chmod 755 \ + /system/bin/app_process.wrap \ + /system/bin/asanwrapper + + # Make SELinux happy by keeping app_process wrapper and the shell + # it runs on in zygote domain. + ENFORCING=0 + if $ADB shell getenforce | grep Enforcing >/dev/null; then + # Sometimes shell is not allowed to change file contexts. + # Temporarily switch to permissive. + ENFORCING=1 + $ADB shell setenforce 0 + fi + + $ADB shell cp /system/bin/sh /system/bin/sh-from-zygote + + if [[ PRE_L -eq 1 ]]; then + CTX=u:object_r:system_file:s0 + else + CTX=u:object_r:zygote_exec:s0 + fi + $ADB shell chcon $CTX \ + /system/bin/sh-from-zygote \ + /system/bin/app_process.wrap \ + /system/bin/app_process32 + + if [ $ENFORCING == 1 ]; then + $ADB shell setenforce 1 + fi + + echo '>> Restarting shell (asynchronous)' + $ADB shell stop + $ADB shell start + + echo '>> Please wait until the device restarts' +else + echo '>> Device is up to date' +fi + +rm -r "$TMPDIRBASE" diff --git a/contrib/compiler-rt/lib/asan/scripts/asan_symbolize.py b/contrib/compiler-rt/lib/asan/scripts/asan_symbolize.py new file mode 100755 index 000000000000..5fca136b6950 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/scripts/asan_symbolize.py @@ -0,0 +1,469 @@ +#!/usr/bin/env python +#===- lib/asan/scripts/asan_symbolize.py -----------------------------------===# +# +# The LLVM Compiler Infrastructure +# +# This file is distributed under the University of Illinois Open Source +# 
License. See LICENSE.TXT for details.
+#
+#===------------------------------------------------------------------------===#
+import argparse
+import bisect
+import getopt
+import os
+import pty
+import re
+import subprocess
+import sys
+import termios
+
+symbolizers = {}
+DEBUG = False
+demangle = False
+binutils_prefix = None
+sysroot_path = None
+binary_name_filter = None
+fix_filename_patterns = None
+logfile = sys.stdin
+
+# FIXME: merge the code that calls fix_filename().
+def fix_filename(file_name):
+  if fix_filename_patterns:
+    for path_to_cut in fix_filename_patterns:
+      file_name = re.sub('.*' + path_to_cut, '', file_name)
+  file_name = re.sub('.*asan_[a-z_]*.cc:[0-9]*', '_asan_rtl_', file_name)
+  file_name = re.sub('.*crtstuff.c:0', '???:0', file_name)
+  return file_name
+
+def sysroot_path_filter(binary_name):
+  return sysroot_path + binary_name
+
+def guess_arch(addr):
+  # Guess which arch we're running. 10 = len('0x') + 8 hex digits.
+  if len(addr) > 10:
+    return 'x86_64'
+  else:
+    return 'i386'
+
+class Symbolizer(object):
+  def __init__(self):
+    pass
+
+  def symbolize(self, addr, binary, offset):
+    """Symbolize the given address (pair of binary and offset).
+
+    Overridden in subclasses.
+    Args:
+      addr: virtual address of an instruction.
+      binary: path to executable/shared object containing this instruction.
+      offset: instruction offset in the @binary.
+    Returns:
+      list of strings (one string for each inlined frame) describing
+      the code locations for this instruction (that is, function name, file
+      name, line and column numbers).
+ """ + return None + + +class LLVMSymbolizer(Symbolizer): + def __init__(self, symbolizer_path, default_arch, system, dsym_hints=[]): + super(LLVMSymbolizer, self).__init__() + self.symbolizer_path = symbolizer_path + self.default_arch = default_arch + self.system = system + self.dsym_hints = dsym_hints + self.pipe = self.open_llvm_symbolizer() + + def open_llvm_symbolizer(self): + cmd = [self.symbolizer_path, + '--use-symbol-table=true', + '--demangle=%s' % demangle, + '--functions=short', + '--inlining=true', + '--default-arch=%s' % self.default_arch] + if self.system == 'Darwin': + for hint in self.dsym_hints: + cmd.append('--dsym-hint=%s' % hint) + if DEBUG: + print ' '.join(cmd) + try: + result = subprocess.Popen(cmd, stdin=subprocess.PIPE, + stdout=subprocess.PIPE) + except OSError: + result = None + return result + + def symbolize(self, addr, binary, offset): + """Overrides Symbolizer.symbolize.""" + if not self.pipe: + return None + result = [] + try: + symbolizer_input = '"%s" %s' % (binary, offset) + if DEBUG: + print symbolizer_input + print >> self.pipe.stdin, symbolizer_input + while True: + function_name = self.pipe.stdout.readline().rstrip() + if not function_name: + break + file_name = self.pipe.stdout.readline().rstrip() + file_name = fix_filename(file_name) + if (not function_name.startswith('??') or + not file_name.startswith('??')): + # Append only non-trivial frames. + result.append('%s in %s %s' % (addr, function_name, + file_name)) + except Exception: + result = [] + if not result: + result = None + return result + + +def LLVMSymbolizerFactory(system, default_arch, dsym_hints=[]): + symbolizer_path = os.getenv('LLVM_SYMBOLIZER_PATH') + if not symbolizer_path: + symbolizer_path = os.getenv('ASAN_SYMBOLIZER_PATH') + if not symbolizer_path: + # Assume llvm-symbolizer is in PATH. 
+ symbolizer_path = 'llvm-symbolizer' + return LLVMSymbolizer(symbolizer_path, default_arch, system, dsym_hints) + + +class Addr2LineSymbolizer(Symbolizer): + def __init__(self, binary): + super(Addr2LineSymbolizer, self).__init__() + self.binary = binary + self.pipe = self.open_addr2line() + + def open_addr2line(self): + addr2line_tool = 'addr2line' + if binutils_prefix: + addr2line_tool = binutils_prefix + addr2line_tool + cmd = [addr2line_tool, '-f'] + if demangle: + cmd += ['--demangle'] + cmd += ['-e', self.binary] + if DEBUG: + print ' '.join(cmd) + return subprocess.Popen(cmd, + stdin=subprocess.PIPE, stdout=subprocess.PIPE) + + def symbolize(self, addr, binary, offset): + """Overrides Symbolizer.symbolize.""" + if self.binary != binary: + return None + try: + print >> self.pipe.stdin, offset + function_name = self.pipe.stdout.readline().rstrip() + file_name = self.pipe.stdout.readline().rstrip() + except Exception: + function_name = '' + file_name = '' + file_name = fix_filename(file_name) + return ['%s in %s %s' % (addr, function_name, file_name)] + + +class UnbufferedLineConverter(object): + """ + Wrap a child process that responds to each line of input with one line of + output. Uses pty to trick the child into providing unbuffered output. + """ + def __init__(self, args, close_stderr=False): + pid, fd = pty.fork() + if pid == 0: + # We're the child. Transfer control to command. + if close_stderr: + dev_null = os.open('/dev/null', 0) + os.dup2(dev_null, 2) + os.execvp(args[0], args) + else: + # Disable echoing. 
+ attr = termios.tcgetattr(fd) + attr[3] = attr[3] & ~termios.ECHO + termios.tcsetattr(fd, termios.TCSANOW, attr) + # Set up a file()-like interface to the child process + self.r = os.fdopen(fd, "r", 1) + self.w = os.fdopen(os.dup(fd), "w", 1) + + def convert(self, line): + self.w.write(line + "\n") + return self.readline() + + def readline(self): + return self.r.readline().rstrip() + + +class DarwinSymbolizer(Symbolizer): + def __init__(self, addr, binary): + super(DarwinSymbolizer, self).__init__() + self.binary = binary + self.arch = guess_arch(addr) + self.open_atos() + + def open_atos(self): + if DEBUG: + print 'atos -o %s -arch %s' % (self.binary, self.arch) + cmdline = ['atos', '-o', self.binary, '-arch', self.arch] + self.atos = UnbufferedLineConverter(cmdline, close_stderr=True) + + def symbolize(self, addr, binary, offset): + """Overrides Symbolizer.symbolize.""" + if self.binary != binary: + return None + atos_line = self.atos.convert('0x%x' % int(offset, 16)) + while "got symbolicator for" in atos_line: + atos_line = self.atos.readline() + # A well-formed atos response looks like this: + # foo(type1, type2) (in object.name) (filename.cc:80) + match = re.match('^(.*) \(in (.*)\) \((.*:\d*)\)$', atos_line) + if DEBUG: + print 'atos_line: ', atos_line + if match: + function_name = match.group(1) + function_name = re.sub('\(.*?\)', '', function_name) + file_name = fix_filename(match.group(3)) + return ['%s in %s %s' % (addr, function_name, file_name)] + else: + return ['%s in %s' % (addr, atos_line)] + + +# Chain several symbolizers so that if one symbolizer fails, we fall back +# to the next symbolizer in chain. 
+class ChainSymbolizer(Symbolizer): + def __init__(self, symbolizer_list): + super(ChainSymbolizer, self).__init__() + self.symbolizer_list = symbolizer_list + + def symbolize(self, addr, binary, offset): + """Overrides Symbolizer.symbolize.""" + for symbolizer in self.symbolizer_list: + if symbolizer: + result = symbolizer.symbolize(addr, binary, offset) + if result: + return result + return None + + def append_symbolizer(self, symbolizer): + self.symbolizer_list.append(symbolizer) + + +def BreakpadSymbolizerFactory(binary): + suffix = os.getenv('BREAKPAD_SUFFIX') + if suffix: + filename = binary + suffix + if os.access(filename, os.F_OK): + return BreakpadSymbolizer(filename) + return None + + +def SystemSymbolizerFactory(system, addr, binary): + if system == 'Darwin': + return DarwinSymbolizer(addr, binary) + elif system == 'Linux': + return Addr2LineSymbolizer(binary) + + +class BreakpadSymbolizer(Symbolizer): + def __init__(self, filename): + super(BreakpadSymbolizer, self).__init__() + self.filename = filename + lines = file(filename).readlines() + self.files = [] + self.symbols = {} + self.address_list = [] + self.addresses = {} + # MODULE mac x86_64 A7001116478B33F18FF9BEDE9F615F190 t + fragments = lines[0].rstrip().split() + self.arch = fragments[2] + self.debug_id = fragments[3] + self.binary = ' '.join(fragments[4:]) + self.parse_lines(lines[1:]) + + def parse_lines(self, lines): + cur_function_addr = '' + for line in lines: + fragments = line.split() + if fragments[0] == 'FILE': + assert int(fragments[1]) == len(self.files) + self.files.append(' '.join(fragments[2:])) + elif fragments[0] == 'PUBLIC': + self.symbols[int(fragments[1], 16)] = ' '.join(fragments[3:]) + elif fragments[0] in ['CFI', 'STACK']: + pass + elif fragments[0] == 'FUNC': + cur_function_addr = int(fragments[1], 16) + if not cur_function_addr in self.symbols.keys(): + self.symbols[cur_function_addr] = ' '.join(fragments[4:]) + else: + # Line starting with an address. 
+ addr = int(fragments[0], 16) + self.address_list.append(addr) + # Tuple of symbol address, size, line, file number. + self.addresses[addr] = (cur_function_addr, + int(fragments[1], 16), + int(fragments[2]), + int(fragments[3])) + self.address_list.sort() + + def get_sym_file_line(self, addr): + key = None + if addr in self.addresses.keys(): + key = addr + else: + index = bisect.bisect_left(self.address_list, addr) + if index == 0: + return None + else: + key = self.address_list[index - 1] + sym_id, size, line_no, file_no = self.addresses[key] + symbol = self.symbols[sym_id] + filename = self.files[file_no] + if addr < key + size: + return symbol, filename, line_no + else: + return None + + def symbolize(self, addr, binary, offset): + if self.binary != binary: + return None + res = self.get_sym_file_line(int(offset, 16)) + if res: + function_name, file_name, line_no = res + result = ['%s in %s %s:%d' % ( + addr, function_name, file_name, line_no)] + print result + return result + else: + return None + + +class SymbolizationLoop(object): + def __init__(self, binary_name_filter=None, dsym_hint_producer=None): + # Used by clients who may want to supply a different binary name. + # E.g. in Chrome several binaries may share a single .dSYM. + self.binary_name_filter = binary_name_filter + self.dsym_hint_producer = dsym_hint_producer + self.system = os.uname()[0] + if self.system not in ['Linux', 'Darwin', 'FreeBSD']: + raise Exception('Unknown system') + self.llvm_symbolizers = {} + self.last_llvm_symbolizer = None + self.dsym_hints = set([]) + self.frame_no = 0 + + def symbolize_address(self, addr, binary, offset): + # On non-Darwin (i.e. on platforms without .dSYM debug info) always use + # a single symbolizer binary. + # On Darwin, if the dsym hint producer is present: + # 1. 
check whether we've seen this binary already; if so, + # use |llvm_symbolizers[binary]|, which has already loaded the debug + # info for this binary (might not be the case for + # |last_llvm_symbolizer|); + # 2. otherwise check if we've seen all the hints for this binary already; + # if so, reuse |last_llvm_symbolizer| which has the full set of hints; + # 3. otherwise create a new symbolizer and pass all currently known + # .dSYM hints to it. + if not binary in self.llvm_symbolizers: + use_last_symbolizer = True + if self.system == 'Darwin' and self.dsym_hint_producer: + dsym_hints_for_binary = set(self.dsym_hint_producer(binary)) + use_last_symbolizer = bool(dsym_hints_for_binary - self.dsym_hints) + self.dsym_hints |= dsym_hints_for_binary + if self.last_llvm_symbolizer and use_last_symbolizer: + self.llvm_symbolizers[binary] = self.last_llvm_symbolizer + else: + self.last_llvm_symbolizer = LLVMSymbolizerFactory( + self.system, guess_arch(addr), self.dsym_hints) + self.llvm_symbolizers[binary] = self.last_llvm_symbolizer + # Use the chain of symbolizers: + # Breakpad symbolizer -> LLVM symbolizer -> addr2line/atos + # (fall back to next symbolizer if the previous one fails). + if not binary in symbolizers: + symbolizers[binary] = ChainSymbolizer( + [BreakpadSymbolizerFactory(binary), self.llvm_symbolizers[binary]]) + result = symbolizers[binary].symbolize(addr, binary, offset) + if result is None: + # Initialize system symbolizer only if other symbolizers failed. + symbolizers[binary].append_symbolizer( + SystemSymbolizerFactory(self.system, addr, binary)) + result = symbolizers[binary].symbolize(addr, binary, offset) + # The system symbolizer must produce some result. 
+ assert result + return result + + def get_symbolized_lines(self, symbolized_lines): + if not symbolized_lines: + return [self.current_line] + else: + result = [] + for symbolized_frame in symbolized_lines: + result.append(' #%s %s' % (str(self.frame_no), symbolized_frame.rstrip())) + self.frame_no += 1 + return result + + def process_logfile(self): + self.frame_no = 0 + while True: + line = logfile.readline() + if not line: + break + processed = self.process_line(line) + print '\n'.join(processed) + + def process_line(self, line): + self.current_line = line.rstrip() + #0 0x7f6e35cf2e45 (/blah/foo.so+0x11fe45) + stack_trace_line_format = ( + '^( *#([0-9]+) *)(0x[0-9a-f]+) *\((.*)\+(0x[0-9a-f]+)\)') + match = re.match(stack_trace_line_format, line) + if not match: + return [self.current_line] + if DEBUG: + print line + _, frameno_str, addr, binary, offset = match.groups() + if frameno_str == '0': + # Assume that frame #0 is the first frame of new stack trace. + self.frame_no = 0 + original_binary = binary + if self.binary_name_filter: + binary = self.binary_name_filter(binary) + symbolized_line = self.symbolize_address(addr, binary, offset) + if not symbolized_line: + if original_binary != binary: + symbolized_line = self.symbolize_address(addr, binary, offset) + return self.get_symbolized_lines(symbolized_line) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, + description='ASan symbolization script', + epilog='''Example of use: + asan_symbolize.py -c "$HOME/opt/cross/bin/arm-linux-gnueabi-" -s "$HOME/SymbolFiles" < asan.log''') + parser.add_argument('path_to_cut', nargs='*', + help='pattern to be cut from the result file path ') + parser.add_argument('-d','--demangle', action='store_true', + help='demangle function names') + parser.add_argument('-s', metavar='SYSROOT', + help='set path to sysroot for sanitized binaries') + parser.add_argument('-c', metavar='CROSS_COMPILE', + help='set prefix 
for binutils') + parser.add_argument('-l','--logfile', default=sys.stdin, type=argparse.FileType('r'), + help='set log file name to parse, default is stdin') + args = parser.parse_args() + if args.path_to_cut: + fix_filename_patterns = args.path_to_cut + if args.demangle: + demangle = True + if args.s: + binary_name_filter = sysroot_path_filter + sysroot_path = args.s + if args.c: + binutils_prefix = args.c + if args.logfile: + logfile = args.logfile + else: + logfile = sys.stdin + loop = SymbolizationLoop(binary_name_filter) + loop.process_logfile() diff --git a/contrib/compiler-rt/lib/asan/tests/asan_asm_test.cc b/contrib/compiler-rt/lib/asan/tests/asan_asm_test.cc new file mode 100644 index 000000000000..1d8b04d611bd --- /dev/null +++ b/contrib/compiler-rt/lib/asan/tests/asan_asm_test.cc @@ -0,0 +1,267 @@ +//===-- asan_asm_test.cc --------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. 
+// +//===----------------------------------------------------------------------===// +#include "asan_test_utils.h" + +#if defined(__linux__) + +#if defined(__x86_64__) || (defined(__i386__) && defined(__SSE2__)) + +#include <emmintrin.h> + +namespace { + +template<typename T> void asm_write(T *ptr, T val); +template<typename T> T asm_read(T *ptr); +template<typename T> void asm_rep_movs(T *dst, T *src, size_t n); + +} // End of anonymous namespace + +#endif // defined(__x86_64__) || (defined(__i386__) && defined(__SSE2__)) + +#if defined(__x86_64__) + +namespace { + +#define DECLARE_ASM_WRITE(Type, Size, Mov, Reg) \ +template<> void asm_write<Type>(Type *ptr, Type val) { \ + __asm__( \ + Mov " %[val], (%[ptr]) \n\t" \ + : \ + : [ptr] "r" (ptr), [val] Reg (val) \ + : "memory" \ + ); \ +} + +#define DECLARE_ASM_READ(Type, Size, Mov, Reg) \ +template<> Type asm_read<Type>(Type *ptr) { \ + Type res; \ + __asm__( \ + Mov " (%[ptr]), %[res] \n\t" \ + : [res] Reg (res) \ + : [ptr] "r" (ptr) \ + : "memory" \ + ); \ + return res; \ +} + +#define DECLARE_ASM_REP_MOVS(Type, Movs) \ + template <> void asm_rep_movs<Type>(Type * dst, Type * src, size_t size) { \ + __asm__("rep " Movs " \n\t" \ + : \ + : "D"(dst), "S"(src), "c"(size) \ + : "rsi", "rdi", "rcx", "memory"); \ + } + +DECLARE_ASM_WRITE(U8, "8", "movq", "r"); +DECLARE_ASM_READ(U8, "8", "movq", "=r"); +DECLARE_ASM_REP_MOVS(U8, "movsq"); + +} // End of anonymous namespace + +#endif // defined(__x86_64__) + +#if defined(__i386__) && defined(__SSE2__) + +namespace { + +#define DECLARE_ASM_WRITE(Type, Size, Mov, Reg) \ +template<> void asm_write<Type>(Type *ptr, Type val) { \ + __asm__( \ + Mov " %[val], (%[ptr]) \n\t" \ + : \ + : [ptr] "r" (ptr), [val] Reg (val) \ + : "memory" \ + ); \ +} + +#define DECLARE_ASM_READ(Type, Size, Mov, Reg) \ +template<> Type asm_read<Type>(Type *ptr) { \ + Type res; \ + __asm__( \ + Mov " (%[ptr]), %[res] \n\t" \ + : [res] Reg (res) \ + : [ptr] "r" (ptr) \ + : "memory" \ + ); \ + return 
res; \ +} + +#define DECLARE_ASM_REP_MOVS(Type, Movs) \ + template <> void asm_rep_movs<Type>(Type * dst, Type * src, size_t size) { \ + __asm__("rep " Movs " \n\t" \ + : \ + : "D"(dst), "S"(src), "c"(size) \ + : "esi", "edi", "ecx", "memory"); \ + } + +} // End of anonymous namespace + +#endif // defined(__i386__) && defined(__SSE2__) + +#if defined(__x86_64__) || (defined(__i386__) && defined(__SSE2__)) + +namespace { + +DECLARE_ASM_WRITE(U1, "1", "movb", "r"); +DECLARE_ASM_WRITE(U2, "2", "movw", "r"); +DECLARE_ASM_WRITE(U4, "4", "movl", "r"); +DECLARE_ASM_WRITE(__m128i, "16", "movaps", "x"); + +DECLARE_ASM_READ(U1, "1", "movb", "=r"); +DECLARE_ASM_READ(U2, "2", "movw", "=r"); +DECLARE_ASM_READ(U4, "4", "movl", "=r"); +DECLARE_ASM_READ(__m128i, "16", "movaps", "=x"); + +DECLARE_ASM_REP_MOVS(U1, "movsb"); +DECLARE_ASM_REP_MOVS(U2, "movsw"); +DECLARE_ASM_REP_MOVS(U4, "movsl"); + +template<typename T> void TestAsmWrite(const char *DeathPattern) { + T *buf = new T; + EXPECT_DEATH(asm_write(&buf[1], static_cast<T>(0)), DeathPattern); + T var = 0x12; + asm_write(&var, static_cast<T>(0x21)); + ASSERT_EQ(static_cast<T>(0x21), var); + delete buf; +} + +template<> void TestAsmWrite<__m128i>(const char *DeathPattern) { + char *buf = new char[16]; + char *p = buf + 16; + if (((uintptr_t) p % 16) != 0) + p = buf + 8; + assert(((uintptr_t) p % 16) == 0); + __m128i val = _mm_set1_epi16(0x1234); + EXPECT_DEATH(asm_write<__m128i>((__m128i*) p, val), DeathPattern); + __m128i var = _mm_set1_epi16(0x4321); + asm_write(&var, val); + ASSERT_EQ(0x1234, _mm_extract_epi16(var, 0)); + delete [] buf; +} + +template<typename T> void TestAsmRead(const char *DeathPattern) { + T *buf = new T; + EXPECT_DEATH(asm_read(&buf[1]), DeathPattern); + T var = 0x12; + ASSERT_EQ(static_cast<T>(0x12), asm_read(&var)); + delete buf; +} + +template<> void TestAsmRead<__m128i>(const char *DeathPattern) { + char *buf = new char[16]; + char *p = buf + 16; + if (((uintptr_t) p % 16) != 0) + p = buf + 8; + 
assert(((uintptr_t) p % 16) == 0); + EXPECT_DEATH(asm_read<__m128i>((__m128i*) p), DeathPattern); + __m128i val = _mm_set1_epi16(0x1234); + ASSERT_EQ(0x1234, _mm_extract_epi16(asm_read(&val), 0)); + delete [] buf; +} + +U4 AsmLoad(U4 *a) { + U4 r; + __asm__("movl (%[a]), %[r] \n\t" : [r] "=r" (r) : [a] "r" (a) : "memory"); + return r; +} + +void AsmStore(U4 r, U4 *a) { + __asm__("movl %[r], (%[a]) \n\t" : : [a] "r" (a), [r] "r" (r) : "memory"); +} + +template <typename T> +void TestAsmRepMovs(const char *DeathPatternRead, + const char *DeathPatternWrite) { + T src_good[4] = { 0x0, 0x1, 0x2, 0x3 }; + T dst_good[4] = {}; + asm_rep_movs(dst_good, src_good, 4); + ASSERT_EQ(static_cast<T>(0x0), dst_good[0]); + ASSERT_EQ(static_cast<T>(0x1), dst_good[1]); + ASSERT_EQ(static_cast<T>(0x2), dst_good[2]); + ASSERT_EQ(static_cast<T>(0x3), dst_good[3]); + + T dst_bad[3]; + EXPECT_DEATH(asm_rep_movs(dst_bad, src_good, 4), DeathPatternWrite); + + T src_bad[3] = { 0x0, 0x1, 0x2 }; + EXPECT_DEATH(asm_rep_movs(dst_good, src_bad, 4), DeathPatternRead); + + T* dp = dst_bad + 4; + T* sp = src_bad + 4; + asm_rep_movs(dp, sp, 0); +} + +} // End of anonymous namespace + +TEST(AddressSanitizer, asm_load_store) { + U4* buf = new U4[2]; + EXPECT_DEATH(AsmLoad(&buf[3]), "READ of size 4"); + EXPECT_DEATH(AsmStore(0x1234, &buf[3]), "WRITE of size 4"); + delete [] buf; +} + +TEST(AddressSanitizer, asm_rw) { + TestAsmWrite<U1>("WRITE of size 1"); + TestAsmWrite<U2>("WRITE of size 2"); + TestAsmWrite<U4>("WRITE of size 4"); +#if defined(__x86_64__) + TestAsmWrite<U8>("WRITE of size 8"); +#endif // defined(__x86_64__) + TestAsmWrite<__m128i>("WRITE of size 16"); + + TestAsmRead<U1>("READ of size 1"); + TestAsmRead<U2>("READ of size 2"); + TestAsmRead<U4>("READ of size 4"); +#if defined(__x86_64__) + TestAsmRead<U8>("READ of size 8"); +#endif // defined(__x86_64__) + TestAsmRead<__m128i>("READ of size 16"); +} + +TEST(AddressSanitizer, asm_flags) { + long magic = 0x1234; + long r = 0x0; + +#if 
defined(__x86_64__) + __asm__("xorq %%rax, %%rax \n\t" + "movq (%[p]), %%rax \n\t" + "sete %%al \n\t" + "movzbq %%al, %[r] \n\t" + : [r] "=r"(r) + : [p] "r"(&magic) + : "rax", "memory"); +#else + __asm__("xorl %%eax, %%eax \n\t" + "movl (%[p]), %%eax \n\t" + "sete %%al \n\t" + "movzbl %%al, %[r] \n\t" + : [r] "=r"(r) + : [p] "r"(&magic) + : "eax", "memory"); +#endif // defined(__x86_64__) + + ASSERT_EQ(0x1, r); +} + +TEST(AddressSanitizer, asm_rep_movs) { + TestAsmRepMovs<U1>("READ of size 1", "WRITE of size 1"); + TestAsmRepMovs<U2>("READ of size 2", "WRITE of size 2"); + TestAsmRepMovs<U4>("READ of size 4", "WRITE of size 4"); +#if defined(__x86_64__) + TestAsmRepMovs<U8>("READ of size 8", "WRITE of size 8"); +#endif // defined(__x86_64__) +} + +#endif // defined(__x86_64__) || (defined(__i386__) && defined(__SSE2__)) + +#endif // defined(__linux__) diff --git a/contrib/compiler-rt/lib/asan/tests/asan_benchmarks_test.cc b/contrib/compiler-rt/lib/asan/tests/asan_benchmarks_test.cc new file mode 100644 index 000000000000..fc522de475fa --- /dev/null +++ b/contrib/compiler-rt/lib/asan/tests/asan_benchmarks_test.cc @@ -0,0 +1,85 @@ +//===-- asan_benchmarks_test.cc ----------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Some benchmarks for the instrumented code. +//===----------------------------------------------------------------------===// + +#include "asan_test_utils.h" + +template<class T> +__attribute__((noinline)) +static void ManyAccessFunc(T *x, size_t n_elements, size_t n_iter) { + for (size_t iter = 0; iter < n_iter; iter++) { + break_optimization(0); + // hand unroll the loop to stress the reg alloc. 
+ for (size_t i = 0; i <= n_elements - 16; i += 16) { + x[i + 0] = i; + x[i + 1] = i; + x[i + 2] = i; + x[i + 3] = i; + x[i + 4] = i; + x[i + 5] = i; + x[i + 6] = i; + x[i + 7] = i; + x[i + 8] = i; + x[i + 9] = i; + x[i + 10] = i; + x[i + 11] = i; + x[i + 12] = i; + x[i + 13] = i; + x[i + 14] = i; + x[i + 15] = i; + } + } +} + +TEST(AddressSanitizer, ManyAccessBenchmark) { + size_t kLen = 1024; + int *int_array = new int[kLen]; + ManyAccessFunc(int_array, kLen, 1 << 24); + delete [] int_array; +} + +// access 7 char elements in a 7 byte array (i.e. on the border). +__attribute__((noinline)) +static void BorderAccessFunc(char *x, size_t n_iter) { + for (size_t iter = 0; iter < n_iter; iter++) { + break_optimization(x); + x[0] = 0; + x[1] = 0; + x[2] = 0; + x[3] = 0; + x[4] = 0; + x[5] = 0; + x[6] = 0; + } +} + +TEST(AddressSanitizer, BorderAccessBenchmark) { + char *char_7_array = new char[7]; + BorderAccessFunc(char_7_array, 1 << 30); + delete [] char_7_array; +} + +static void FunctionWithLargeStack() { + int stack[1000]; + Ident(stack); +} + +TEST(AddressSanitizer, FakeStackBenchmark) { + for (int i = 0; i < 10000000; i++) + Ident(&FunctionWithLargeStack)(); +} + +int main(int argc, char **argv) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/contrib/compiler-rt/lib/asan/tests/asan_exceptions_test.cc b/contrib/compiler-rt/lib/asan/tests/asan_exceptions_test.cc new file mode 100644 index 000000000000..ecd406de7561 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/tests/asan_exceptions_test.cc @@ -0,0 +1,27 @@ +// See http://llvm.org/bugs/show_bug.cgi?id=11468 +#include <stdio.h> +#include <string> + +class Action { + public: + Action() {} + void PrintString(const std::string& msg) const { + fprintf(stderr, "%s\n", msg.c_str()); + } + void Throw(const char& arg) const { + PrintString("PrintString called!"); // this line is important + throw arg; + } +}; + +int main() { + const Action a; + fprintf(stderr, "&a before = %p\n", &a); + 
try { + a.Throw('c'); + } catch(const char&) { + fprintf(stderr, "&a in catch = %p\n", &a); + } + fprintf(stderr, "&a final = %p\n", &a); + return 0; +} diff --git a/contrib/compiler-rt/lib/asan/tests/asan_fake_stack_test.cc b/contrib/compiler-rt/lib/asan/tests/asan_fake_stack_test.cc new file mode 100644 index 000000000000..516142f0c3b7 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/tests/asan_fake_stack_test.cc @@ -0,0 +1,152 @@ +//===-- asan_fake_stack_test.cc -------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Tests for FakeStack. +// This test file should be compiled w/o asan instrumentation. +//===----------------------------------------------------------------------===// + +#include "asan_fake_stack.h" +#include "asan_test_utils.h" +#include "sanitizer_common/sanitizer_common.h" + +#include <assert.h> +#include <stdlib.h> +#include <stdio.h> + +#include <map> + +namespace __asan { + +TEST(FakeStack, FlagsSize) { + EXPECT_EQ(FakeStack::SizeRequiredForFlags(10), 1U << 5); + EXPECT_EQ(FakeStack::SizeRequiredForFlags(11), 1U << 6); + EXPECT_EQ(FakeStack::SizeRequiredForFlags(20), 1U << 15); +} + +TEST(FakeStack, RequiredSize) { + // for (int i = 15; i < 20; i++) { + // uptr alloc_size = FakeStack::RequiredSize(i); + // printf("%zdK ==> %zd\n", 1 << (i - 10), alloc_size); + // } + EXPECT_EQ(FakeStack::RequiredSize(15), 365568U); + EXPECT_EQ(FakeStack::RequiredSize(16), 727040U); + EXPECT_EQ(FakeStack::RequiredSize(17), 1449984U); + EXPECT_EQ(FakeStack::RequiredSize(18), 2895872U); + EXPECT_EQ(FakeStack::RequiredSize(19), 5787648U); +} + +TEST(FakeStack, FlagsOffset) { + for (uptr stack_size_log = 15; stack_size_log <= 20; stack_size_log++) 
{ + uptr stack_size = 1UL << stack_size_log; + uptr offset = 0; + for (uptr class_id = 0; class_id < FakeStack::kNumberOfSizeClasses; + class_id++) { + uptr frame_size = FakeStack::BytesInSizeClass(class_id); + uptr num_flags = stack_size / frame_size; + EXPECT_EQ(offset, FakeStack::FlagsOffset(stack_size_log, class_id)); + // printf("%zd: %zd => %zd %zd\n", stack_size_log, class_id, offset, + // FakeStack::FlagsOffset(stack_size_log, class_id)); + offset += num_flags; + } + } +} + +#if !defined(_WIN32) // FIXME: Fails due to OOM on Windows. +TEST(FakeStack, CreateDestroy) { + for (int i = 0; i < 1000; i++) { + for (uptr stack_size_log = 20; stack_size_log <= 22; stack_size_log++) { + FakeStack *fake_stack = FakeStack::Create(stack_size_log); + fake_stack->Destroy(0); + } + } +} +#endif + +TEST(FakeStack, ModuloNumberOfFrames) { + EXPECT_EQ(FakeStack::ModuloNumberOfFrames(15, 0, 0), 0U); + EXPECT_EQ(FakeStack::ModuloNumberOfFrames(15, 0, (1<<15)), 0U); + EXPECT_EQ(FakeStack::ModuloNumberOfFrames(15, 0, (1<<10)), 0U); + EXPECT_EQ(FakeStack::ModuloNumberOfFrames(15, 0, (1<<9)), 0U); + EXPECT_EQ(FakeStack::ModuloNumberOfFrames(15, 0, (1<<8)), 1U<<8); + EXPECT_EQ(FakeStack::ModuloNumberOfFrames(15, 0, (1<<15) + 1), 1U); + + EXPECT_EQ(FakeStack::ModuloNumberOfFrames(15, 1, 0), 0U); + EXPECT_EQ(FakeStack::ModuloNumberOfFrames(15, 1, 1<<9), 0U); + EXPECT_EQ(FakeStack::ModuloNumberOfFrames(15, 1, 1<<8), 0U); + EXPECT_EQ(FakeStack::ModuloNumberOfFrames(15, 1, 1<<7), 1U<<7); + + EXPECT_EQ(FakeStack::ModuloNumberOfFrames(15, 5, 0), 0U); + EXPECT_EQ(FakeStack::ModuloNumberOfFrames(15, 5, 1), 1U); + EXPECT_EQ(FakeStack::ModuloNumberOfFrames(15, 5, 15), 15U); + EXPECT_EQ(FakeStack::ModuloNumberOfFrames(15, 5, 16), 0U); + EXPECT_EQ(FakeStack::ModuloNumberOfFrames(15, 5, 17), 1U); +} + +TEST(FakeStack, GetFrame) { + const uptr stack_size_log = 20; + const uptr stack_size = 1 << stack_size_log; + FakeStack *fs = FakeStack::Create(stack_size_log); + u8 *base = 
fs->GetFrame(stack_size_log, 0, 0); + EXPECT_EQ(base, reinterpret_cast<u8 *>(fs) + + fs->SizeRequiredForFlags(stack_size_log) + 4096); + EXPECT_EQ(base + 0*stack_size + 64 * 7, fs->GetFrame(stack_size_log, 0, 7U)); + EXPECT_EQ(base + 1*stack_size + 128 * 3, fs->GetFrame(stack_size_log, 1, 3U)); + EXPECT_EQ(base + 2*stack_size + 256 * 5, fs->GetFrame(stack_size_log, 2, 5U)); + fs->Destroy(0); +} + +TEST(FakeStack, Allocate) { + const uptr stack_size_log = 19; + FakeStack *fs = FakeStack::Create(stack_size_log); + std::map<FakeFrame *, uptr> s; + for (int iter = 0; iter < 2; iter++) { + s.clear(); + for (uptr cid = 0; cid < FakeStack::kNumberOfSizeClasses; cid++) { + uptr n = FakeStack::NumberOfFrames(stack_size_log, cid); + uptr bytes_in_class = FakeStack::BytesInSizeClass(cid); + for (uptr j = 0; j < n; j++) { + FakeFrame *ff = fs->Allocate(stack_size_log, cid, 0); + uptr x = reinterpret_cast<uptr>(ff); + EXPECT_TRUE(s.insert(std::make_pair(ff, cid)).second); + EXPECT_EQ(x, fs->AddrIsInFakeStack(x)); + EXPECT_EQ(x, fs->AddrIsInFakeStack(x + 1)); + EXPECT_EQ(x, fs->AddrIsInFakeStack(x + bytes_in_class - 1)); + EXPECT_NE(x, fs->AddrIsInFakeStack(x + bytes_in_class)); + } + // We are out of fake stack, so Allocate should return 0. + EXPECT_EQ(0UL, fs->Allocate(stack_size_log, cid, 0)); + } + for (std::map<FakeFrame *, uptr>::iterator it = s.begin(); it != s.end(); + ++it) { + fs->Deallocate(reinterpret_cast<uptr>(it->first), it->second); + } + } + fs->Destroy(0); +} + +static void RecursiveFunction(FakeStack *fs, int depth) { + uptr class_id = depth / 3; + FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, 0); + if (depth) { + RecursiveFunction(fs, depth - 1); + RecursiveFunction(fs, depth - 1); + } + fs->Deallocate(reinterpret_cast<uptr>(ff), class_id); +} + +TEST(FakeStack, RecursiveStressTest) { + const uptr stack_size_log = 16; + FakeStack *fs = FakeStack::Create(stack_size_log); + RecursiveFunction(fs, 22); // with 26 runs for 2-3 seconds. 
+ fs->Destroy(0); +} + +} // namespace __asan diff --git a/contrib/compiler-rt/lib/asan/tests/asan_globals_test.cc b/contrib/compiler-rt/lib/asan/tests/asan_globals_test.cc new file mode 100644 index 000000000000..5042ef07378d --- /dev/null +++ b/contrib/compiler-rt/lib/asan/tests/asan_globals_test.cc @@ -0,0 +1,45 @@ +//===-- asan_globals_test.cc ----------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Some globals in a separate file. +//===----------------------------------------------------------------------===// +#include "asan_test_utils.h" + +char glob1[1]; +char glob2[2]; +char glob3[3]; +char glob4[4]; +char glob5[5]; +char glob6[6]; +char glob7[7]; +char glob8[8]; +char glob9[9]; +char glob10[10]; +char glob11[11]; +char glob12[12]; +char glob13[13]; +char glob14[14]; +char glob15[15]; +char glob16[16]; +char glob17[17]; +char glob1000[1000]; +char glob10000[10000]; +char glob100000[100000]; + +static char static10[10]; + +int GlobalsTest(int zero) { + static char func_static15[15]; + glob5[zero] = 0; + static10[zero] = 0; + func_static15[zero] = 0; + return glob5[1] + func_static15[2]; +} diff --git a/contrib/compiler-rt/lib/asan/tests/asan_interface_test.cc b/contrib/compiler-rt/lib/asan/tests/asan_interface_test.cc new file mode 100644 index 000000000000..50fdf1119f0b --- /dev/null +++ b/contrib/compiler-rt/lib/asan/tests/asan_interface_test.cc @@ -0,0 +1,433 @@ +//===-- asan_interface_test.cc --------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +//===----------------------------------------------------------------------===// +#include "asan_test_utils.h" +#include <sanitizer/allocator_interface.h> +#include <sanitizer/asan_interface.h> + +TEST(AddressSanitizerInterface, GetEstimatedAllocatedSize) { + EXPECT_EQ(0U, __sanitizer_get_estimated_allocated_size(0)); + const size_t sizes[] = { 1, 30, 1<<30 }; + for (size_t i = 0; i < 3; i++) { + EXPECT_EQ(sizes[i], __sanitizer_get_estimated_allocated_size(sizes[i])); + } +} + +static const char* kGetAllocatedSizeErrorMsg = + "attempting to call __sanitizer_get_allocated_size"; + +TEST(AddressSanitizerInterface, GetAllocatedSizeAndOwnershipTest) { + const size_t kArraySize = 100; + char *array = Ident((char*)malloc(kArraySize)); + int *int_ptr = Ident(new int); + + // Allocated memory is owned by allocator. Allocated size should be + // equal to requested size. + EXPECT_EQ(true, __sanitizer_get_ownership(array)); + EXPECT_EQ(kArraySize, __sanitizer_get_allocated_size(array)); + EXPECT_EQ(true, __sanitizer_get_ownership(int_ptr)); + EXPECT_EQ(sizeof(int), __sanitizer_get_allocated_size(int_ptr)); + + // We cannot call GetAllocatedSize from the memory we didn't map, + // and from the interior pointers (not returned by previous malloc). + void *wild_addr = (void*)0x1; + EXPECT_FALSE(__sanitizer_get_ownership(wild_addr)); + EXPECT_DEATH(__sanitizer_get_allocated_size(wild_addr), + kGetAllocatedSizeErrorMsg); + EXPECT_FALSE(__sanitizer_get_ownership(array + kArraySize / 2)); + EXPECT_DEATH(__sanitizer_get_allocated_size(array + kArraySize / 2), + kGetAllocatedSizeErrorMsg); + + // NULL is not owned, but is a valid argument for + // __sanitizer_get_allocated_size(). 
+ EXPECT_FALSE(__sanitizer_get_ownership(NULL)); + EXPECT_EQ(0U, __sanitizer_get_allocated_size(NULL)); + + // When memory is freed, it's not owned, and call to GetAllocatedSize + // is forbidden. + free(array); + EXPECT_FALSE(__sanitizer_get_ownership(array)); + EXPECT_DEATH(__sanitizer_get_allocated_size(array), + kGetAllocatedSizeErrorMsg); + delete int_ptr; + + void *zero_alloc = Ident(malloc(0)); + if (zero_alloc != 0) { + // If malloc(0) is not null, this pointer is owned and should have valid + // allocated size. + EXPECT_TRUE(__sanitizer_get_ownership(zero_alloc)); + // Allocated size is 0 or 1 depending on the allocator used. + EXPECT_LT(__sanitizer_get_allocated_size(zero_alloc), 2U); + } + free(zero_alloc); +} + +TEST(AddressSanitizerInterface, GetCurrentAllocatedBytesTest) { + size_t before_malloc, after_malloc, after_free; + char *array; + const size_t kMallocSize = 100; + before_malloc = __sanitizer_get_current_allocated_bytes(); + + array = Ident((char*)malloc(kMallocSize)); + after_malloc = __sanitizer_get_current_allocated_bytes(); + EXPECT_EQ(before_malloc + kMallocSize, after_malloc); + + free(array); + after_free = __sanitizer_get_current_allocated_bytes(); + EXPECT_EQ(before_malloc, after_free); +} + +TEST(AddressSanitizerInterface, GetHeapSizeTest) { + // asan_allocator2 does not keep huge chunks in free list, but unmaps them. + // The chunk should be greater than the quarantine size, + // otherwise it will be stuck in quarantine instead of being unmapped. + static const size_t kLargeMallocSize = (1 << 28) + 1; // 256M + free(Ident(malloc(kLargeMallocSize))); // Drain quarantine. 
+ size_t old_heap_size = __sanitizer_get_heap_size(); + for (int i = 0; i < 3; i++) { + // fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize); + free(Ident(malloc(kLargeMallocSize))); + EXPECT_EQ(old_heap_size, __sanitizer_get_heap_size()); + } +} + +static const size_t kManyThreadsMallocSizes[] = {5, 1UL<<10, 1UL<<14, 357}; +static const size_t kManyThreadsIterations = 250; +static const size_t kManyThreadsNumThreads = + (SANITIZER_WORDSIZE == 32) ? 40 : 200; + +static void *ManyThreadsWithStatsWorker(void *arg) { + (void)arg; + for (size_t iter = 0; iter < kManyThreadsIterations; iter++) { + for (size_t size_index = 0; size_index < 4; size_index++) { + free(Ident(malloc(kManyThreadsMallocSizes[size_index]))); + } + } + // Just one large allocation. + free(Ident(malloc(1 << 20))); + return 0; +} + +TEST(AddressSanitizerInterface, ManyThreadsWithStatsStressTest) { + size_t before_test, after_test, i; + pthread_t threads[kManyThreadsNumThreads]; + before_test = __sanitizer_get_current_allocated_bytes(); + for (i = 0; i < kManyThreadsNumThreads; i++) { + PTHREAD_CREATE(&threads[i], 0, + (void* (*)(void *x))ManyThreadsWithStatsWorker, (void*)i); + } + for (i = 0; i < kManyThreadsNumThreads; i++) { + PTHREAD_JOIN(threads[i], 0); + } + after_test = __sanitizer_get_current_allocated_bytes(); + // ASan stats also reflect memory usage of internal ASan RTL structs, + // so we can't check for equality here. 
+ EXPECT_LT(after_test, before_test + (1UL<<20)); +} + +static void DoDoubleFree() { + int *x = Ident(new int); + delete Ident(x); + delete Ident(x); +} + +TEST(AddressSanitizerInterface, ExitCode) { + int original_exit_code = __asan_set_error_exit_code(7); + EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(7), ""); + EXPECT_EQ(7, __asan_set_error_exit_code(8)); + EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(8), ""); + EXPECT_EQ(8, __asan_set_error_exit_code(original_exit_code)); + EXPECT_EXIT(DoDoubleFree(), + ::testing::ExitedWithCode(original_exit_code), ""); +} + +static void MyDeathCallback() { + fprintf(stderr, "MyDeathCallback\n"); + fflush(0); // On Windows, stderr doesn't flush on crash. +} + +TEST(AddressSanitizerInterface, DeathCallbackTest) { + __asan_set_death_callback(MyDeathCallback); + EXPECT_DEATH(DoDoubleFree(), "MyDeathCallback"); + __asan_set_death_callback(NULL); +} + +static const char* kUseAfterPoisonErrorMessage = "use-after-poison"; + +#define GOOD_ACCESS(ptr, offset) \ + EXPECT_FALSE(__asan_address_is_poisoned(ptr + offset)) + +#define BAD_ACCESS(ptr, offset) \ + EXPECT_TRUE(__asan_address_is_poisoned(ptr + offset)) + +TEST(AddressSanitizerInterface, SimplePoisonMemoryRegionTest) { + char *array = Ident((char*)malloc(120)); + // poison array[40..80) + __asan_poison_memory_region(array + 40, 40); + GOOD_ACCESS(array, 39); + GOOD_ACCESS(array, 80); + BAD_ACCESS(array, 40); + BAD_ACCESS(array, 60); + BAD_ACCESS(array, 79); + char value; + EXPECT_DEATH(value = Ident(array[40]), kUseAfterPoisonErrorMessage); + __asan_unpoison_memory_region(array + 40, 40); + // access previously poisoned memory. 
+ GOOD_ACCESS(array, 40); + GOOD_ACCESS(array, 79); + free(array); +} + +TEST(AddressSanitizerInterface, OverlappingPoisonMemoryRegionTest) { + char *array = Ident((char*)malloc(120)); + // Poison [0..40) and [80..120) + __asan_poison_memory_region(array, 40); + __asan_poison_memory_region(array + 80, 40); + BAD_ACCESS(array, 20); + GOOD_ACCESS(array, 60); + BAD_ACCESS(array, 100); + // Poison whole array - [0..120) + __asan_poison_memory_region(array, 120); + BAD_ACCESS(array, 60); + // Unpoison [24..96) + __asan_unpoison_memory_region(array + 24, 72); + BAD_ACCESS(array, 23); + GOOD_ACCESS(array, 24); + GOOD_ACCESS(array, 60); + GOOD_ACCESS(array, 95); + BAD_ACCESS(array, 96); + free(array); +} + +TEST(AddressSanitizerInterface, PushAndPopWithPoisoningTest) { + // Vector of capacity 20 + char *vec = Ident((char*)malloc(20)); + __asan_poison_memory_region(vec, 20); + for (size_t i = 0; i < 7; i++) { + // Simulate push_back. + __asan_unpoison_memory_region(vec + i, 1); + GOOD_ACCESS(vec, i); + BAD_ACCESS(vec, i + 1); + } + for (size_t i = 7; i > 0; i--) { + // Simulate pop_back. + __asan_poison_memory_region(vec + i - 1, 1); + BAD_ACCESS(vec, i - 1); + if (i > 1) GOOD_ACCESS(vec, i - 2); + } + free(vec); +} + +// Make sure that each aligned block of size "2^granularity" doesn't have +// "true" value before "false" value. 
+static void MakeShadowValid(bool *shadow, int length, int granularity) { + bool can_be_poisoned = true; + for (int i = length - 1; i >= 0; i--) { + if (!shadow[i]) + can_be_poisoned = false; + if (!can_be_poisoned) + shadow[i] = false; + if (i % (1 << granularity) == 0) { + can_be_poisoned = true; + } + } +} + +TEST(AddressSanitizerInterface, PoisoningStressTest) { + const size_t kSize = 24; + bool expected[kSize]; + char *arr = Ident((char*)malloc(kSize)); + for (size_t l1 = 0; l1 < kSize; l1++) { + for (size_t s1 = 1; l1 + s1 <= kSize; s1++) { + for (size_t l2 = 0; l2 < kSize; l2++) { + for (size_t s2 = 1; l2 + s2 <= kSize; s2++) { + // Poison [l1, l1+s1), [l2, l2+s2) and check result. + __asan_unpoison_memory_region(arr, kSize); + __asan_poison_memory_region(arr + l1, s1); + __asan_poison_memory_region(arr + l2, s2); + memset(expected, false, kSize); + memset(expected + l1, true, s1); + MakeShadowValid(expected, kSize, /*granularity*/ 3); + memset(expected + l2, true, s2); + MakeShadowValid(expected, kSize, /*granularity*/ 3); + for (size_t i = 0; i < kSize; i++) { + ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i)); + } + // Unpoison [l1, l1+s1) and [l2, l2+s2) and check result. 
+ __asan_poison_memory_region(arr, kSize); + __asan_unpoison_memory_region(arr + l1, s1); + __asan_unpoison_memory_region(arr + l2, s2); + memset(expected, true, kSize); + memset(expected + l1, false, s1); + MakeShadowValid(expected, kSize, /*granularity*/ 3); + memset(expected + l2, false, s2); + MakeShadowValid(expected, kSize, /*granularity*/ 3); + for (size_t i = 0; i < kSize; i++) { + ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i)); + } + } + } + } + } + free(arr); +} + +TEST(AddressSanitizerInterface, GlobalRedzones) { + GOOD_ACCESS(glob1, 1 - 1); + GOOD_ACCESS(glob2, 2 - 1); + GOOD_ACCESS(glob3, 3 - 1); + GOOD_ACCESS(glob4, 4 - 1); + GOOD_ACCESS(glob5, 5 - 1); + GOOD_ACCESS(glob6, 6 - 1); + GOOD_ACCESS(glob7, 7 - 1); + GOOD_ACCESS(glob8, 8 - 1); + GOOD_ACCESS(glob9, 9 - 1); + GOOD_ACCESS(glob10, 10 - 1); + GOOD_ACCESS(glob11, 11 - 1); + GOOD_ACCESS(glob12, 12 - 1); + GOOD_ACCESS(glob13, 13 - 1); + GOOD_ACCESS(glob14, 14 - 1); + GOOD_ACCESS(glob15, 15 - 1); + GOOD_ACCESS(glob16, 16 - 1); + GOOD_ACCESS(glob17, 17 - 1); + GOOD_ACCESS(glob1000, 1000 - 1); + GOOD_ACCESS(glob10000, 10000 - 1); + GOOD_ACCESS(glob100000, 100000 - 1); + + BAD_ACCESS(glob1, 1); + BAD_ACCESS(glob2, 2); + BAD_ACCESS(glob3, 3); + BAD_ACCESS(glob4, 4); + BAD_ACCESS(glob5, 5); + BAD_ACCESS(glob6, 6); + BAD_ACCESS(glob7, 7); + BAD_ACCESS(glob8, 8); + BAD_ACCESS(glob9, 9); + BAD_ACCESS(glob10, 10); + BAD_ACCESS(glob11, 11); + BAD_ACCESS(glob12, 12); + BAD_ACCESS(glob13, 13); + BAD_ACCESS(glob14, 14); + BAD_ACCESS(glob15, 15); + BAD_ACCESS(glob16, 16); + BAD_ACCESS(glob17, 17); + BAD_ACCESS(glob1000, 1000); + BAD_ACCESS(glob1000, 1100); // Redzone is at least 101 bytes. + BAD_ACCESS(glob10000, 10000); + BAD_ACCESS(glob10000, 11000); // Redzone is at least 1001 bytes. + BAD_ACCESS(glob100000, 100000); + BAD_ACCESS(glob100000, 110000); // Redzone is at least 10001 bytes. 
+} + +TEST(AddressSanitizerInterface, PoisonedRegion) { + size_t rz = 16; + for (size_t size = 1; size <= 64; size++) { + char *p = new char[size]; + for (size_t beg = 0; beg < size + rz; beg++) { + for (size_t end = beg; end < size + rz; end++) { + void *first_poisoned = __asan_region_is_poisoned(p + beg, end - beg); + if (beg == end) { + EXPECT_FALSE(first_poisoned); + } else if (beg < size && end <= size) { + EXPECT_FALSE(first_poisoned); + } else if (beg >= size) { + EXPECT_EQ(p + beg, first_poisoned); + } else { + EXPECT_GT(end, size); + EXPECT_EQ(p + size, first_poisoned); + } + } + } + delete [] p; + } +} + +// This is a performance benchmark for manual runs. +// asan's memset interceptor calls mem_is_zero for the entire shadow region. +// the profile should look like this: +// 89.10% [.] __memset_sse2 +// 10.50% [.] __sanitizer::mem_is_zero +// I.e. mem_is_zero should consume ~ SHADOW_GRANULARITY less CPU cycles +// than memset itself. +TEST(AddressSanitizerInterface, DISABLED_StressLargeMemset) { + size_t size = 1 << 20; + char *x = new char[size]; + for (int i = 0; i < 100000; i++) + Ident(memset)(x, 0, size); + delete [] x; +} + +// Same here, but we run memset with small sizes. 
+TEST(AddressSanitizerInterface, DISABLED_StressSmallMemset) { + size_t size = 32; + char *x = new char[size]; + for (int i = 0; i < 100000000; i++) + Ident(memset)(x, 0, size); + delete [] x; +} +static const char *kInvalidPoisonMessage = "invalid-poison-memory-range"; +static const char *kInvalidUnpoisonMessage = "invalid-unpoison-memory-range"; + +TEST(AddressSanitizerInterface, DISABLED_InvalidPoisonAndUnpoisonCallsTest) { + char *array = Ident((char*)malloc(120)); + __asan_unpoison_memory_region(array, 120); + // Try to unpoison not owned memory + EXPECT_DEATH(__asan_unpoison_memory_region(array, 121), + kInvalidUnpoisonMessage); + EXPECT_DEATH(__asan_unpoison_memory_region(array - 1, 120), + kInvalidUnpoisonMessage); + + __asan_poison_memory_region(array, 120); + // Try to poison not owned memory. + EXPECT_DEATH(__asan_poison_memory_region(array, 121), kInvalidPoisonMessage); + EXPECT_DEATH(__asan_poison_memory_region(array - 1, 120), + kInvalidPoisonMessage); + free(array); +} + +#if !defined(_WIN32) // FIXME: This should really be a lit test. 
+static void ErrorReportCallbackOneToZ(const char *report) { + int report_len = strlen(report); + ASSERT_EQ(6, write(2, "ABCDEF", 6)); + ASSERT_EQ(report_len, write(2, report, report_len)); + ASSERT_EQ(6, write(2, "ABCDEF", 6)); + _exit(1); +} + +TEST(AddressSanitizerInterface, SetErrorReportCallbackTest) { + __asan_set_error_report_callback(ErrorReportCallbackOneToZ); + EXPECT_DEATH(__asan_report_error(0, 0, 0, 0, true, 1), + ASAN_PCRE_DOTALL "ABCDEF.*AddressSanitizer.*WRITE.*ABCDEF"); + __asan_set_error_report_callback(NULL); +} +#endif + +TEST(AddressSanitizerInterface, GetOwnershipStressTest) { + std::vector<char *> pointers; + std::vector<size_t> sizes; + const size_t kNumMallocs = 1 << 9; + for (size_t i = 0; i < kNumMallocs; i++) { + size_t size = i * 100 + 1; + pointers.push_back((char*)malloc(size)); + sizes.push_back(size); + } + for (size_t i = 0; i < 4000000; i++) { + EXPECT_FALSE(__sanitizer_get_ownership(&pointers)); + EXPECT_FALSE(__sanitizer_get_ownership((void*)0x1234)); + size_t idx = i % kNumMallocs; + EXPECT_TRUE(__sanitizer_get_ownership(pointers[idx])); + EXPECT_EQ(sizes[idx], __sanitizer_get_allocated_size(pointers[idx])); + } + for (size_t i = 0, n = pointers.size(); i < n; i++) + free(pointers[i]); +} + diff --git a/contrib/compiler-rt/lib/asan/tests/asan_mac_test.cc b/contrib/compiler-rt/lib/asan/tests/asan_mac_test.cc new file mode 100644 index 000000000000..cabdfd711ea2 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/tests/asan_mac_test.cc @@ -0,0 +1,236 @@ +//===-- asan_test_mac.cc --------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. 
+// +//===----------------------------------------------------------------------===// + +#include "asan_test_utils.h" + +#include "asan_mac_test.h" + +#include <malloc/malloc.h> +#include <AvailabilityMacros.h> // For MAC_OS_X_VERSION_* +#include <CoreFoundation/CFString.h> + +TEST(AddressSanitizerMac, CFAllocatorDefaultDoubleFree) { + EXPECT_DEATH( + CFAllocatorDefaultDoubleFree(NULL), + "attempting double-free"); +} + +void CFAllocator_DoubleFreeOnPthread() { + pthread_t child; + PTHREAD_CREATE(&child, NULL, CFAllocatorDefaultDoubleFree, NULL); + PTHREAD_JOIN(child, NULL); // Shouldn't be reached. +} + +TEST(AddressSanitizerMac, CFAllocatorDefaultDoubleFree_ChildPhread) { + EXPECT_DEATH(CFAllocator_DoubleFreeOnPthread(), "attempting double-free"); +} + +namespace { + +void *GLOB; + +void *CFAllocatorAllocateToGlob(void *unused) { + GLOB = CFAllocatorAllocate(NULL, 100, /*hint*/0); + return NULL; +} + +void *CFAllocatorDeallocateFromGlob(void *unused) { + char *p = (char*)GLOB; + p[100] = 'A'; // ASan should report an error here. + CFAllocatorDeallocate(NULL, GLOB); + return NULL; +} + +void CFAllocator_PassMemoryToAnotherThread() { + pthread_t th1, th2; + PTHREAD_CREATE(&th1, NULL, CFAllocatorAllocateToGlob, NULL); + PTHREAD_JOIN(th1, NULL); + PTHREAD_CREATE(&th2, NULL, CFAllocatorDeallocateFromGlob, NULL); + PTHREAD_JOIN(th2, NULL); +} + +TEST(AddressSanitizerMac, CFAllocator_PassMemoryToAnotherThread) { + EXPECT_DEATH(CFAllocator_PassMemoryToAnotherThread(), + "heap-buffer-overflow"); +} + +} // namespace + +// TODO(glider): figure out whether we still need these tests. Is it correct +// to intercept the non-default CFAllocators? +TEST(AddressSanitizerMac, DISABLED_CFAllocatorSystemDefaultDoubleFree) { + EXPECT_DEATH( + CFAllocatorSystemDefaultDoubleFree(), + "attempting double-free"); +} + +// We're intercepting malloc, so kCFAllocatorMalloc is routed to ASan. 
+TEST(AddressSanitizerMac, CFAllocatorMallocDoubleFree) { + EXPECT_DEATH(CFAllocatorMallocDoubleFree(), "attempting double-free"); +} + +TEST(AddressSanitizerMac, DISABLED_CFAllocatorMallocZoneDoubleFree) { + EXPECT_DEATH(CFAllocatorMallocZoneDoubleFree(), "attempting double-free"); +} + +// For libdispatch tests below we check that ASan got to the shadow byte +// legend, i.e. managed to print the thread stacks (this almost certainly +// means that the libdispatch task creation has been intercepted correctly). +TEST(AddressSanitizerMac, GCDDispatchAsync) { + // Make sure the whole ASan report is printed, i.e. that we don't die + // on a CHECK. + EXPECT_DEATH(TestGCDDispatchAsync(), "Shadow byte legend"); +} + +TEST(AddressSanitizerMac, GCDDispatchSync) { + // Make sure the whole ASan report is printed, i.e. that we don't die + // on a CHECK. + EXPECT_DEATH(TestGCDDispatchSync(), "Shadow byte legend"); +} + + +TEST(AddressSanitizerMac, GCDReuseWqthreadsAsync) { + // Make sure the whole ASan report is printed, i.e. that we don't die + // on a CHECK. + EXPECT_DEATH(TestGCDReuseWqthreadsAsync(), "Shadow byte legend"); +} + +TEST(AddressSanitizerMac, GCDReuseWqthreadsSync) { + // Make sure the whole ASan report is printed, i.e. that we don't die + // on a CHECK. + EXPECT_DEATH(TestGCDReuseWqthreadsSync(), "Shadow byte legend"); +} + +TEST(AddressSanitizerMac, GCDDispatchAfter) { + // Make sure the whole ASan report is printed, i.e. that we don't die + // on a CHECK. + EXPECT_DEATH(TestGCDDispatchAfter(), "Shadow byte legend"); +} + +TEST(AddressSanitizerMac, GCDSourceEvent) { + // Make sure the whole ASan report is printed, i.e. that we don't die + // on a CHECK. + EXPECT_DEATH(TestGCDSourceEvent(), "Shadow byte legend"); +} + +TEST(AddressSanitizerMac, GCDSourceCancel) { + // Make sure the whole ASan report is printed, i.e. that we don't die + // on a CHECK. 
+ EXPECT_DEATH(TestGCDSourceCancel(), "Shadow byte legend"); +} + +TEST(AddressSanitizerMac, GCDGroupAsync) { + // Make sure the whole ASan report is printed, i.e. that we don't die + // on a CHECK. + EXPECT_DEATH(TestGCDGroupAsync(), "Shadow byte legend"); +} + +void *MallocIntrospectionLockWorker(void *_) { + const int kNumPointers = 100; + int i; + void *pointers[kNumPointers]; + for (i = 0; i < kNumPointers; i++) { + pointers[i] = malloc(i + 1); + } + for (i = 0; i < kNumPointers; i++) { + free(pointers[i]); + } + + return NULL; +} + +void *MallocIntrospectionLockForker(void *_) { + pid_t result = fork(); + if (result == -1) { + perror("fork"); + } + assert(result != -1); + if (result == 0) { + // Call malloc in the child process to make sure we won't deadlock. + void *ptr = malloc(42); + free(ptr); + exit(0); + } else { + // Return in the parent process. + return NULL; + } +} + +TEST(AddressSanitizerMac, MallocIntrospectionLock) { + // Incorrect implementation of force_lock and force_unlock in our malloc zone + // will cause forked processes to deadlock. + // TODO(glider): need to detect that none of the child processes deadlocked. 
+ const int kNumWorkers = 5, kNumIterations = 100; + int i, iter; + for (iter = 0; iter < kNumIterations; iter++) { + pthread_t workers[kNumWorkers], forker; + for (i = 0; i < kNumWorkers; i++) { + PTHREAD_CREATE(&workers[i], 0, MallocIntrospectionLockWorker, 0); + } + PTHREAD_CREATE(&forker, 0, MallocIntrospectionLockForker, 0); + for (i = 0; i < kNumWorkers; i++) { + PTHREAD_JOIN(workers[i], 0); + } + PTHREAD_JOIN(forker, 0); + } +} + +void *TSDAllocWorker(void *test_key) { + if (test_key) { + void *mem = malloc(10); + pthread_setspecific(*(pthread_key_t*)test_key, mem); + } + return NULL; +} + +TEST(AddressSanitizerMac, DISABLED_TSDWorkqueueTest) { + pthread_t th; + pthread_key_t test_key; + pthread_key_create(&test_key, CallFreeOnWorkqueue); + PTHREAD_CREATE(&th, NULL, TSDAllocWorker, &test_key); + PTHREAD_JOIN(th, NULL); + pthread_key_delete(test_key); +} + +// Test that CFStringCreateCopy does not copy constant strings. +TEST(AddressSanitizerMac, CFStringCreateCopy) { + CFStringRef str = CFSTR("Hello world!\n"); + CFStringRef str2 = CFStringCreateCopy(0, str); + EXPECT_EQ(str, str2); +} + +TEST(AddressSanitizerMac, NSObjectOOB) { + // Make sure that our allocators are used for NSObjects. + EXPECT_DEATH(TestOOBNSObjects(), "heap-buffer-overflow"); +} + +// Make sure that correct pointer is passed to free() when deallocating a +// NSURL object. +// See http://code.google.com/p/address-sanitizer/issues/detail?id=70. +TEST(AddressSanitizerMac, NSURLDeallocation) { + TestNSURLDeallocation(); +} + +// See http://code.google.com/p/address-sanitizer/issues/detail?id=109. 
+TEST(AddressSanitizerMac, Mstats) { + malloc_statistics_t stats1, stats2; + malloc_zone_statistics(/*all zones*/NULL, &stats1); + const size_t kMallocSize = 100000; + void *alloc = Ident(malloc(kMallocSize)); + malloc_zone_statistics(/*all zones*/NULL, &stats2); + EXPECT_GT(stats2.blocks_in_use, stats1.blocks_in_use); + EXPECT_GE(stats2.size_in_use - stats1.size_in_use, kMallocSize); + free(alloc); + // Even the default OSX allocator may not change the stats after free(). +} + diff --git a/contrib/compiler-rt/lib/asan/tests/asan_mac_test.h b/contrib/compiler-rt/lib/asan/tests/asan_mac_test.h new file mode 100644 index 000000000000..441547a5a3dc --- /dev/null +++ b/contrib/compiler-rt/lib/asan/tests/asan_mac_test.h @@ -0,0 +1,19 @@ +extern "C" { + void *CFAllocatorDefaultDoubleFree(void *unused); + void CFAllocatorSystemDefaultDoubleFree(); + void CFAllocatorMallocDoubleFree(); + void CFAllocatorMallocZoneDoubleFree(); + void CallFreeOnWorkqueue(void *mem); + void TestGCDDispatchAsync(); + void TestGCDDispatchSync(); + void TestGCDReuseWqthreadsAsync(); + void TestGCDReuseWqthreadsSync(); + void TestGCDDispatchAfter(); + void TestGCDInTSDDestructor(); + void TestGCDSourceEvent(); + void TestGCDSourceCancel(); + void TestGCDGroupAsync(); + void TestOOBNSObjects(); + void TestNSURLDeallocation(); + void TestPassCFMemoryToAnotherThread(); +} diff --git a/contrib/compiler-rt/lib/asan/tests/asan_mac_test_helpers.mm b/contrib/compiler-rt/lib/asan/tests/asan_mac_test_helpers.mm new file mode 100644 index 000000000000..a7e4b9d1928b --- /dev/null +++ b/contrib/compiler-rt/lib/asan/tests/asan_mac_test_helpers.mm @@ -0,0 +1,240 @@ +// Mac OS X 10.6 or higher only. 
+#include <dispatch/dispatch.h> +#include <pthread.h> // for pthread_yield_np() +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> + +#import <CoreFoundation/CFBase.h> +#import <Foundation/NSObject.h> +#import <Foundation/NSURL.h> + +// This is a (void*)(void*) function so it can be passed to pthread_create. +void *CFAllocatorDefaultDoubleFree(void *unused) { + void *mem = CFAllocatorAllocate(kCFAllocatorDefault, 5, 0); + CFAllocatorDeallocate(kCFAllocatorDefault, mem); + CFAllocatorDeallocate(kCFAllocatorDefault, mem); + return 0; +} + +void CFAllocatorSystemDefaultDoubleFree() { + void *mem = CFAllocatorAllocate(kCFAllocatorSystemDefault, 5, 0); + CFAllocatorDeallocate(kCFAllocatorSystemDefault, mem); + CFAllocatorDeallocate(kCFAllocatorSystemDefault, mem); +} + +void CFAllocatorMallocDoubleFree() { + void *mem = CFAllocatorAllocate(kCFAllocatorMalloc, 5, 0); + CFAllocatorDeallocate(kCFAllocatorMalloc, mem); + CFAllocatorDeallocate(kCFAllocatorMalloc, mem); +} + +void CFAllocatorMallocZoneDoubleFree() { + void *mem = CFAllocatorAllocate(kCFAllocatorMallocZone, 5, 0); + CFAllocatorDeallocate(kCFAllocatorMallocZone, mem); + CFAllocatorDeallocate(kCFAllocatorMallocZone, mem); +} + +__attribute__((noinline)) +void access_memory(char *a) { + *a = 0; +} + +// Test the +load instrumentation. +// Because the +load methods are invoked before anything else is initialized, +// it makes little sense to wrap the code below into a gTest test case. +// If AddressSanitizer doesn't instrument the +load method below correctly, +// everything will just crash. + +char kStartupStr[] = + "If your test didn't crash, AddressSanitizer is instrumenting " + "the +load methods correctly."; + +@interface LoadSomething : NSObject { +} +@end + +@implementation LoadSomething + ++(void) load { + for (size_t i = 0; i < strlen(kStartupStr); i++) { + access_memory(&kStartupStr[i]); // make sure no optimizations occur. 
+ } + // Don't print anything here not to interfere with the death tests. +} + +@end + +void worker_do_alloc(int size) { + char * volatile mem = (char * volatile)malloc(size); + mem[0] = 0; // Ok + free(mem); +} + +void worker_do_crash(int size) { + char * volatile mem = (char * volatile)malloc(size); + access_memory(&mem[size]); // BOOM + free(mem); +} + +// Used by the GCD tests to avoid a race between the worker thread reporting a +// memory error and the main thread which may exit with exit code 0 before +// that. +void wait_forever() { + volatile bool infinite = true; + while (infinite) pthread_yield_np(); +} + +// Tests for the Grand Central Dispatch. See +// http://developer.apple.com/library/mac/#documentation/Performance/Reference/GCD_libdispatch_Ref/Reference/reference.html +// for the reference. +void TestGCDDispatchAsync() { + dispatch_queue_t queue = dispatch_get_global_queue(0, 0); + dispatch_block_t block = ^{ worker_do_crash(1024); }; + // dispatch_async() runs the task on a worker thread that does not go through + // pthread_create(). We need to verify that AddressSanitizer notices that the + // thread has started. + dispatch_async(queue, block); + wait_forever(); +} + +void TestGCDDispatchSync() { + dispatch_queue_t queue = dispatch_get_global_queue(2, 0); + dispatch_block_t block = ^{ worker_do_crash(1024); }; + // dispatch_sync() runs the task on a worker thread that does not go through + // pthread_create(). We need to verify that AddressSanitizer notices that the + // thread has started. + dispatch_sync(queue, block); + wait_forever(); +} + +// libdispatch spawns a rather small number of threads and reuses them. We need +// to make sure AddressSanitizer handles the reusing correctly. 
+void TestGCDReuseWqthreadsAsync() { + dispatch_queue_t queue = dispatch_get_global_queue(0, 0); + dispatch_block_t block_alloc = ^{ worker_do_alloc(1024); }; + dispatch_block_t block_crash = ^{ worker_do_crash(1024); }; + for (int i = 0; i < 100; i++) { + dispatch_async(queue, block_alloc); + } + dispatch_async(queue, block_crash); + wait_forever(); +} + +// Try to trigger abnormal behaviour of dispatch_sync() being unhandled by us. +void TestGCDReuseWqthreadsSync() { + dispatch_queue_t queue[4]; + queue[0] = dispatch_get_global_queue(2, 0); + queue[1] = dispatch_get_global_queue(0, 0); + queue[2] = dispatch_get_global_queue(-2, 0); + queue[3] = dispatch_queue_create("my_queue", NULL); + dispatch_block_t block_alloc = ^{ worker_do_alloc(1024); }; + dispatch_block_t block_crash = ^{ worker_do_crash(1024); }; + for (int i = 0; i < 1000; i++) { + dispatch_sync(queue[i % 4], block_alloc); + } + dispatch_sync(queue[3], block_crash); + wait_forever(); +} + +void TestGCDDispatchAfter() { + dispatch_queue_t queue = dispatch_get_global_queue(0, 0); + dispatch_block_t block_crash = ^{ worker_do_crash(1024); }; + // Schedule the event one second from the current time. + dispatch_time_t milestone = + dispatch_time(DISPATCH_TIME_NOW, 1LL * NSEC_PER_SEC); + dispatch_after(milestone, queue, block_crash); + wait_forever(); +} + +void worker_do_deallocate(void *ptr) { + free(ptr); +} + +void CallFreeOnWorkqueue(void *tsd) { + dispatch_queue_t queue = dispatch_get_global_queue(0, 0); + dispatch_block_t block_dealloc = ^{ worker_do_deallocate(tsd); }; + dispatch_async(queue, block_dealloc); + // Do not wait for the worker to free the memory -- nobody is going to touch + // it. +} + +void TestGCDSourceEvent() { + dispatch_queue_t queue = dispatch_get_global_queue(0, 0); + dispatch_source_t timer = + dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, queue); + // Schedule the timer one second from the current time. 
+ dispatch_time_t milestone = + dispatch_time(DISPATCH_TIME_NOW, 1LL * NSEC_PER_SEC); + + dispatch_source_set_timer(timer, milestone, DISPATCH_TIME_FOREVER, 0); + char * volatile mem = (char * volatile)malloc(10); + dispatch_source_set_event_handler(timer, ^{ + access_memory(&mem[10]); + }); + dispatch_resume(timer); + wait_forever(); +} + +void TestGCDSourceCancel() { + dispatch_queue_t queue = dispatch_get_global_queue(0, 0); + dispatch_source_t timer = + dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, queue); + // Schedule the timer one second from the current time. + dispatch_time_t milestone = + dispatch_time(DISPATCH_TIME_NOW, 1LL * NSEC_PER_SEC); + + dispatch_source_set_timer(timer, milestone, DISPATCH_TIME_FOREVER, 0); + char * volatile mem = (char * volatile)malloc(10); + // Both dispatch_source_set_cancel_handler() and + // dispatch_source_set_event_handler() use dispatch_barrier_async_f(). + // It's tricky to test dispatch_source_set_cancel_handler() separately, + // so we test both here. 
+ dispatch_source_set_event_handler(timer, ^{ + dispatch_source_cancel(timer); + }); + dispatch_source_set_cancel_handler(timer, ^{ + access_memory(&mem[10]); + }); + dispatch_resume(timer); + wait_forever(); +} + +void TestGCDGroupAsync() { + dispatch_queue_t queue = dispatch_get_global_queue(0, 0); + dispatch_group_t group = dispatch_group_create(); + char * volatile mem = (char * volatile)malloc(10); + dispatch_group_async(group, queue, ^{ + access_memory(&mem[10]); + }); + dispatch_group_wait(group, DISPATCH_TIME_FOREVER); + wait_forever(); +} + +@interface FixedArray : NSObject { + int items[10]; +} +@end + +@implementation FixedArray +-(int) access: (int)index { + return items[index]; +} +@end + +void TestOOBNSObjects() { + id anObject = [FixedArray new]; + [anObject access:1]; + [anObject access:11]; + [anObject release]; +} + +void TestNSURLDeallocation() { + NSURL *base = + [[NSURL alloc] initWithString:@"file://localhost/Users/glider/Library/"]; + volatile NSURL *u = + [[NSURL alloc] initWithString:@"Saved Application State" + relativeToURL:base]; + [u release]; +} diff --git a/contrib/compiler-rt/lib/asan/tests/asan_mem_test.cc b/contrib/compiler-rt/lib/asan/tests/asan_mem_test.cc new file mode 100644 index 000000000000..4a941faa0430 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/tests/asan_mem_test.cc @@ -0,0 +1,241 @@ +//===-- asan_mem_test.cc --------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. 
+// +//===----------------------------------------------------------------------===// +#include "asan_test_utils.h" + +template<typename T> +void MemSetOOBTestTemplate(size_t length) { + if (length == 0) return; + size_t size = Ident(sizeof(T) * length); + T *array = Ident((T*)malloc(size)); + int element = Ident(42); + int zero = Ident(0); + void *(*MEMSET)(void *s, int c, size_t n) = Ident(memset); + // memset interval inside array + MEMSET(array, element, size); + MEMSET(array, element, size - 1); + MEMSET(array + length - 1, element, sizeof(T)); + MEMSET(array, element, 1); + + // memset 0 bytes + MEMSET(array - 10, element, zero); + MEMSET(array - 1, element, zero); + MEMSET(array, element, zero); + MEMSET(array + length, 0, zero); + MEMSET(array + length + 1, 0, zero); + + // try to memset bytes to the right of array + EXPECT_DEATH(MEMSET(array, 0, size + 1), + RightOOBWriteMessage(0)); + EXPECT_DEATH(MEMSET((char*)(array + length) - 1, element, 6), + RightOOBWriteMessage(0)); + EXPECT_DEATH(MEMSET(array + 1, element, size + sizeof(T)), + RightOOBWriteMessage(0)); + // whole interval is to the right + EXPECT_DEATH(MEMSET(array + length + 1, 0, 10), + RightOOBWriteMessage(sizeof(T))); + + // try to memset bytes to the left of array + EXPECT_DEATH(MEMSET((char*)array - 1, element, size), + LeftOOBWriteMessage(1)); + EXPECT_DEATH(MEMSET((char*)array - 5, 0, 6), + LeftOOBWriteMessage(5)); + if (length >= 100) { + // Large OOB, we find it only if the redzone is large enough. 
+ EXPECT_DEATH(memset(array - 5, element, size + 5 * sizeof(T)), + LeftOOBWriteMessage(5 * sizeof(T))); + } + // whole interval is to the left + EXPECT_DEATH(MEMSET(array - 2, 0, sizeof(T)), + LeftOOBWriteMessage(2 * sizeof(T))); + + // try to memset bytes both to the left & to the right + EXPECT_DEATH(MEMSET((char*)array - 2, element, size + 4), + LeftOOBWriteMessage(2)); + + free(array); +} + +TEST(AddressSanitizer, MemSetOOBTest) { + MemSetOOBTestTemplate<char>(100); + MemSetOOBTestTemplate<int>(5); + MemSetOOBTestTemplate<double>(256); + // We can test arrays of structres/classes here, but what for? +} + +// Try to allocate two arrays of 'size' bytes that are near each other. +// Strictly speaking we are not guaranteed to find such two pointers, +// but given the structure of asan's allocator we will. +static bool AllocateTwoAdjacentArrays(char **x1, char **x2, size_t size) { + vector<uintptr_t> v; + bool res = false; + for (size_t i = 0; i < 1000U && !res; i++) { + v.push_back(reinterpret_cast<uintptr_t>(new char[size])); + if (i == 0) continue; + sort(v.begin(), v.end()); + for (size_t j = 1; j < v.size(); j++) { + assert(v[j] > v[j-1]); + if ((size_t)(v[j] - v[j-1]) < size * 2) { + *x2 = reinterpret_cast<char*>(v[j]); + *x1 = reinterpret_cast<char*>(v[j-1]); + res = true; + break; + } + } + } + + for (size_t i = 0; i < v.size(); i++) { + char *p = reinterpret_cast<char *>(v[i]); + if (res && p == *x1) continue; + if (res && p == *x2) continue; + delete [] p; + } + return res; +} + +TEST(AddressSanitizer, LargeOOBInMemset) { + for (size_t size = 200; size < 100000; size += size / 2) { + char *x1, *x2; + if (!Ident(AllocateTwoAdjacentArrays)(&x1, &x2, size)) + continue; + // fprintf(stderr, " large oob memset: %p %p %zd\n", x1, x2, size); + // Do a memset on x1 with huge out-of-bound access that will end up in x2. 
+ EXPECT_DEATH(Ident(memset)(x1, 0, size * 2), + "is located 0 bytes to the right"); + delete [] x1; + delete [] x2; + return; + } + assert(0 && "Did not find two adjacent malloc-ed pointers"); +} + +// Same test for memcpy and memmove functions +template <typename T, class M> +void MemTransferOOBTestTemplate(size_t length) { + if (length == 0) return; + size_t size = Ident(sizeof(T) * length); + T *src = Ident((T*)malloc(size)); + T *dest = Ident((T*)malloc(size)); + int zero = Ident(0); + + // valid transfer of bytes between arrays + M::transfer(dest, src, size); + M::transfer(dest + 1, src, size - sizeof(T)); + M::transfer(dest, src + length - 1, sizeof(T)); + M::transfer(dest, src, 1); + + // transfer zero bytes + M::transfer(dest - 1, src, 0); + M::transfer(dest + length, src, zero); + M::transfer(dest, src - 1, zero); + M::transfer(dest, src, zero); + + // try to change mem to the right of dest + EXPECT_DEATH(M::transfer(dest + 1, src, size), + RightOOBWriteMessage(0)); + EXPECT_DEATH(M::transfer((char*)(dest + length) - 1, src, 5), + RightOOBWriteMessage(0)); + + // try to change mem to the left of dest + EXPECT_DEATH(M::transfer(dest - 2, src, size), + LeftOOBWriteMessage(2 * sizeof(T))); + EXPECT_DEATH(M::transfer((char*)dest - 3, src, 4), + LeftOOBWriteMessage(3)); + + // try to access mem to the right of src + EXPECT_DEATH(M::transfer(dest, src + 2, size), + RightOOBReadMessage(0)); + EXPECT_DEATH(M::transfer(dest, (char*)(src + length) - 3, 6), + RightOOBReadMessage(0)); + + // try to access mem to the left of src + EXPECT_DEATH(M::transfer(dest, src - 1, size), + LeftOOBReadMessage(sizeof(T))); + EXPECT_DEATH(M::transfer(dest, (char*)src - 6, 7), + LeftOOBReadMessage(6)); + + // Generally we don't need to test cases where both accessing src and writing + // to dest address to poisoned memory. 
+ + T *big_src = Ident((T*)malloc(size * 2)); + T *big_dest = Ident((T*)malloc(size * 2)); + // try to change mem to both sides of dest + EXPECT_DEATH(M::transfer(dest - 1, big_src, size * 2), + LeftOOBWriteMessage(sizeof(T))); + // try to access mem to both sides of src + EXPECT_DEATH(M::transfer(big_dest, src - 2, size * 2), + LeftOOBReadMessage(2 * sizeof(T))); + + free(src); + free(dest); + free(big_src); + free(big_dest); +} + +class MemCpyWrapper { + public: + static void* transfer(void *to, const void *from, size_t size) { + return Ident(memcpy)(to, from, size); + } +}; + +TEST(AddressSanitizer, MemCpyOOBTest) { + MemTransferOOBTestTemplate<char, MemCpyWrapper>(100); + MemTransferOOBTestTemplate<int, MemCpyWrapper>(1024); +} + +class MemMoveWrapper { + public: + static void* transfer(void *to, const void *from, size_t size) { + return Ident(memmove)(to, from, size); + } +}; + +TEST(AddressSanitizer, MemMoveOOBTest) { + MemTransferOOBTestTemplate<char, MemMoveWrapper>(100); + MemTransferOOBTestTemplate<int, MemMoveWrapper>(1024); +} + + +TEST(AddressSanitizer, MemCmpOOBTest) { + size_t size = Ident(100); + char *s1 = MallocAndMemsetString(size); + char *s2 = MallocAndMemsetString(size); + // Normal memcmp calls. + Ident(memcmp(s1, s2, size)); + Ident(memcmp(s1 + size - 1, s2 + size - 1, 1)); + Ident(memcmp(s1 - 1, s2 - 1, 0)); + // One of arguments points to not allocated memory. + EXPECT_DEATH(Ident(memcmp)(s1 - 1, s2, 1), LeftOOBReadMessage(1)); + EXPECT_DEATH(Ident(memcmp)(s1, s2 - 1, 1), LeftOOBReadMessage(1)); + EXPECT_DEATH(Ident(memcmp)(s1 + size, s2, 1), RightOOBReadMessage(0)); + EXPECT_DEATH(Ident(memcmp)(s1, s2 + size, 1), RightOOBReadMessage(0)); + // Hit unallocated memory and die. + EXPECT_DEATH(Ident(memcmp)(s1 + 1, s2 + 1, size), RightOOBReadMessage(0)); + EXPECT_DEATH(Ident(memcmp)(s1 + size - 1, s2, 2), RightOOBReadMessage(0)); + // Zero bytes are not terminators and don't prevent from OOB. 
+ s1[size - 1] = '\0'; + s2[size - 1] = '\0'; + EXPECT_DEATH(Ident(memcmp)(s1, s2, size + 1), RightOOBReadMessage(0)); + + // Even if the buffers differ in the first byte, we still assume that + // memcmp may access the whole buffer and thus reporting the overflow here: + s1[0] = 1; + s2[0] = 123; + EXPECT_DEATH(Ident(memcmp)(s1, s2, size + 1), RightOOBReadMessage(0)); + + free(s1); + free(s2); +} + + + diff --git a/contrib/compiler-rt/lib/asan/tests/asan_noinst_test.cc b/contrib/compiler-rt/lib/asan/tests/asan_noinst_test.cc new file mode 100644 index 000000000000..bb6af45bddf9 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/tests/asan_noinst_test.cc @@ -0,0 +1,269 @@ +//===-- asan_noinst_test.cc -----------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// This test file should be compiled w/o asan instrumentation. +//===----------------------------------------------------------------------===// + +#include "asan_allocator.h" +#include "asan_internal.h" +#include "asan_mapping.h" +#include "asan_test_utils.h" +#include <sanitizer/allocator_interface.h> + +#include <assert.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> // for memset() +#include <algorithm> +#include <vector> +#include <limits> + +// ATTENTION! +// Please don't call intercepted functions (including malloc() and friends) +// in this test. The static runtime library is linked explicitly (without +// -fsanitize=address), thus the interceptors do not work correctly on OS X. + +#if !defined(_WIN32) +extern "C" { +// Set specific ASan options for uninstrumented unittest. 
+const char* __asan_default_options() { + return "allow_reexec=0"; +} +} // extern "C" +#endif + +// Make sure __asan_init is called before any test case is run. +struct AsanInitCaller { + AsanInitCaller() { __asan_init(); } +}; +static AsanInitCaller asan_init_caller; + +TEST(AddressSanitizer, InternalSimpleDeathTest) { + EXPECT_DEATH(exit(1), ""); +} + +static void MallocStress(size_t n) { + u32 seed = my_rand(); + BufferedStackTrace stack1; + stack1.trace_buffer[0] = 0xa123; + stack1.trace_buffer[1] = 0xa456; + stack1.size = 2; + + BufferedStackTrace stack2; + stack2.trace_buffer[0] = 0xb123; + stack2.trace_buffer[1] = 0xb456; + stack2.size = 2; + + BufferedStackTrace stack3; + stack3.trace_buffer[0] = 0xc123; + stack3.trace_buffer[1] = 0xc456; + stack3.size = 2; + + std::vector<void *> vec; + for (size_t i = 0; i < n; i++) { + if ((i % 3) == 0) { + if (vec.empty()) continue; + size_t idx = my_rand_r(&seed) % vec.size(); + void *ptr = vec[idx]; + vec[idx] = vec.back(); + vec.pop_back(); + __asan::asan_free(ptr, &stack1, __asan::FROM_MALLOC); + } else { + size_t size = my_rand_r(&seed) % 1000 + 1; + switch ((my_rand_r(&seed) % 128)) { + case 0: size += 1024; break; + case 1: size += 2048; break; + case 2: size += 4096; break; + } + size_t alignment = 1 << (my_rand_r(&seed) % 10 + 1); + char *ptr = (char*)__asan::asan_memalign(alignment, size, + &stack2, __asan::FROM_MALLOC); + EXPECT_EQ(size, __asan::asan_malloc_usable_size(ptr, 0, 0)); + vec.push_back(ptr); + ptr[0] = 0; + ptr[size-1] = 0; + ptr[size/2] = 0; + } + } + for (size_t i = 0; i < vec.size(); i++) + __asan::asan_free(vec[i], &stack3, __asan::FROM_MALLOC); +} + + +TEST(AddressSanitizer, NoInstMallocTest) { + MallocStress(ASAN_LOW_MEMORY ? 300000 : 1000000); +} + +TEST(AddressSanitizer, ThreadedMallocStressTest) { + const int kNumThreads = 4; + const int kNumIterations = (ASAN_LOW_MEMORY) ? 
10000 : 100000; + pthread_t t[kNumThreads]; + for (int i = 0; i < kNumThreads; i++) { + PTHREAD_CREATE(&t[i], 0, (void* (*)(void *x))MallocStress, + (void*)kNumIterations); + } + for (int i = 0; i < kNumThreads; i++) { + PTHREAD_JOIN(t[i], 0); + } +} + +static void PrintShadow(const char *tag, uptr ptr, size_t size) { + fprintf(stderr, "%s shadow: %lx size % 3ld: ", tag, (long)ptr, (long)size); + uptr prev_shadow = 0; + for (sptr i = -32; i < (sptr)size + 32; i++) { + uptr shadow = __asan::MemToShadow(ptr + i); + if (i == 0 || i == (sptr)size) + fprintf(stderr, "."); + if (shadow != prev_shadow) { + prev_shadow = shadow; + fprintf(stderr, "%02x", (int)*(u8*)shadow); + } + } + fprintf(stderr, "\n"); +} + +TEST(AddressSanitizer, DISABLED_InternalPrintShadow) { + for (size_t size = 1; size <= 513; size++) { + char *ptr = new char[size]; + PrintShadow("m", (uptr)ptr, size); + delete [] ptr; + PrintShadow("f", (uptr)ptr, size); + } +} + +TEST(AddressSanitizer, QuarantineTest) { + BufferedStackTrace stack; + stack.trace_buffer[0] = 0x890; + stack.size = 1; + + const int size = 1024; + void *p = __asan::asan_malloc(size, &stack); + __asan::asan_free(p, &stack, __asan::FROM_MALLOC); + size_t i; + size_t max_i = 1 << 30; + for (i = 0; i < max_i; i++) { + void *p1 = __asan::asan_malloc(size, &stack); + __asan::asan_free(p1, &stack, __asan::FROM_MALLOC); + if (p1 == p) break; + } + EXPECT_GE(i, 10000U); + EXPECT_LT(i, max_i); +} + +void *ThreadedQuarantineTestWorker(void *unused) { + (void)unused; + u32 seed = my_rand(); + BufferedStackTrace stack; + stack.trace_buffer[0] = 0x890; + stack.size = 1; + + for (size_t i = 0; i < 1000; i++) { + void *p = __asan::asan_malloc(1 + (my_rand_r(&seed) % 4000), &stack); + __asan::asan_free(p, &stack, __asan::FROM_MALLOC); + } + return NULL; +} + +// Check that the thread local allocators are flushed when threads are +// destroyed. 
+TEST(AddressSanitizer, ThreadedQuarantineTest) { + const int n_threads = 3000; + size_t mmaped1 = __sanitizer_get_heap_size(); + for (int i = 0; i < n_threads; i++) { + pthread_t t; + PTHREAD_CREATE(&t, NULL, ThreadedQuarantineTestWorker, 0); + PTHREAD_JOIN(t, 0); + size_t mmaped2 = __sanitizer_get_heap_size(); + EXPECT_LT(mmaped2 - mmaped1, 320U * (1 << 20)); + } +} + +void *ThreadedOneSizeMallocStress(void *unused) { + (void)unused; + BufferedStackTrace stack; + stack.trace_buffer[0] = 0x890; + stack.size = 1; + const size_t kNumMallocs = 1000; + for (int iter = 0; iter < 1000; iter++) { + void *p[kNumMallocs]; + for (size_t i = 0; i < kNumMallocs; i++) { + p[i] = __asan::asan_malloc(32, &stack); + } + for (size_t i = 0; i < kNumMallocs; i++) { + __asan::asan_free(p[i], &stack, __asan::FROM_MALLOC); + } + } + return NULL; +} + +TEST(AddressSanitizer, ThreadedOneSizeMallocStressTest) { + const int kNumThreads = 4; + pthread_t t[kNumThreads]; + for (int i = 0; i < kNumThreads; i++) { + PTHREAD_CREATE(&t[i], 0, ThreadedOneSizeMallocStress, 0); + } + for (int i = 0; i < kNumThreads; i++) { + PTHREAD_JOIN(t[i], 0); + } +} + +TEST(AddressSanitizer, ShadowRegionIsPoisonedTest) { + using __asan::kHighMemEnd; + // Check that __asan_region_is_poisoned works for shadow regions. + uptr ptr = kLowShadowBeg + 200; + EXPECT_EQ(ptr, __asan_region_is_poisoned(ptr, 100)); + ptr = kShadowGapBeg + 200; + EXPECT_EQ(ptr, __asan_region_is_poisoned(ptr, 100)); + ptr = kHighShadowBeg + 200; + EXPECT_EQ(ptr, __asan_region_is_poisoned(ptr, 100)); +} + +// Test __asan_load1 & friends. 
+TEST(AddressSanitizer, LoadStoreCallbacks) { + typedef void (*CB)(uptr p); + CB cb[2][5] = { + { + __asan_load1, __asan_load2, __asan_load4, __asan_load8, __asan_load16, + }, { + __asan_store1, __asan_store2, __asan_store4, __asan_store8, + __asan_store16, + } + }; + + uptr buggy_ptr; + + __asan_test_only_reported_buggy_pointer = &buggy_ptr; + BufferedStackTrace stack; + stack.trace_buffer[0] = 0x890; + stack.size = 1; + + for (uptr len = 16; len <= 32; len++) { + char *ptr = (char*) __asan::asan_malloc(len, &stack); + uptr p = reinterpret_cast<uptr>(ptr); + for (uptr is_write = 0; is_write <= 1; is_write++) { + for (uptr size_log = 0; size_log <= 4; size_log++) { + uptr size = 1 << size_log; + CB call = cb[is_write][size_log]; + // Iterate only size-aligned offsets. + for (uptr offset = 0; offset <= len; offset += size) { + buggy_ptr = 0; + call(p + offset); + if (offset + size <= len) + EXPECT_EQ(buggy_ptr, 0U); + else + EXPECT_EQ(buggy_ptr, p + offset); + } + } + } + __asan::asan_free(ptr, &stack, __asan::FROM_MALLOC); + } + __asan_test_only_reported_buggy_pointer = 0; +} diff --git a/contrib/compiler-rt/lib/asan/tests/asan_oob_test.cc b/contrib/compiler-rt/lib/asan/tests/asan_oob_test.cc new file mode 100644 index 000000000000..0c6bea285864 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/tests/asan_oob_test.cc @@ -0,0 +1,128 @@ +//===-- asan_oob_test.cc --------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. 
+// +//===----------------------------------------------------------------------===// +#include "asan_test_utils.h" + +NOINLINE void asan_write_sized_aligned(uint8_t *p, size_t size) { + EXPECT_EQ(0U, ((uintptr_t)p % size)); + if (size == 1) asan_write((uint8_t*)p); + else if (size == 2) asan_write((uint16_t*)p); + else if (size == 4) asan_write((uint32_t*)p); + else if (size == 8) asan_write((uint64_t*)p); +} + +template<typename T> +NOINLINE void oob_test(int size, int off) { + char *p = (char*)malloc_aaa(size); + // fprintf(stderr, "writing %d byte(s) into [%p,%p) with offset %d\n", + // sizeof(T), p, p + size, off); + asan_write((T*)(p + off)); + free_aaa(p); +} + +template<typename T> +void OOBTest() { + char expected_str[100]; + for (int size = sizeof(T); size < 20; size += 5) { + for (int i = -5; i < 0; i++) { + const char *str = + "is located.*%d byte.*to the left"; + sprintf(expected_str, str, abs(i)); + EXPECT_DEATH(oob_test<T>(size, i), expected_str); + } + + for (int i = 0; i < (int)(size - sizeof(T) + 1); i++) + oob_test<T>(size, i); + + for (int i = size - sizeof(T) + 1; i <= (int)(size + 2 * sizeof(T)); i++) { + const char *str = + "is located.*%d byte.*to the right"; + int off = i >= size ? (i - size) : 0; + // we don't catch unaligned partially OOB accesses. 
+ if (i % sizeof(T)) continue; + sprintf(expected_str, str, off); + EXPECT_DEATH(oob_test<T>(size, i), expected_str); + } + } + + EXPECT_DEATH(oob_test<T>(kLargeMalloc, -1), + "is located.*1 byte.*to the left"); + EXPECT_DEATH(oob_test<T>(kLargeMalloc, kLargeMalloc), + "is located.*0 byte.*to the right"); +} + +// TODO(glider): the following tests are EXTREMELY slow on Darwin: +// AddressSanitizer.OOB_char (125503 ms) +// AddressSanitizer.OOB_int (126890 ms) +// AddressSanitizer.OOBRightTest (315605 ms) +// AddressSanitizer.SimpleStackTest (366559 ms) + +TEST(AddressSanitizer, OOB_char) { + OOBTest<U1>(); +} + +TEST(AddressSanitizer, OOB_int) { + OOBTest<U4>(); +} + +TEST(AddressSanitizer, OOBRightTest) { + size_t max_access_size = SANITIZER_WORDSIZE == 64 ? 8 : 4; + for (size_t access_size = 1; access_size <= max_access_size; + access_size *= 2) { + for (size_t alloc_size = 1; alloc_size <= 8; alloc_size++) { + for (size_t offset = 0; offset <= 8; offset += access_size) { + void *p = malloc(alloc_size); + // allocated: [p, p + alloc_size) + // accessed: [p + offset, p + offset + access_size) + uint8_t *addr = (uint8_t*)p + offset; + if (offset + access_size <= alloc_size) { + asan_write_sized_aligned(addr, access_size); + } else { + int outside_bytes = offset > alloc_size ? 
(offset - alloc_size) : 0; + const char *str = + "is located.%d *byte.*to the right"; + char expected_str[100]; + sprintf(expected_str, str, outside_bytes); + EXPECT_DEATH(asan_write_sized_aligned(addr, access_size), + expected_str); + } + free(p); + } + } + } +} + +TEST(AddressSanitizer, LargeOOBRightTest) { + size_t large_power_of_two = 1 << 19; + for (size_t i = 16; i <= 256; i *= 2) { + size_t size = large_power_of_two - i; + char *p = Ident(new char[size]); + EXPECT_DEATH(p[size] = 0, "is located 0 bytes to the right"); + delete [] p; + } +} + +TEST(AddressSanitizer, DISABLED_DemoOOBLeftLow) { + oob_test<U1>(10, -1); +} + +TEST(AddressSanitizer, DISABLED_DemoOOBLeftHigh) { + oob_test<U1>(kLargeMalloc, -1); +} + +TEST(AddressSanitizer, DISABLED_DemoOOBRightLow) { + oob_test<U1>(10, 10); +} + +TEST(AddressSanitizer, DISABLED_DemoOOBRightHigh) { + oob_test<U1>(kLargeMalloc, kLargeMalloc); +} diff --git a/contrib/compiler-rt/lib/asan/tests/asan_racy_double_free_test.cc b/contrib/compiler-rt/lib/asan/tests/asan_racy_double_free_test.cc new file mode 100644 index 000000000000..23240e714d56 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/tests/asan_racy_double_free_test.cc @@ -0,0 +1,32 @@ +#include <pthread.h> +#include <stdlib.h> +#include <stdio.h> + +const int N = 1000; +void *x[N]; + +void *Thread1(void *unused) { + for (int i = 0; i < N; i++) { + fprintf(stderr, "%s %d\n", __func__, i); + free(x[i]); + } + return NULL; +} + +void *Thread2(void *unused) { + for (int i = 0; i < N; i++) { + fprintf(stderr, "%s %d\n", __func__, i); + free(x[i]); + } + return NULL; +} + +int main() { + for (int i = 0; i < N; i++) + x[i] = malloc(128); + pthread_t t[2]; + pthread_create(&t[0], 0, Thread1, 0); + pthread_create(&t[1], 0, Thread2, 0); + pthread_join(t[0], 0); + pthread_join(t[1], 0); +} diff --git a/contrib/compiler-rt/lib/asan/tests/asan_str_test.cc b/contrib/compiler-rt/lib/asan/tests/asan_str_test.cc new file mode 100644 index 000000000000..1cd2a08b9021 --- 
/dev/null +++ b/contrib/compiler-rt/lib/asan/tests/asan_str_test.cc @@ -0,0 +1,602 @@ +//=-- asan_str_test.cc ----------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +//===----------------------------------------------------------------------===// +#include "asan_test_utils.h" + +#if defined(__APPLE__) +#include <AvailabilityMacros.h> // For MAC_OS_X_VERSION_* +#endif + +// Used for string functions tests +static char global_string[] = "global"; +static size_t global_string_length = 6; + +// Input to a test is a zero-terminated string str with given length +// Accesses to the bytes to the left and to the right of str +// are presumed to produce OOB errors +void StrLenOOBTestTemplate(char *str, size_t length, bool is_global) { + // Normal strlen calls + EXPECT_EQ(strlen(str), length); + if (length > 0) { + EXPECT_EQ(length - 1, strlen(str + 1)); + EXPECT_EQ(0U, strlen(str + length)); + } + // Arg of strlen is not malloced, OOB access + if (!is_global) { + // We don't insert RedZones to the left of global variables + EXPECT_DEATH(Ident(strlen(str - 1)), LeftOOBReadMessage(1)); + EXPECT_DEATH(Ident(strlen(str - 5)), LeftOOBReadMessage(5)); + } + EXPECT_DEATH(Ident(strlen(str + length + 1)), RightOOBReadMessage(0)); + // Overwrite terminator + str[length] = 'a'; + // String is not zero-terminated, strlen will lead to OOB access + EXPECT_DEATH(Ident(strlen(str)), RightOOBReadMessage(0)); + EXPECT_DEATH(Ident(strlen(str + length)), RightOOBReadMessage(0)); + // Restore terminator + str[length] = 0; +} +TEST(AddressSanitizer, StrLenOOBTest) { + // Check heap-allocated string + size_t length = Ident(10); + char *heap_string = 
Ident((char*)malloc(length + 1)); + char stack_string[10 + 1]; + break_optimization(&stack_string); + for (size_t i = 0; i < length; i++) { + heap_string[i] = 'a'; + stack_string[i] = 'b'; + } + heap_string[length] = 0; + stack_string[length] = 0; + StrLenOOBTestTemplate(heap_string, length, false); + // TODO(samsonov): Fix expected messages in StrLenOOBTestTemplate to + // make test for stack_string work. Or move it to output tests. + // StrLenOOBTestTemplate(stack_string, length, false); + StrLenOOBTestTemplate(global_string, global_string_length, true); + free(heap_string); +} + +TEST(AddressSanitizer, WcsLenTest) { + EXPECT_EQ(0U, wcslen(Ident(L""))); + size_t hello_len = 13; + size_t hello_size = (hello_len + 1) * sizeof(wchar_t); + EXPECT_EQ(hello_len, wcslen(Ident(L"Hello, World!"))); + wchar_t *heap_string = Ident((wchar_t*)malloc(hello_size)); + memcpy(heap_string, L"Hello, World!", hello_size); + EXPECT_EQ(hello_len, Ident(wcslen(heap_string))); + EXPECT_DEATH(Ident(wcslen(heap_string + 14)), RightOOBReadMessage(0)); + free(heap_string); +} + +#if SANITIZER_TEST_HAS_STRNLEN +TEST(AddressSanitizer, StrNLenOOBTest) { + size_t size = Ident(123); + char *str = MallocAndMemsetString(size); + // Normal strnlen calls. + Ident(strnlen(str - 1, 0)); + Ident(strnlen(str, size)); + Ident(strnlen(str + size - 1, 1)); + str[size - 1] = '\0'; + Ident(strnlen(str, 2 * size)); + // Argument points to not allocated memory. + EXPECT_DEATH(Ident(strnlen(str - 1, 1)), LeftOOBReadMessage(1)); + EXPECT_DEATH(Ident(strnlen(str + size, 1)), RightOOBReadMessage(0)); + // Overwrite the terminating '\0' and hit unallocated memory. + str[size - 1] = 'z'; + EXPECT_DEATH(Ident(strnlen(str, size + 1)), RightOOBReadMessage(0)); + free(str); +} +#endif // SANITIZER_TEST_HAS_STRNLEN + +TEST(AddressSanitizer, StrDupOOBTest) { + size_t size = Ident(42); + char *str = MallocAndMemsetString(size); + char *new_str; + // Normal strdup calls. 
+ str[size - 1] = '\0'; + new_str = strdup(str); + free(new_str); + new_str = strdup(str + size - 1); + free(new_str); + // Argument points to not allocated memory. + EXPECT_DEATH(Ident(strdup(str - 1)), LeftOOBReadMessage(1)); + EXPECT_DEATH(Ident(strdup(str + size)), RightOOBReadMessage(0)); + // Overwrite the terminating '\0' and hit unallocated memory. + str[size - 1] = 'z'; + EXPECT_DEATH(Ident(strdup(str)), RightOOBReadMessage(0)); + free(str); +} + +TEST(AddressSanitizer, StrCpyOOBTest) { + size_t to_size = Ident(30); + size_t from_size = Ident(6); // less than to_size + char *to = Ident((char*)malloc(to_size)); + char *from = Ident((char*)malloc(from_size)); + // Normal strcpy calls. + strcpy(from, "hello"); + strcpy(to, from); + strcpy(to + to_size - from_size, from); + // Length of "from" is too small. + EXPECT_DEATH(Ident(strcpy(from, "hello2")), RightOOBWriteMessage(0)); + // "to" or "from" points to not allocated memory. + EXPECT_DEATH(Ident(strcpy(to - 1, from)), LeftOOBWriteMessage(1)); + EXPECT_DEATH(Ident(strcpy(to, from - 1)), LeftOOBReadMessage(1)); + EXPECT_DEATH(Ident(strcpy(to, from + from_size)), RightOOBReadMessage(0)); + EXPECT_DEATH(Ident(strcpy(to + to_size, from)), RightOOBWriteMessage(0)); + // Overwrite the terminating '\0' character and hit unallocated memory. 
+ from[from_size - 1] = '!'; + EXPECT_DEATH(Ident(strcpy(to, from)), RightOOBReadMessage(0)); + free(to); + free(from); +} + +TEST(AddressSanitizer, StrNCpyOOBTest) { + size_t to_size = Ident(20); + size_t from_size = Ident(6); // less than to_size + char *to = Ident((char*)malloc(to_size)); + // From is a zero-terminated string "hello\0" of length 6 + char *from = Ident((char*)malloc(from_size)); + strcpy(from, "hello"); + // copy 0 bytes + strncpy(to, from, 0); + strncpy(to - 1, from - 1, 0); + // normal strncpy calls + strncpy(to, from, from_size); + strncpy(to, from, to_size); + strncpy(to, from + from_size - 1, to_size); + strncpy(to + to_size - 1, from, 1); + // One of {to, from} points to not allocated memory + EXPECT_DEATH(Ident(strncpy(to, from - 1, from_size)), + LeftOOBReadMessage(1)); + EXPECT_DEATH(Ident(strncpy(to - 1, from, from_size)), + LeftOOBWriteMessage(1)); + EXPECT_DEATH(Ident(strncpy(to, from + from_size, 1)), + RightOOBReadMessage(0)); + EXPECT_DEATH(Ident(strncpy(to + to_size, from, 1)), + RightOOBWriteMessage(0)); + // Length of "to" is too small + EXPECT_DEATH(Ident(strncpy(to + to_size - from_size + 1, from, from_size)), + RightOOBWriteMessage(0)); + EXPECT_DEATH(Ident(strncpy(to + 1, from, to_size)), + RightOOBWriteMessage(0)); + // Overwrite terminator in from + from[from_size - 1] = '!'; + // normal strncpy call + strncpy(to, from, from_size); + // Length of "from" is too small + EXPECT_DEATH(Ident(strncpy(to, from, to_size)), + RightOOBReadMessage(0)); + free(to); + free(from); +} + +// Users may have different definitions of "strchr" and "index", so provide +// function pointer typedefs and overload RunStrChrTest implementation. +// We can't use macro for RunStrChrTest body here, as this macro would +// confuse EXPECT_DEATH gtest macro. 
+typedef char*(*PointerToStrChr1)(const char*, int); +typedef char*(*PointerToStrChr2)(char*, int); + +UNUSED static void RunStrChrTest(PointerToStrChr1 StrChr) { + size_t size = Ident(100); + char *str = MallocAndMemsetString(size); + str[10] = 'q'; + str[11] = '\0'; + EXPECT_EQ(str, StrChr(str, 'z')); + EXPECT_EQ(str + 10, StrChr(str, 'q')); + EXPECT_EQ(NULL, StrChr(str, 'a')); + // StrChr argument points to not allocated memory. + EXPECT_DEATH(Ident(StrChr(str - 1, 'z')), LeftOOBReadMessage(1)); + EXPECT_DEATH(Ident(StrChr(str + size, 'z')), RightOOBReadMessage(0)); + // Overwrite the terminator and hit not allocated memory. + str[11] = 'z'; + EXPECT_DEATH(Ident(StrChr(str, 'a')), RightOOBReadMessage(0)); + free(str); +} +UNUSED static void RunStrChrTest(PointerToStrChr2 StrChr) { + size_t size = Ident(100); + char *str = MallocAndMemsetString(size); + str[10] = 'q'; + str[11] = '\0'; + EXPECT_EQ(str, StrChr(str, 'z')); + EXPECT_EQ(str + 10, StrChr(str, 'q')); + EXPECT_EQ(NULL, StrChr(str, 'a')); + // StrChr argument points to not allocated memory. + EXPECT_DEATH(Ident(StrChr(str - 1, 'z')), LeftOOBReadMessage(1)); + EXPECT_DEATH(Ident(StrChr(str + size, 'z')), RightOOBReadMessage(0)); + // Overwrite the terminator and hit not allocated memory. + str[11] = 'z'; + EXPECT_DEATH(Ident(StrChr(str, 'a')), RightOOBReadMessage(0)); + free(str); +} + +TEST(AddressSanitizer, StrChrAndIndexOOBTest) { + RunStrChrTest(&strchr); +// No index() on Windows and on Android L. 
+#if !defined(_WIN32) && !defined(__ANDROID__) + RunStrChrTest(&index); +#endif +} + +TEST(AddressSanitizer, StrCmpAndFriendsLogicTest) { + // strcmp + EXPECT_EQ(0, strcmp("", "")); + EXPECT_EQ(0, strcmp("abcd", "abcd")); + EXPECT_GT(0, strcmp("ab", "ac")); + EXPECT_GT(0, strcmp("abc", "abcd")); + EXPECT_LT(0, strcmp("acc", "abc")); + EXPECT_LT(0, strcmp("abcd", "abc")); + + // strncmp + EXPECT_EQ(0, strncmp("a", "b", 0)); + EXPECT_EQ(0, strncmp("abcd", "abcd", 10)); + EXPECT_EQ(0, strncmp("abcd", "abcef", 3)); + EXPECT_GT(0, strncmp("abcde", "abcfa", 4)); + EXPECT_GT(0, strncmp("a", "b", 5)); + EXPECT_GT(0, strncmp("bc", "bcde", 4)); + EXPECT_LT(0, strncmp("xyz", "xyy", 10)); + EXPECT_LT(0, strncmp("baa", "aaa", 1)); + EXPECT_LT(0, strncmp("zyx", "", 2)); + +#if !defined(_WIN32) // no str[n]casecmp on Windows. + // strcasecmp + EXPECT_EQ(0, strcasecmp("", "")); + EXPECT_EQ(0, strcasecmp("zzz", "zzz")); + EXPECT_EQ(0, strcasecmp("abCD", "ABcd")); + EXPECT_GT(0, strcasecmp("aB", "Ac")); + EXPECT_GT(0, strcasecmp("ABC", "ABCd")); + EXPECT_LT(0, strcasecmp("acc", "abc")); + EXPECT_LT(0, strcasecmp("ABCd", "abc")); + + // strncasecmp + EXPECT_EQ(0, strncasecmp("a", "b", 0)); + EXPECT_EQ(0, strncasecmp("abCD", "ABcd", 10)); + EXPECT_EQ(0, strncasecmp("abCd", "ABcef", 3)); + EXPECT_GT(0, strncasecmp("abcde", "ABCfa", 4)); + EXPECT_GT(0, strncasecmp("a", "B", 5)); + EXPECT_GT(0, strncasecmp("bc", "BCde", 4)); + EXPECT_LT(0, strncasecmp("xyz", "xyy", 10)); + EXPECT_LT(0, strncasecmp("Baa", "aaa", 1)); + EXPECT_LT(0, strncasecmp("zyx", "", 2)); +#endif + + // memcmp + EXPECT_EQ(0, memcmp("a", "b", 0)); + EXPECT_EQ(0, memcmp("ab\0c", "ab\0c", 4)); + EXPECT_GT(0, memcmp("\0ab", "\0ac", 3)); + EXPECT_GT(0, memcmp("abb\0", "abba", 4)); + EXPECT_LT(0, memcmp("ab\0cd", "ab\0c\0", 5)); + EXPECT_LT(0, memcmp("zza", "zyx", 3)); +} + +typedef int(*PointerToStrCmp)(const char*, const char*); +void RunStrCmpTest(PointerToStrCmp StrCmp) { + size_t size = Ident(100); + int fill = 'o'; + 
char *s1 = MallocAndMemsetString(size, fill); + char *s2 = MallocAndMemsetString(size, fill); + s1[size - 1] = '\0'; + s2[size - 1] = '\0'; + // Normal StrCmp calls + Ident(StrCmp(s1, s2)); + Ident(StrCmp(s1, s2 + size - 1)); + Ident(StrCmp(s1 + size - 1, s2 + size - 1)); + s1[size - 1] = 'z'; + s2[size - 1] = 'x'; + Ident(StrCmp(s1, s2)); + // One of arguments points to not allocated memory. + EXPECT_DEATH(Ident(StrCmp)(s1 - 1, s2), LeftOOBReadMessage(1)); + EXPECT_DEATH(Ident(StrCmp)(s1, s2 - 1), LeftOOBReadMessage(1)); + EXPECT_DEATH(Ident(StrCmp)(s1 + size, s2), RightOOBReadMessage(0)); + EXPECT_DEATH(Ident(StrCmp)(s1, s2 + size), RightOOBReadMessage(0)); + // Hit unallocated memory and die. + s1[size - 1] = fill; + EXPECT_DEATH(Ident(StrCmp)(s1, s1), RightOOBReadMessage(0)); + EXPECT_DEATH(Ident(StrCmp)(s1 + size - 1, s2), RightOOBReadMessage(0)); + free(s1); + free(s2); +} + +TEST(AddressSanitizer, StrCmpOOBTest) { + RunStrCmpTest(&strcmp); +} + +#if !defined(_WIN32) // no str[n]casecmp on Windows. +TEST(AddressSanitizer, StrCaseCmpOOBTest) { + RunStrCmpTest(&strcasecmp); +} +#endif + +typedef int(*PointerToStrNCmp)(const char*, const char*, size_t); +void RunStrNCmpTest(PointerToStrNCmp StrNCmp) { + size_t size = Ident(100); + char *s1 = MallocAndMemsetString(size); + char *s2 = MallocAndMemsetString(size); + s1[size - 1] = '\0'; + s2[size - 1] = '\0'; + // Normal StrNCmp calls + Ident(StrNCmp(s1, s2, size + 2)); + s1[size - 1] = 'z'; + s2[size - 1] = 'x'; + Ident(StrNCmp(s1 + size - 2, s2 + size - 2, size)); + s2[size - 1] = 'z'; + Ident(StrNCmp(s1 - 1, s2 - 1, 0)); + Ident(StrNCmp(s1 + size - 1, s2 + size - 1, 1)); + // One of arguments points to not allocated memory. 
+ EXPECT_DEATH(Ident(StrNCmp)(s1 - 1, s2, 1), LeftOOBReadMessage(1)); + EXPECT_DEATH(Ident(StrNCmp)(s1, s2 - 1, 1), LeftOOBReadMessage(1)); + EXPECT_DEATH(Ident(StrNCmp)(s1 + size, s2, 1), RightOOBReadMessage(0)); + EXPECT_DEATH(Ident(StrNCmp)(s1, s2 + size, 1), RightOOBReadMessage(0)); + // Hit unallocated memory and die. + EXPECT_DEATH(Ident(StrNCmp)(s1 + 1, s2 + 1, size), RightOOBReadMessage(0)); + EXPECT_DEATH(Ident(StrNCmp)(s1 + size - 1, s2, 2), RightOOBReadMessage(0)); + free(s1); + free(s2); +} + +TEST(AddressSanitizer, StrNCmpOOBTest) { + RunStrNCmpTest(&strncmp); +} + +#if !defined(_WIN32) // no str[n]casecmp on Windows. +TEST(AddressSanitizer, StrNCaseCmpOOBTest) { + RunStrNCmpTest(&strncasecmp); +} +#endif + +TEST(AddressSanitizer, StrCatOOBTest) { + // strcat() reads strlen(to) bytes from |to| before concatenating. + size_t to_size = Ident(100); + char *to = MallocAndMemsetString(to_size); + to[0] = '\0'; + size_t from_size = Ident(20); + char *from = MallocAndMemsetString(from_size); + from[from_size - 1] = '\0'; + // Normal strcat calls. + strcat(to, from); + strcat(to, from); + strcat(to + from_size, from + from_size - 2); + // Passing an invalid pointer is an error even when concatenating an empty + // string. + EXPECT_DEATH(strcat(to - 1, from + from_size - 1), LeftOOBAccessMessage(1)); + // One of arguments points to not allocated memory. + EXPECT_DEATH(strcat(to - 1, from), LeftOOBAccessMessage(1)); + EXPECT_DEATH(strcat(to, from - 1), LeftOOBReadMessage(1)); + EXPECT_DEATH(strcat(to + to_size, from), RightOOBWriteMessage(0)); + EXPECT_DEATH(strcat(to, from + from_size), RightOOBReadMessage(0)); + + // "from" is not zero-terminated. + from[from_size - 1] = 'z'; + EXPECT_DEATH(strcat(to, from), RightOOBReadMessage(0)); + from[from_size - 1] = '\0'; + // "to" is not zero-terminated. + memset(to, 'z', to_size); + EXPECT_DEATH(strcat(to, from), RightOOBWriteMessage(0)); + // "to" is too short to fit "from". 
+ to[to_size - from_size + 1] = '\0'; + EXPECT_DEATH(strcat(to, from), RightOOBWriteMessage(0)); + // length of "to" is just enough. + strcat(to, from + 1); + + free(to); + free(from); +} + +TEST(AddressSanitizer, StrNCatOOBTest) { + // strncat() reads strlen(to) bytes from |to| before concatenating. + size_t to_size = Ident(100); + char *to = MallocAndMemsetString(to_size); + to[0] = '\0'; + size_t from_size = Ident(20); + char *from = MallocAndMemsetString(from_size); + // Normal strncat calls. + strncat(to, from, 0); + strncat(to, from, from_size); + from[from_size - 1] = '\0'; + strncat(to, from, 2 * from_size); + // Catenating empty string with an invalid string is still an error. + EXPECT_DEATH(strncat(to - 1, from, 0), LeftOOBAccessMessage(1)); + strncat(to, from + from_size - 1, 10); + // One of arguments points to not allocated memory. + EXPECT_DEATH(strncat(to - 1, from, 2), LeftOOBAccessMessage(1)); + EXPECT_DEATH(strncat(to, from - 1, 2), LeftOOBReadMessage(1)); + EXPECT_DEATH(strncat(to + to_size, from, 2), RightOOBWriteMessage(0)); + EXPECT_DEATH(strncat(to, from + from_size, 2), RightOOBReadMessage(0)); + + memset(from, 'z', from_size); + memset(to, 'z', to_size); + to[0] = '\0'; + // "from" is too short. + EXPECT_DEATH(strncat(to, from, from_size + 1), RightOOBReadMessage(0)); + // "to" is not zero-terminated. + EXPECT_DEATH(strncat(to + 1, from, 1), RightOOBWriteMessage(0)); + // "to" is too short to fit "from". + to[0] = 'z'; + to[to_size - from_size + 1] = '\0'; + EXPECT_DEATH(strncat(to, from, from_size - 1), RightOOBWriteMessage(0)); + // "to" is just enough. + strncat(to, from, from_size - 2); + + free(to); + free(from); +} + +static string OverlapErrorMessage(const string &func) { + return func + "-param-overlap"; +} + +TEST(AddressSanitizer, StrArgsOverlapTest) { + size_t size = Ident(100); + char *str = Ident((char*)malloc(size)); + +// Do not check memcpy() on OS X 10.7 and later, where it actually aliases +// memmove(). 
+#if !defined(__APPLE__) || !defined(MAC_OS_X_VERSION_10_7) || \ + (MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_7) + // Check "memcpy". Use Ident() to avoid inlining. + memset(str, 'z', size); + Ident(memcpy)(str + 1, str + 11, 10); + Ident(memcpy)(str, str, 0); + EXPECT_DEATH(Ident(memcpy)(str, str + 14, 15), OverlapErrorMessage("memcpy")); + EXPECT_DEATH(Ident(memcpy)(str + 14, str, 15), OverlapErrorMessage("memcpy")); +#endif + + // We do not treat memcpy with to==from as a bug. + // See http://llvm.org/bugs/show_bug.cgi?id=11763. + // EXPECT_DEATH(Ident(memcpy)(str + 20, str + 20, 1), + // OverlapErrorMessage("memcpy")); + + // Check "strcpy". + memset(str, 'z', size); + str[9] = '\0'; + strcpy(str + 10, str); + EXPECT_DEATH(strcpy(str + 9, str), OverlapErrorMessage("strcpy")); + EXPECT_DEATH(strcpy(str, str + 4), OverlapErrorMessage("strcpy")); + strcpy(str, str + 5); + + // Check "strncpy". + memset(str, 'z', size); + strncpy(str, str + 10, 10); + EXPECT_DEATH(strncpy(str, str + 9, 10), OverlapErrorMessage("strncpy")); + EXPECT_DEATH(strncpy(str + 9, str, 10), OverlapErrorMessage("strncpy")); + str[10] = '\0'; + strncpy(str + 11, str, 20); + EXPECT_DEATH(strncpy(str + 10, str, 20), OverlapErrorMessage("strncpy")); + + // Check "strcat". + memset(str, 'z', size); + str[10] = '\0'; + str[20] = '\0'; + strcat(str, str + 10); + EXPECT_DEATH(strcat(str, str + 11), OverlapErrorMessage("strcat")); + str[10] = '\0'; + strcat(str + 11, str); + EXPECT_DEATH(strcat(str, str + 9), OverlapErrorMessage("strcat")); + EXPECT_DEATH(strcat(str + 9, str), OverlapErrorMessage("strcat")); + EXPECT_DEATH(strcat(str + 10, str), OverlapErrorMessage("strcat")); + + // Check "strncat". 
+ memset(str, 'z', size); + str[10] = '\0'; + strncat(str, str + 10, 10); // from is empty + EXPECT_DEATH(strncat(str, str + 11, 10), OverlapErrorMessage("strncat")); + str[10] = '\0'; + str[20] = '\0'; + strncat(str + 5, str, 5); + str[10] = '\0'; + EXPECT_DEATH(strncat(str + 5, str, 6), OverlapErrorMessage("strncat")); + EXPECT_DEATH(strncat(str, str + 9, 10), OverlapErrorMessage("strncat")); + + free(str); +} + +typedef void(*PointerToCallAtoi)(const char*); + +void RunAtoiOOBTest(PointerToCallAtoi Atoi) { + char *array = MallocAndMemsetString(10, '1'); + // Invalid pointer to the string. + EXPECT_DEATH(Atoi(array + 11), RightOOBReadMessage(1)); + EXPECT_DEATH(Atoi(array - 1), LeftOOBReadMessage(1)); + // Die if a buffer doesn't have terminating NULL. + EXPECT_DEATH(Atoi(array), RightOOBReadMessage(0)); + // Make last symbol a terminating NULL or other non-digit. + array[9] = '\0'; + Atoi(array); + array[9] = 'a'; + Atoi(array); + Atoi(array + 9); + // Sometimes we need to detect overflow if no digits are found. + memset(array, ' ', 10); + EXPECT_DEATH(Atoi(array), RightOOBReadMessage(0)); + array[9] = '-'; + EXPECT_DEATH(Atoi(array), RightOOBReadMessage(0)); + EXPECT_DEATH(Atoi(array + 9), RightOOBReadMessage(0)); + array[8] = '-'; + Atoi(array); + free(array); +} + +#if !defined(_WIN32) // FIXME: Fix and enable on Windows. +void CallAtoi(const char *nptr) { + Ident(atoi(nptr)); +} +void CallAtol(const char *nptr) { + Ident(atol(nptr)); +} +void CallAtoll(const char *nptr) { + Ident(atoll(nptr)); +} +TEST(AddressSanitizer, AtoiAndFriendsOOBTest) { + RunAtoiOOBTest(&CallAtoi); + RunAtoiOOBTest(&CallAtol); + RunAtoiOOBTest(&CallAtoll); +} +#endif + +typedef void(*PointerToCallStrtol)(const char*, char**, int); + +void RunStrtolOOBTest(PointerToCallStrtol Strtol) { + char *array = MallocAndMemsetString(3); + char *endptr = NULL; + array[0] = '1'; + array[1] = '2'; + array[2] = '3'; + // Invalid pointer to the string. 
+ EXPECT_DEATH(Strtol(array + 3, NULL, 0), RightOOBReadMessage(0)); + EXPECT_DEATH(Strtol(array - 1, NULL, 0), LeftOOBReadMessage(1)); + // Buffer overflow if there is no terminating null (depends on base). + Strtol(array, &endptr, 3); + EXPECT_EQ(array + 2, endptr); + EXPECT_DEATH(Strtol(array, NULL, 0), RightOOBReadMessage(0)); + array[2] = 'z'; + Strtol(array, &endptr, 35); + EXPECT_EQ(array + 2, endptr); + EXPECT_DEATH(Strtol(array, NULL, 36), RightOOBReadMessage(0)); + // Add terminating zero to get rid of overflow. + array[2] = '\0'; + Strtol(array, NULL, 36); + // Don't check for overflow if base is invalid. + Strtol(array - 1, NULL, -1); + Strtol(array + 3, NULL, 1); + // Sometimes we need to detect overflow if no digits are found. + array[0] = array[1] = array[2] = ' '; + EXPECT_DEATH(Strtol(array, NULL, 0), RightOOBReadMessage(0)); + array[2] = '+'; + EXPECT_DEATH(Strtol(array, NULL, 0), RightOOBReadMessage(0)); + array[2] = '-'; + EXPECT_DEATH(Strtol(array, NULL, 0), RightOOBReadMessage(0)); + array[1] = '+'; + Strtol(array, NULL, 0); + array[1] = array[2] = 'z'; + Strtol(array, &endptr, 0); + EXPECT_EQ(array, endptr); + Strtol(array + 2, NULL, 0); + EXPECT_EQ(array, endptr); + free(array); +} + +#if !defined(_WIN32) // FIXME: Fix and enable on Windows. 
+void CallStrtol(const char *nptr, char **endptr, int base) { + Ident(strtol(nptr, endptr, base)); +} +void CallStrtoll(const char *nptr, char **endptr, int base) { + Ident(strtoll(nptr, endptr, base)); +} +TEST(AddressSanitizer, StrtollOOBTest) { + RunStrtolOOBTest(&CallStrtoll); +} +TEST(AddressSanitizer, StrtolOOBTest) { + RunStrtolOOBTest(&CallStrtol); +} +#endif + + diff --git a/contrib/compiler-rt/lib/asan/tests/asan_test.cc b/contrib/compiler-rt/lib/asan/tests/asan_test.cc new file mode 100644 index 000000000000..67bcbaca1e40 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/tests/asan_test.cc @@ -0,0 +1,1286 @@ +//===-- asan_test.cc ------------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. 
+// +//===----------------------------------------------------------------------===// +#include "asan_test_utils.h" + +NOINLINE void *malloc_fff(size_t size) { + void *res = malloc/**/(size); break_optimization(0); return res;} +NOINLINE void *malloc_eee(size_t size) { + void *res = malloc_fff(size); break_optimization(0); return res;} +NOINLINE void *malloc_ddd(size_t size) { + void *res = malloc_eee(size); break_optimization(0); return res;} +NOINLINE void *malloc_ccc(size_t size) { + void *res = malloc_ddd(size); break_optimization(0); return res;} +NOINLINE void *malloc_bbb(size_t size) { + void *res = malloc_ccc(size); break_optimization(0); return res;} +NOINLINE void *malloc_aaa(size_t size) { + void *res = malloc_bbb(size); break_optimization(0); return res;} + +NOINLINE void free_ccc(void *p) { free(p); break_optimization(0);} +NOINLINE void free_bbb(void *p) { free_ccc(p); break_optimization(0);} +NOINLINE void free_aaa(void *p) { free_bbb(p); break_optimization(0);} + +template<typename T> +NOINLINE void uaf_test(int size, int off) { + char *p = (char *)malloc_aaa(size); + free_aaa(p); + for (int i = 1; i < 100; i++) + free_aaa(malloc_aaa(i)); + fprintf(stderr, "writing %ld byte(s) at %p with offset %d\n", + (long)sizeof(T), p, off); + asan_write((T*)(p + off)); +} + +TEST(AddressSanitizer, HasFeatureAddressSanitizerTest) { +#if defined(__has_feature) && __has_feature(address_sanitizer) + bool asan = 1; +#elif defined(__SANITIZE_ADDRESS__) + bool asan = 1; +#else + bool asan = 0; +#endif + EXPECT_EQ(true, asan); +} + +TEST(AddressSanitizer, SimpleDeathTest) { + EXPECT_DEATH(exit(1), ""); +} + +TEST(AddressSanitizer, VariousMallocsTest) { + int *a = (int*)malloc(100 * sizeof(int)); + a[50] = 0; + free(a); + + int *r = (int*)malloc(10); + r = (int*)realloc(r, 2000 * sizeof(int)); + r[1000] = 0; + free(r); + + int *b = new int[100]; + b[50] = 0; + delete [] b; + + int *c = new int; + *c = 0; + delete c; + +#if SANITIZER_TEST_HAS_POSIX_MEMALIGN + int *pm; + 
int pm_res = posix_memalign((void**)&pm, kPageSize, kPageSize); + EXPECT_EQ(0, pm_res); + free(pm); +#endif // SANITIZER_TEST_HAS_POSIX_MEMALIGN + +#if SANITIZER_TEST_HAS_MEMALIGN + int *ma = (int*)memalign(kPageSize, kPageSize); + EXPECT_EQ(0U, (uintptr_t)ma % kPageSize); + ma[123] = 0; + free(ma); +#endif // SANITIZER_TEST_HAS_MEMALIGN +} + +TEST(AddressSanitizer, CallocTest) { + int *a = (int*)calloc(100, sizeof(int)); + EXPECT_EQ(0, a[10]); + free(a); +} + +TEST(AddressSanitizer, CallocReturnsZeroMem) { + size_t sizes[] = {16, 1000, 10000, 100000, 2100000}; + for (size_t s = 0; s < sizeof(sizes)/sizeof(sizes[0]); s++) { + size_t size = sizes[s]; + for (size_t iter = 0; iter < 5; iter++) { + char *x = Ident((char*)calloc(1, size)); + EXPECT_EQ(x[0], 0); + EXPECT_EQ(x[size - 1], 0); + EXPECT_EQ(x[size / 2], 0); + EXPECT_EQ(x[size / 3], 0); + EXPECT_EQ(x[size / 4], 0); + memset(x, 0x42, size); + free(Ident(x)); +#if !defined(_WIN32) + // FIXME: OOM on Windows. We should just make this a lit test + // with quarantine size set to 1. + free(Ident(malloc(Ident(1 << 27)))); // Try to drain the quarantine. +#endif + } + } +} + +// No valloc on Windows or Android. +#if !defined(_WIN32) && !defined(__ANDROID__) +TEST(AddressSanitizer, VallocTest) { + void *a = valloc(100); + EXPECT_EQ(0U, (uintptr_t)a % kPageSize); + free(a); +} +#endif + +#if SANITIZER_TEST_HAS_PVALLOC +TEST(AddressSanitizer, PvallocTest) { + char *a = (char*)pvalloc(kPageSize + 100); + EXPECT_EQ(0U, (uintptr_t)a % kPageSize); + a[kPageSize + 101] = 1; // we should not report an error here. + free(a); + + a = (char*)pvalloc(0); // pvalloc(0) should allocate at least one page. + EXPECT_EQ(0U, (uintptr_t)a % kPageSize); + a[101] = 1; // we should not report an error here. + free(a); +} +#endif // SANITIZER_TEST_HAS_PVALLOC + +#if !defined(_WIN32) +// FIXME: Use an equivalent of pthread_setspecific on Windows. 
+void *TSDWorker(void *test_key) { + if (test_key) { + pthread_setspecific(*(pthread_key_t*)test_key, (void*)0xfeedface); + } + return NULL; +} + +void TSDDestructor(void *tsd) { + // Spawning a thread will check that the current thread id is not -1. + pthread_t th; + PTHREAD_CREATE(&th, NULL, TSDWorker, NULL); + PTHREAD_JOIN(th, NULL); +} + +// This tests triggers the thread-specific data destruction fiasco which occurs +// if we don't manage the TSD destructors ourselves. We create a new pthread +// key with a non-NULL destructor which is likely to be put after the destructor +// of AsanThread in the list of destructors. +// In this case the TSD for AsanThread will be destroyed before TSDDestructor +// is called for the child thread, and a CHECK will fail when we call +// pthread_create() to spawn the grandchild. +TEST(AddressSanitizer, DISABLED_TSDTest) { + pthread_t th; + pthread_key_t test_key; + pthread_key_create(&test_key, TSDDestructor); + PTHREAD_CREATE(&th, NULL, TSDWorker, &test_key); + PTHREAD_JOIN(th, NULL); + pthread_key_delete(test_key); +} +#endif + +TEST(AddressSanitizer, UAF_char) { + const char *uaf_string = "AddressSanitizer:.*heap-use-after-free"; + EXPECT_DEATH(uaf_test<U1>(1, 0), uaf_string); + EXPECT_DEATH(uaf_test<U1>(10, 0), uaf_string); + EXPECT_DEATH(uaf_test<U1>(10, 10), uaf_string); + EXPECT_DEATH(uaf_test<U1>(kLargeMalloc, 0), uaf_string); + EXPECT_DEATH(uaf_test<U1>(kLargeMalloc, kLargeMalloc / 2), uaf_string); +} + +TEST(AddressSanitizer, UAF_long_double) { + if (sizeof(long double) == sizeof(double)) return; + long double *p = Ident(new long double[10]); + EXPECT_DEATH(Ident(p)[12] = 0, "WRITE of size 1[026]"); + EXPECT_DEATH(Ident(p)[0] = Ident(p)[12], "READ of size 1[026]"); + delete [] Ident(p); +} + +#if !defined(_WIN32) +struct Packed5 { + int x; + char c; +} __attribute__((packed)); +#else +# pragma pack(push, 1) +struct Packed5 { + int x; + char c; +}; +# pragma pack(pop) +#endif + +TEST(AddressSanitizer, UAF_Packed5) { + 
static_assert(sizeof(Packed5) == 5, "Please check the keywords used"); + Packed5 *p = Ident(new Packed5[2]); + EXPECT_DEATH(p[0] = p[3], "READ of size 5"); + EXPECT_DEATH(p[3] = p[0], "WRITE of size 5"); + delete [] Ident(p); +} + +#if ASAN_HAS_BLACKLIST +TEST(AddressSanitizer, IgnoreTest) { + int *x = Ident(new int); + delete Ident(x); + *x = 0; +} +#endif // ASAN_HAS_BLACKLIST + +struct StructWithBitField { + int bf1:1; + int bf2:1; + int bf3:1; + int bf4:29; +}; + +TEST(AddressSanitizer, BitFieldPositiveTest) { + StructWithBitField *x = new StructWithBitField; + delete Ident(x); + EXPECT_DEATH(x->bf1 = 0, "use-after-free"); + EXPECT_DEATH(x->bf2 = 0, "use-after-free"); + EXPECT_DEATH(x->bf3 = 0, "use-after-free"); + EXPECT_DEATH(x->bf4 = 0, "use-after-free"); +} + +struct StructWithBitFields_8_24 { + int a:8; + int b:24; +}; + +TEST(AddressSanitizer, BitFieldNegativeTest) { + StructWithBitFields_8_24 *x = Ident(new StructWithBitFields_8_24); + x->a = 0; + x->b = 0; + delete Ident(x); +} + +#if ASAN_NEEDS_SEGV +namespace { + +const char kUnknownCrash[] = "AddressSanitizer: SEGV on unknown address"; +const char kOverriddenHandler[] = "ASan signal handler has been overridden\n"; + +TEST(AddressSanitizer, WildAddressTest) { + char *c = (char*)0x123; + EXPECT_DEATH(*c = 0, kUnknownCrash); +} + +void my_sigaction_sighandler(int, siginfo_t*, void*) { + fprintf(stderr, kOverriddenHandler); + exit(1); +} + +void my_signal_sighandler(int signum) { + fprintf(stderr, kOverriddenHandler); + exit(1); +} + +TEST(AddressSanitizer, SignalTest) { + struct sigaction sigact; + memset(&sigact, 0, sizeof(sigact)); + sigact.sa_sigaction = my_sigaction_sighandler; + sigact.sa_flags = SA_SIGINFO; + // ASan should silently ignore sigaction()... + EXPECT_EQ(0, sigaction(SIGSEGV, &sigact, 0)); +#ifdef __APPLE__ + EXPECT_EQ(0, sigaction(SIGBUS, &sigact, 0)); +#endif + char *c = (char*)0x123; + EXPECT_DEATH(*c = 0, kUnknownCrash); + // ... and signal(). 
+ EXPECT_EQ(0, signal(SIGSEGV, my_signal_sighandler)); + EXPECT_DEATH(*c = 0, kUnknownCrash); +} +} // namespace +#endif + +static void TestLargeMalloc(size_t size) { + char buff[1024]; + sprintf(buff, "is located 1 bytes to the left of %lu-byte", (long)size); + EXPECT_DEATH(Ident((char*)malloc(size))[-1] = 0, buff); +} + +TEST(AddressSanitizer, LargeMallocTest) { + const int max_size = (SANITIZER_WORDSIZE == 32) ? 1 << 26 : 1 << 28; + for (int i = 113; i < max_size; i = i * 2 + 13) { + TestLargeMalloc(i); + } +} + +TEST(AddressSanitizer, HugeMallocTest) { + if (SANITIZER_WORDSIZE != 64 || ASAN_AVOID_EXPENSIVE_TESTS) return; + size_t n_megs = 4100; + EXPECT_DEATH(Ident((char*)malloc(n_megs << 20))[-1] = 0, + "is located 1 bytes to the left|" + "AddressSanitizer failed to allocate"); +} + +#if SANITIZER_TEST_HAS_MEMALIGN +void MemalignRun(size_t align, size_t size, int idx) { + char *p = (char *)memalign(align, size); + Ident(p)[idx] = 0; + free(p); +} + +TEST(AddressSanitizer, memalign) { + for (int align = 16; align <= (1 << 23); align *= 2) { + size_t size = align * 5; + EXPECT_DEATH(MemalignRun(align, size, -1), + "is located 1 bytes to the left"); + EXPECT_DEATH(MemalignRun(align, size, size + 1), + "is located 1 bytes to the right"); + } +} +#endif // SANITIZER_TEST_HAS_MEMALIGN + +void *ManyThreadsWorker(void *a) { + for (int iter = 0; iter < 100; iter++) { + for (size_t size = 100; size < 2000; size *= 2) { + free(Ident(malloc(size))); + } + } + return 0; +} + +TEST(AddressSanitizer, ManyThreadsTest) { + const size_t kNumThreads = + (SANITIZER_WORDSIZE == 32 || ASAN_AVOID_EXPENSIVE_TESTS) ? 
30 : 1000; + pthread_t t[kNumThreads]; + for (size_t i = 0; i < kNumThreads; i++) { + PTHREAD_CREATE(&t[i], 0, ManyThreadsWorker, (void*)i); + } + for (size_t i = 0; i < kNumThreads; i++) { + PTHREAD_JOIN(t[i], 0); + } +} + +TEST(AddressSanitizer, ReallocTest) { + const int kMinElem = 5; + int *ptr = (int*)malloc(sizeof(int) * kMinElem); + ptr[3] = 3; + for (int i = 0; i < 10000; i++) { + ptr = (int*)realloc(ptr, + (my_rand() % 1000 + kMinElem) * sizeof(int)); + EXPECT_EQ(3, ptr[3]); + } + free(ptr); + // Realloc pointer returned by malloc(0). + int *ptr2 = Ident((int*)malloc(0)); + ptr2 = Ident((int*)realloc(ptr2, sizeof(*ptr2))); + *ptr2 = 42; + EXPECT_EQ(42, *ptr2); + free(ptr2); +} + +TEST(AddressSanitizer, ReallocFreedPointerTest) { + void *ptr = Ident(malloc(42)); + ASSERT_TRUE(NULL != ptr); + free(ptr); + EXPECT_DEATH(ptr = realloc(ptr, 77), "attempting double-free"); +} + +TEST(AddressSanitizer, ReallocInvalidPointerTest) { + void *ptr = Ident(malloc(42)); + EXPECT_DEATH(ptr = realloc((int*)ptr + 1, 77), "attempting free.*not malloc"); + free(ptr); +} + +TEST(AddressSanitizer, ZeroSizeMallocTest) { + // Test that malloc(0) and similar functions don't return NULL. 
+ void *ptr = Ident(malloc(0)); + EXPECT_TRUE(NULL != ptr); + free(ptr); +#if SANITIZER_TEST_HAS_POSIX_MEMALIGN + int pm_res = posix_memalign(&ptr, 1<<20, 0); + EXPECT_EQ(0, pm_res); + EXPECT_TRUE(NULL != ptr); + free(ptr); +#endif // SANITIZER_TEST_HAS_POSIX_MEMALIGN + int *int_ptr = new int[0]; + int *int_ptr2 = new int[0]; + EXPECT_TRUE(NULL != int_ptr); + EXPECT_TRUE(NULL != int_ptr2); + EXPECT_NE(int_ptr, int_ptr2); + delete[] int_ptr; + delete[] int_ptr2; +} + +#if SANITIZER_TEST_HAS_MALLOC_USABLE_SIZE +static const char *kMallocUsableSizeErrorMsg = + "AddressSanitizer: attempting to call malloc_usable_size()"; + +TEST(AddressSanitizer, MallocUsableSizeTest) { + const size_t kArraySize = 100; + char *array = Ident((char*)malloc(kArraySize)); + int *int_ptr = Ident(new int); + EXPECT_EQ(0U, malloc_usable_size(NULL)); + EXPECT_EQ(kArraySize, malloc_usable_size(array)); + EXPECT_EQ(sizeof(int), malloc_usable_size(int_ptr)); + EXPECT_DEATH(malloc_usable_size((void*)0x123), kMallocUsableSizeErrorMsg); + EXPECT_DEATH(malloc_usable_size(array + kArraySize / 2), + kMallocUsableSizeErrorMsg); + free(array); + EXPECT_DEATH(malloc_usable_size(array), kMallocUsableSizeErrorMsg); + delete int_ptr; +} +#endif // SANITIZER_TEST_HAS_MALLOC_USABLE_SIZE + +void WrongFree() { + int *x = (int*)malloc(100 * sizeof(int)); + // Use the allocated memory, otherwise Clang will optimize it out. + Ident(x); + free(x + 1); +} + +#if !defined(_WIN32) // FIXME: This should be a lit test. +TEST(AddressSanitizer, WrongFreeTest) { + EXPECT_DEATH(WrongFree(), ASAN_PCRE_DOTALL + "ERROR: AddressSanitizer: attempting free.*not malloc" + ".*is located 4 bytes inside of 400-byte region" + ".*allocated by thread"); +} +#endif + +void DoubleFree() { + int *x = (int*)malloc(100 * sizeof(int)); + fprintf(stderr, "DoubleFree: x=%p\n", x); + free(x); + free(x); + fprintf(stderr, "should have failed in the second free(%p)\n", x); + abort(); +} + +#if !defined(_WIN32) // FIXME: This should be a lit test. 
+TEST(AddressSanitizer, DoubleFreeTest) { + EXPECT_DEATH(DoubleFree(), ASAN_PCRE_DOTALL + "ERROR: AddressSanitizer: attempting double-free" + ".*is located 0 bytes inside of 400-byte region" + ".*freed by thread T0 here" + ".*previously allocated by thread T0 here"); +} +#endif + +template<int kSize> +NOINLINE void SizedStackTest() { + char a[kSize]; + char *A = Ident((char*)&a); + const char *expected_death = "AddressSanitizer: stack-buffer-"; + for (size_t i = 0; i < kSize; i++) + A[i] = i; + EXPECT_DEATH(A[-1] = 0, expected_death); + EXPECT_DEATH(A[-5] = 0, expected_death); + EXPECT_DEATH(A[kSize] = 0, expected_death); + EXPECT_DEATH(A[kSize + 1] = 0, expected_death); + EXPECT_DEATH(A[kSize + 5] = 0, expected_death); + if (kSize > 16) + EXPECT_DEATH(A[kSize + 31] = 0, expected_death); +} + +TEST(AddressSanitizer, SimpleStackTest) { + SizedStackTest<1>(); + SizedStackTest<2>(); + SizedStackTest<3>(); + SizedStackTest<4>(); + SizedStackTest<5>(); + SizedStackTest<6>(); + SizedStackTest<7>(); + SizedStackTest<16>(); + SizedStackTest<25>(); + SizedStackTest<34>(); + SizedStackTest<43>(); + SizedStackTest<51>(); + SizedStackTest<62>(); + SizedStackTest<64>(); + SizedStackTest<128>(); +} + +#if !defined(_WIN32) +// FIXME: It's a bit hard to write multi-line death test expectations +// in a portable way. Anyways, this should just be turned into a lit test. +TEST(AddressSanitizer, ManyStackObjectsTest) { + char XXX[10]; + char YYY[20]; + char ZZZ[30]; + Ident(XXX); + Ident(YYY); + EXPECT_DEATH(Ident(ZZZ)[-1] = 0, ASAN_PCRE_DOTALL "XXX.*YYY.*ZZZ"); +} +#endif + +#if 0 // This test requires online symbolizer. +// Moved to lit_tests/stack-oob-frames.cc. +// Reenable here once we have online symbolizer by default. 
+NOINLINE static void Frame0(int frame, char *a, char *b, char *c) { + char d[4] = {0}; + char *D = Ident(d); + switch (frame) { + case 3: a[5]++; break; + case 2: b[5]++; break; + case 1: c[5]++; break; + case 0: D[5]++; break; + } +} +NOINLINE static void Frame1(int frame, char *a, char *b) { + char c[4] = {0}; Frame0(frame, a, b, c); + break_optimization(0); +} +NOINLINE static void Frame2(int frame, char *a) { + char b[4] = {0}; Frame1(frame, a, b); + break_optimization(0); +} +NOINLINE static void Frame3(int frame) { + char a[4] = {0}; Frame2(frame, a); + break_optimization(0); +} + +TEST(AddressSanitizer, GuiltyStackFrame0Test) { + EXPECT_DEATH(Frame3(0), "located .*in frame <.*Frame0"); +} +TEST(AddressSanitizer, GuiltyStackFrame1Test) { + EXPECT_DEATH(Frame3(1), "located .*in frame <.*Frame1"); +} +TEST(AddressSanitizer, GuiltyStackFrame2Test) { + EXPECT_DEATH(Frame3(2), "located .*in frame <.*Frame2"); +} +TEST(AddressSanitizer, GuiltyStackFrame3Test) { + EXPECT_DEATH(Frame3(3), "located .*in frame <.*Frame3"); +} +#endif + +NOINLINE void LongJmpFunc1(jmp_buf buf) { + // create three red zones for these two stack objects. + int a; + int b; + + int *A = Ident(&a); + int *B = Ident(&b); + *A = *B; + longjmp(buf, 1); +} + +NOINLINE void TouchStackFunc() { + int a[100]; // long array will intersect with redzones from LongJmpFunc1. + int *A = Ident(a); + for (int i = 0; i < 100; i++) + A[i] = i*i; +} + +// Test that we handle longjmp and do not report false positives on stack. +TEST(AddressSanitizer, LongJmpTest) { + static jmp_buf buf; + if (!setjmp(buf)) { + LongJmpFunc1(buf); + } else { + TouchStackFunc(); + } +} + +#if !defined(_WIN32) // Only basic longjmp is available on Windows. +NOINLINE void BuiltinLongJmpFunc1(jmp_buf buf) { + // create three red zones for these two stack objects. 
+ int a; + int b; + + int *A = Ident(&a); + int *B = Ident(&b); + *A = *B; + __builtin_longjmp((void**)buf, 1); +} + +NOINLINE void UnderscopeLongJmpFunc1(jmp_buf buf) { + // create three red zones for these two stack objects. + int a; + int b; + + int *A = Ident(&a); + int *B = Ident(&b); + *A = *B; + _longjmp(buf, 1); +} + +NOINLINE void SigLongJmpFunc1(sigjmp_buf buf) { + // create three red zones for these two stack objects. + int a; + int b; + + int *A = Ident(&a); + int *B = Ident(&b); + *A = *B; + siglongjmp(buf, 1); +} + +#if !defined(__ANDROID__) && !defined(__arm__) && \ + !defined(__powerpc64__) && !defined(__powerpc__) +// Does not work on Power and ARM: +// https://code.google.com/p/address-sanitizer/issues/detail?id=185 +TEST(AddressSanitizer, BuiltinLongJmpTest) { + static jmp_buf buf; + if (!__builtin_setjmp((void**)buf)) { + BuiltinLongJmpFunc1(buf); + } else { + TouchStackFunc(); + } +} +#endif // !defined(__ANDROID__) && !defined(__powerpc64__) && + // !defined(__powerpc__) && !defined(__arm__) + +TEST(AddressSanitizer, UnderscopeLongJmpTest) { + static jmp_buf buf; + if (!_setjmp(buf)) { + UnderscopeLongJmpFunc1(buf); + } else { + TouchStackFunc(); + } +} + +TEST(AddressSanitizer, SigLongJmpTest) { + static sigjmp_buf buf; + if (!sigsetjmp(buf, 1)) { + SigLongJmpFunc1(buf); + } else { + TouchStackFunc(); + } +} +#endif + +// FIXME: Why does clang-cl define __EXCEPTIONS? +#if defined(__EXCEPTIONS) && !defined(_WIN32) +NOINLINE void ThrowFunc() { + // create three red zones for these two stack objects. + int a; + int b; + + int *A = Ident(&a); + int *B = Ident(&b); + *A = *B; + ASAN_THROW(1); +} + +TEST(AddressSanitizer, CxxExceptionTest) { + if (ASAN_UAR) return; + // TODO(kcc): this test crashes on 32-bit for some reason... + if (SANITIZER_WORDSIZE == 32) return; + try { + ThrowFunc(); + } catch(...) {} + TouchStackFunc(); +} +#endif + +void *ThreadStackReuseFunc1(void *unused) { + // create three red zones for these two stack objects. 
+ int a; + int b; + + int *A = Ident(&a); + int *B = Ident(&b); + *A = *B; + pthread_exit(0); + return 0; +} + +void *ThreadStackReuseFunc2(void *unused) { + TouchStackFunc(); + return 0; +} + +TEST(AddressSanitizer, ThreadStackReuseTest) { + pthread_t t; + PTHREAD_CREATE(&t, 0, ThreadStackReuseFunc1, 0); + PTHREAD_JOIN(t, 0); + PTHREAD_CREATE(&t, 0, ThreadStackReuseFunc2, 0); + PTHREAD_JOIN(t, 0); +} + +#if defined(__i686__) || defined(__x86_64__) +#include <emmintrin.h> +TEST(AddressSanitizer, Store128Test) { + char *a = Ident((char*)malloc(Ident(12))); + char *p = a; + if (((uintptr_t)a % 16) != 0) + p = a + 8; + assert(((uintptr_t)p % 16) == 0); + __m128i value_wide = _mm_set1_epi16(0x1234); + EXPECT_DEATH(_mm_store_si128((__m128i*)p, value_wide), + "AddressSanitizer: heap-buffer-overflow"); + EXPECT_DEATH(_mm_store_si128((__m128i*)p, value_wide), + "WRITE of size 16"); + EXPECT_DEATH(_mm_store_si128((__m128i*)p, value_wide), + "located 0 bytes to the right of 12-byte"); + free(a); +} +#endif + +// FIXME: All tests that use this function should be turned into lit tests. +string RightOOBErrorMessage(int oob_distance, bool is_write) { + assert(oob_distance >= 0); + char expected_str[100]; + sprintf(expected_str, ASAN_PCRE_DOTALL +#if !GTEST_USES_SIMPLE_RE + "buffer-overflow.*%s.*" +#endif + "located %d bytes to the right", +#if !GTEST_USES_SIMPLE_RE + is_write ? "WRITE" : "READ", +#endif + oob_distance); + return string(expected_str); +} + +string RightOOBWriteMessage(int oob_distance) { + return RightOOBErrorMessage(oob_distance, /*is_write*/true); +} + +string RightOOBReadMessage(int oob_distance) { + return RightOOBErrorMessage(oob_distance, /*is_write*/false); +} + +// FIXME: All tests that use this function should be turned into lit tests. 
+string LeftOOBErrorMessage(int oob_distance, bool is_write) { + assert(oob_distance > 0); + char expected_str[100]; + sprintf(expected_str, +#if !GTEST_USES_SIMPLE_RE + ASAN_PCRE_DOTALL "%s.*" +#endif + "located %d bytes to the left", +#if !GTEST_USES_SIMPLE_RE + is_write ? "WRITE" : "READ", +#endif + oob_distance); + return string(expected_str); +} + +string LeftOOBWriteMessage(int oob_distance) { + return LeftOOBErrorMessage(oob_distance, /*is_write*/true); +} + +string LeftOOBReadMessage(int oob_distance) { + return LeftOOBErrorMessage(oob_distance, /*is_write*/false); +} + +string LeftOOBAccessMessage(int oob_distance) { + assert(oob_distance > 0); + char expected_str[100]; + sprintf(expected_str, "located %d bytes to the left", oob_distance); + return string(expected_str); +} + +char* MallocAndMemsetString(size_t size, char ch) { + char *s = Ident((char*)malloc(size)); + memset(s, ch, size); + return s; +} + +char* MallocAndMemsetString(size_t size) { + return MallocAndMemsetString(size, 'z'); +} + +#if defined(__linux__) && !defined(__ANDROID__) +#define READ_TEST(READ_N_BYTES) \ + char *x = new char[10]; \ + int fd = open("/proc/self/stat", O_RDONLY); \ + ASSERT_GT(fd, 0); \ + EXPECT_DEATH(READ_N_BYTES, \ + ASAN_PCRE_DOTALL \ + "AddressSanitizer: heap-buffer-overflow" \ + ".* is located 0 bytes to the right of 10-byte region"); \ + close(fd); \ + delete [] x; \ + +TEST(AddressSanitizer, pread) { + READ_TEST(pread(fd, x, 15, 0)); +} + +TEST(AddressSanitizer, pread64) { + READ_TEST(pread64(fd, x, 15, 0)); +} + +TEST(AddressSanitizer, read) { + READ_TEST(read(fd, x, 15)); +} +#endif // defined(__linux__) && !defined(__ANDROID__) + +// This test case fails +// Clang optimizes memcpy/memset calls which lead to unaligned access +TEST(AddressSanitizer, DISABLED_MemIntrinsicUnalignedAccessTest) { + int size = Ident(4096); + char *s = Ident((char*)malloc(size)); + EXPECT_DEATH(memset(s + size - 1, 0, 2), RightOOBWriteMessage(0)); + free(s); +} + +// TODO(samsonov): 
Add a test with malloc(0) +// TODO(samsonov): Add tests for str* and mem* functions. + +NOINLINE static int LargeFunction(bool do_bad_access) { + int *x = new int[100]; + x[0]++; + x[1]++; + x[2]++; + x[3]++; + x[4]++; + x[5]++; + x[6]++; + x[7]++; + x[8]++; + x[9]++; + + x[do_bad_access ? 100 : 0]++; int res = __LINE__; + + x[10]++; + x[11]++; + x[12]++; + x[13]++; + x[14]++; + x[15]++; + x[16]++; + x[17]++; + x[18]++; + x[19]++; + + delete[] x; + return res; +} + +// Test the we have correct debug info for the failing instruction. +// This test requires the in-process symbolizer to be enabled by default. +TEST(AddressSanitizer, DISABLED_LargeFunctionSymbolizeTest) { + int failing_line = LargeFunction(false); + char expected_warning[128]; + sprintf(expected_warning, "LargeFunction.*asan_test.*:%d", failing_line); + EXPECT_DEATH(LargeFunction(true), expected_warning); +} + +// Check that we unwind and symbolize correctly. +TEST(AddressSanitizer, DISABLED_MallocFreeUnwindAndSymbolizeTest) { + int *a = (int*)malloc_aaa(sizeof(int)); + *a = 1; + free_aaa(a); + EXPECT_DEATH(*a = 1, "free_ccc.*free_bbb.*free_aaa.*" + "malloc_fff.*malloc_eee.*malloc_ddd"); +} + +static bool TryToSetThreadName(const char *name) { +#if defined(__linux__) && defined(PR_SET_NAME) + return 0 == prctl(PR_SET_NAME, (unsigned long)name, 0, 0, 0); +#else + return false; +#endif +} + +void *ThreadedTestAlloc(void *a) { + EXPECT_EQ(true, TryToSetThreadName("AllocThr")); + int **p = (int**)a; + *p = new int; + return 0; +} + +void *ThreadedTestFree(void *a) { + EXPECT_EQ(true, TryToSetThreadName("FreeThr")); + int **p = (int**)a; + delete *p; + return 0; +} + +void *ThreadedTestUse(void *a) { + EXPECT_EQ(true, TryToSetThreadName("UseThr")); + int **p = (int**)a; + **p = 1; + return 0; +} + +void ThreadedTestSpawn() { + pthread_t t; + int *x; + PTHREAD_CREATE(&t, 0, ThreadedTestAlloc, &x); + PTHREAD_JOIN(t, 0); + PTHREAD_CREATE(&t, 0, ThreadedTestFree, &x); + PTHREAD_JOIN(t, 0); + PTHREAD_CREATE(&t, 
0, ThreadedTestUse, &x); + PTHREAD_JOIN(t, 0); +} + +#if !defined(_WIN32) // FIXME: This should be a lit test. +TEST(AddressSanitizer, ThreadedTest) { + EXPECT_DEATH(ThreadedTestSpawn(), + ASAN_PCRE_DOTALL + "Thread T.*created" + ".*Thread T.*created" + ".*Thread T.*created"); +} +#endif + +void *ThreadedTestFunc(void *unused) { + // Check if prctl(PR_SET_NAME) is supported. Return if not. + if (!TryToSetThreadName("TestFunc")) + return 0; + EXPECT_DEATH(ThreadedTestSpawn(), + ASAN_PCRE_DOTALL + "WRITE .*thread T. .UseThr." + ".*freed by thread T. .FreeThr. here:" + ".*previously allocated by thread T. .AllocThr. here:" + ".*Thread T. .UseThr. created by T.*TestFunc" + ".*Thread T. .FreeThr. created by T" + ".*Thread T. .AllocThr. created by T" + ""); + return 0; +} + +TEST(AddressSanitizer, ThreadNamesTest) { + // Run ThreadedTestFunc in a separate thread because it tries to set a + // thread name and we don't want to change the main thread's name. + pthread_t t; + PTHREAD_CREATE(&t, 0, ThreadedTestFunc, 0); + PTHREAD_JOIN(t, 0); +} + +#if ASAN_NEEDS_SEGV +TEST(AddressSanitizer, ShadowGapTest) { +#if SANITIZER_WORDSIZE == 32 + char *addr = (char*)0x22000000; +#else +# if defined(__powerpc64__) + char *addr = (char*)0x024000800000; +# else + char *addr = (char*)0x0000100000080000; +# endif +#endif + EXPECT_DEATH(*addr = 1, "AddressSanitizer: SEGV on unknown"); +} +#endif // ASAN_NEEDS_SEGV + +extern "C" { +NOINLINE static void UseThenFreeThenUse() { + char *x = Ident((char*)malloc(8)); + *x = 1; + free_aaa(x); + *x = 2; +} +} + +TEST(AddressSanitizer, UseThenFreeThenUseTest) { + EXPECT_DEATH(UseThenFreeThenUse(), "freed by thread"); +} + +TEST(AddressSanitizer, StrDupTest) { + free(strdup(Ident("123"))); +} + +// Currently we create and poison redzone at right of global variables. 
+static char static110[110]; +const char ConstGlob[7] = {1, 2, 3, 4, 5, 6, 7}; +static const char StaticConstGlob[3] = {9, 8, 7}; + +TEST(AddressSanitizer, GlobalTest) { + static char func_static15[15]; + + static char fs1[10]; + static char fs2[10]; + static char fs3[10]; + + glob5[Ident(0)] = 0; + glob5[Ident(1)] = 0; + glob5[Ident(2)] = 0; + glob5[Ident(3)] = 0; + glob5[Ident(4)] = 0; + + EXPECT_DEATH(glob5[Ident(5)] = 0, + "0 bytes to the right of global variable.*glob5.* size 5"); + EXPECT_DEATH(glob5[Ident(5+6)] = 0, + "6 bytes to the right of global variable.*glob5.* size 5"); + Ident(static110); // avoid optimizations + static110[Ident(0)] = 0; + static110[Ident(109)] = 0; + EXPECT_DEATH(static110[Ident(110)] = 0, + "0 bytes to the right of global variable"); + EXPECT_DEATH(static110[Ident(110+7)] = 0, + "7 bytes to the right of global variable"); + + Ident(func_static15); // avoid optimizations + func_static15[Ident(0)] = 0; + EXPECT_DEATH(func_static15[Ident(15)] = 0, + "0 bytes to the right of global variable"); + EXPECT_DEATH(func_static15[Ident(15 + 9)] = 0, + "9 bytes to the right of global variable"); + + Ident(fs1); + Ident(fs2); + Ident(fs3); + + // We don't create left redzones, so this is not 100% guaranteed to fail. + // But most likely will. + EXPECT_DEATH(fs2[Ident(-1)] = 0, "is located.*of global variable"); + + EXPECT_DEATH(Ident(Ident(ConstGlob)[8]), + "is located 1 bytes to the right of .*ConstGlob"); + EXPECT_DEATH(Ident(Ident(StaticConstGlob)[5]), + "is located 2 bytes to the right of .*StaticConstGlob"); + + // call stuff from another file. + GlobalsTest(0); +} + +TEST(AddressSanitizer, GlobalStringConstTest) { + static const char *zoo = "FOOBAR123"; + const char *p = Ident(zoo); + EXPECT_DEATH(Ident(p[15]), "is ascii string 'FOOBAR123'"); +} + +TEST(AddressSanitizer, FileNameInGlobalReportTest) { + static char zoo[10]; + const char *p = Ident(zoo); + // The file name should be present in the report. 
+ EXPECT_DEATH(Ident(p[15]), "zoo.*asan_test."); +} + +int *ReturnsPointerToALocalObject() { + int a = 0; + return Ident(&a); +} + +#if ASAN_UAR == 1 +TEST(AddressSanitizer, LocalReferenceReturnTest) { + int *(*f)() = Ident(ReturnsPointerToALocalObject); + int *p = f(); + // Call 'f' a few more times, 'p' should still be poisoned. + for (int i = 0; i < 32; i++) + f(); + EXPECT_DEATH(*p = 1, "AddressSanitizer: stack-use-after-return"); + EXPECT_DEATH(*p = 1, "is located.*in frame .*ReturnsPointerToALocal"); +} +#endif + +template <int kSize> +NOINLINE static void FuncWithStack() { + char x[kSize]; + Ident(x)[0] = 0; + Ident(x)[kSize-1] = 0; +} + +static void LotsOfStackReuse() { + int LargeStack[10000]; + Ident(LargeStack)[0] = 0; + for (int i = 0; i < 10000; i++) { + FuncWithStack<128 * 1>(); + FuncWithStack<128 * 2>(); + FuncWithStack<128 * 4>(); + FuncWithStack<128 * 8>(); + FuncWithStack<128 * 16>(); + FuncWithStack<128 * 32>(); + FuncWithStack<128 * 64>(); + FuncWithStack<128 * 128>(); + FuncWithStack<128 * 256>(); + FuncWithStack<128 * 512>(); + Ident(LargeStack)[0] = 0; + } +} + +TEST(AddressSanitizer, StressStackReuseTest) { + LotsOfStackReuse(); +} + +TEST(AddressSanitizer, ThreadedStressStackReuseTest) { + const int kNumThreads = 20; + pthread_t t[kNumThreads]; + for (int i = 0; i < kNumThreads; i++) { + PTHREAD_CREATE(&t[i], 0, (void* (*)(void *x))LotsOfStackReuse, 0); + } + for (int i = 0; i < kNumThreads; i++) { + PTHREAD_JOIN(t[i], 0); + } +} + +static void *PthreadExit(void *a) { + pthread_exit(0); + return 0; +} + +TEST(AddressSanitizer, PthreadExitTest) { + pthread_t t; + for (int i = 0; i < 1000; i++) { + PTHREAD_CREATE(&t, 0, PthreadExit, 0); + PTHREAD_JOIN(t, 0); + } +} + +// FIXME: Why does clang-cl define __EXCEPTIONS? +#if defined(__EXCEPTIONS) && !defined(_WIN32) +NOINLINE static void StackReuseAndException() { + int large_stack[1000]; + Ident(large_stack); + ASAN_THROW(1); +} + +// TODO(kcc): support exceptions with use-after-return. 
+TEST(AddressSanitizer, DISABLED_StressStackReuseAndExceptionsTest) { + for (int i = 0; i < 10000; i++) { + try { + StackReuseAndException(); + } catch(...) { + } + } +} +#endif + +#if !defined(_WIN32) +TEST(AddressSanitizer, MlockTest) { + EXPECT_EQ(0, mlockall(MCL_CURRENT)); + EXPECT_EQ(0, mlock((void*)0x12345, 0x5678)); + EXPECT_EQ(0, munlockall()); + EXPECT_EQ(0, munlock((void*)0x987, 0x654)); +} +#endif + +struct LargeStruct { + int foo[100]; +}; + +// Test for bug http://llvm.org/bugs/show_bug.cgi?id=11763. +// Struct copy should not cause asan warning even if lhs == rhs. +TEST(AddressSanitizer, LargeStructCopyTest) { + LargeStruct a; + *Ident(&a) = *Ident(&a); +} + +ATTRIBUTE_NO_SANITIZE_ADDRESS +static void NoSanitizeAddress() { + char *foo = new char[10]; + Ident(foo)[10] = 0; + delete [] foo; +} + +TEST(AddressSanitizer, AttributeNoSanitizeAddressTest) { + Ident(NoSanitizeAddress)(); +} + +// The new/delete/etc mismatch checks don't work on Android, +// as calls to new/delete go through malloc/free. 
+// OS X support is tracked here: +// https://code.google.com/p/address-sanitizer/issues/detail?id=131 +// Windows support is tracked here: +// https://code.google.com/p/address-sanitizer/issues/detail?id=309 +#if !defined(__ANDROID__) && \ + !defined(__APPLE__) && \ + !defined(_WIN32) +static string MismatchStr(const string &str) { + return string("AddressSanitizer: alloc-dealloc-mismatch \\(") + str; +} + +TEST(AddressSanitizer, AllocDeallocMismatch) { + EXPECT_DEATH(free(Ident(new int)), + MismatchStr("operator new vs free")); + EXPECT_DEATH(free(Ident(new int[2])), + MismatchStr("operator new \\[\\] vs free")); + EXPECT_DEATH(delete (Ident(new int[2])), + MismatchStr("operator new \\[\\] vs operator delete")); + EXPECT_DEATH(delete (Ident((int*)malloc(2 * sizeof(int)))), + MismatchStr("malloc vs operator delete")); + EXPECT_DEATH(delete [] (Ident(new int)), + MismatchStr("operator new vs operator delete \\[\\]")); + EXPECT_DEATH(delete [] (Ident((int*)malloc(2 * sizeof(int)))), + MismatchStr("malloc vs operator delete \\[\\]")); +} +#endif + +// ------------------ demo tests; run each one-by-one ------------- +// e.g. 
--gtest_filter=*DemoOOBLeftHigh --gtest_also_run_disabled_tests +TEST(AddressSanitizer, DISABLED_DemoThreadedTest) { + ThreadedTestSpawn(); +} + +void *SimpleBugOnSTack(void *x = 0) { + char a[20]; + Ident(a)[20] = 0; + return 0; +} + +TEST(AddressSanitizer, DISABLED_DemoStackTest) { + SimpleBugOnSTack(); +} + +TEST(AddressSanitizer, DISABLED_DemoThreadStackTest) { + pthread_t t; + PTHREAD_CREATE(&t, 0, SimpleBugOnSTack, 0); + PTHREAD_JOIN(t, 0); +} + +TEST(AddressSanitizer, DISABLED_DemoUAFLowIn) { + uaf_test<U1>(10, 0); +} +TEST(AddressSanitizer, DISABLED_DemoUAFLowLeft) { + uaf_test<U1>(10, -2); +} +TEST(AddressSanitizer, DISABLED_DemoUAFLowRight) { + uaf_test<U1>(10, 10); +} + +TEST(AddressSanitizer, DISABLED_DemoUAFHigh) { + uaf_test<U1>(kLargeMalloc, 0); +} + +TEST(AddressSanitizer, DISABLED_DemoOOM) { + size_t size = SANITIZER_WORDSIZE == 64 ? (size_t)(1ULL << 40) : (0xf0000000); + printf("%p\n", malloc(size)); +} + +TEST(AddressSanitizer, DISABLED_DemoDoubleFreeTest) { + DoubleFree(); +} + +TEST(AddressSanitizer, DISABLED_DemoNullDerefTest) { + int *a = 0; + Ident(a)[10] = 0; +} + +TEST(AddressSanitizer, DISABLED_DemoFunctionStaticTest) { + static char a[100]; + static char b[100]; + static char c[100]; + Ident(a); + Ident(b); + Ident(c); + Ident(a)[5] = 0; + Ident(b)[105] = 0; + Ident(a)[5] = 0; +} + +TEST(AddressSanitizer, DISABLED_DemoTooMuchMemoryTest) { + const size_t kAllocSize = (1 << 28) - 1024; + size_t total_size = 0; + while (true) { + char *x = (char*)malloc(kAllocSize); + memset(x, 0, kAllocSize); + total_size += kAllocSize; + fprintf(stderr, "total: %ldM %p\n", (long)total_size >> 20, x); + } +} + +// http://code.google.com/p/address-sanitizer/issues/detail?id=66 +TEST(AddressSanitizer, BufferOverflowAfterManyFrees) { + for (int i = 0; i < 1000000; i++) { + delete [] (Ident(new char [8644])); + } + char *x = new char[8192]; + EXPECT_DEATH(x[Ident(8192)] = 0, "AddressSanitizer: heap-buffer-overflow"); + delete [] Ident(x); +} + + +// Test that 
instrumentation of stack allocations takes into account +// AllocSize of a type, and not its StoreSize (16 vs 10 bytes for long double). +// See http://llvm.org/bugs/show_bug.cgi?id=12047 for more details. +TEST(AddressSanitizer, LongDoubleNegativeTest) { + long double a, b; + static long double c; + memcpy(Ident(&a), Ident(&b), sizeof(long double)); + memcpy(Ident(&c), Ident(&b), sizeof(long double)); +} + +#if !defined(_WIN32) +TEST(AddressSanitizer, pthread_getschedparam) { + int policy; + struct sched_param param; + EXPECT_DEATH( + pthread_getschedparam(pthread_self(), &policy, Ident(¶m) + 2), + "AddressSanitizer: stack-buffer-.*flow"); + EXPECT_DEATH( + pthread_getschedparam(pthread_self(), Ident(&policy) - 1, ¶m), + "AddressSanitizer: stack-buffer-.*flow"); + int res = pthread_getschedparam(pthread_self(), &policy, ¶m); + ASSERT_EQ(0, res); +} +#endif diff --git a/contrib/compiler-rt/lib/asan/tests/asan_test.ignore b/contrib/compiler-rt/lib/asan/tests/asan_test.ignore new file mode 100644 index 000000000000..ea5c26099e75 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/tests/asan_test.ignore @@ -0,0 +1,3 @@ +# blacklisted functions for instrumented ASan unit test +fun:*IgnoreTest* +fun:*SomeOtherFunc* diff --git a/contrib/compiler-rt/lib/asan/tests/asan_test_config.h b/contrib/compiler-rt/lib/asan/tests/asan_test_config.h new file mode 100644 index 000000000000..92f276307481 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/tests/asan_test_config.h @@ -0,0 +1,54 @@ +//===-- asan_test_config.h --------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. 
+// +//===----------------------------------------------------------------------===// +#if !defined(INCLUDED_FROM_ASAN_TEST_UTILS_H) +# error "This file should be included into asan_test_utils.h only" +#endif + +#ifndef ASAN_TEST_CONFIG_H +#define ASAN_TEST_CONFIG_H + +#include <vector> +#include <string> +#include <map> + +using std::string; +using std::vector; +using std::map; + +#ifndef ASAN_UAR +# error "please define ASAN_UAR" +#endif + +#ifndef ASAN_HAS_EXCEPTIONS +# error "please define ASAN_HAS_EXCEPTIONS" +#endif + +#ifndef ASAN_HAS_BLACKLIST +# error "please define ASAN_HAS_BLACKLIST" +#endif + +#ifndef ASAN_NEEDS_SEGV +# if defined(_WIN32) +# define ASAN_NEEDS_SEGV 0 +# else +# define ASAN_NEEDS_SEGV 1 +# endif +#endif + +#ifndef ASAN_AVOID_EXPENSIVE_TESTS +# define ASAN_AVOID_EXPENSIVE_TESTS 0 +#endif + +#define ASAN_PCRE_DOTALL "" + +#endif // ASAN_TEST_CONFIG_H diff --git a/contrib/compiler-rt/lib/asan/tests/asan_test_main.cc b/contrib/compiler-rt/lib/asan/tests/asan_test_main.cc new file mode 100644 index 000000000000..1746c5f4837b --- /dev/null +++ b/contrib/compiler-rt/lib/asan/tests/asan_test_main.cc @@ -0,0 +1,19 @@ +//===-- asan_test_main.cc -------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. 
+// +//===----------------------------------------------------------------------===// +#include "asan_test_utils.h" + +int main(int argc, char **argv) { + testing::GTEST_FLAG(death_test_style) = "threadsafe"; + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/contrib/compiler-rt/lib/asan/tests/asan_test_utils.h b/contrib/compiler-rt/lib/asan/tests/asan_test_utils.h new file mode 100644 index 000000000000..03d17cfb26a7 --- /dev/null +++ b/contrib/compiler-rt/lib/asan/tests/asan_test_utils.h @@ -0,0 +1,107 @@ +//===-- asan_test_utils.h ---------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +//===----------------------------------------------------------------------===// + +#ifndef ASAN_TEST_UTILS_H +#define ASAN_TEST_UTILS_H + +#if !defined(SANITIZER_EXTERNAL_TEST_CONFIG) +# define INCLUDED_FROM_ASAN_TEST_UTILS_H +# include "asan_test_config.h" +# undef INCLUDED_FROM_ASAN_TEST_UTILS_H +#endif + +#include "sanitizer_test_utils.h" +#include "sanitizer_pthread_wrappers.h" + +#include <stdio.h> +#include <signal.h> +#include <stdlib.h> +#include <string.h> +#include <stdint.h> +#include <assert.h> +#include <algorithm> + +#if !defined(_WIN32) +# include <strings.h> +# include <sys/mman.h> +# include <setjmp.h> +#endif + +#ifdef __linux__ +# include <sys/prctl.h> +# include <sys/types.h> +# include <sys/stat.h> +# include <fcntl.h> +#include <unistd.h> +#endif + +#if !defined(__APPLE__) && !defined(__FreeBSD__) +#include <malloc.h> +#endif + +#if ASAN_HAS_EXCEPTIONS +# define ASAN_THROW(x) throw (x) +#else +# define ASAN_THROW(x) +#endif + +typedef uint8_t U1; +typedef uint16_t U2; +typedef uint32_t U4; +typedef uint64_t 
U8; + +static const int kPageSize = 4096; + +const size_t kLargeMalloc = 1 << 24; + +extern void free_aaa(void *p); +extern void *malloc_aaa(size_t size); + +template<typename T> +NOINLINE void asan_write(T *a) { + *a = 0; +} + +string RightOOBErrorMessage(int oob_distance, bool is_write); +string RightOOBWriteMessage(int oob_distance); +string RightOOBReadMessage(int oob_distance); +string LeftOOBErrorMessage(int oob_distance, bool is_write); +string LeftOOBWriteMessage(int oob_distance); +string LeftOOBReadMessage(int oob_distance); +string LeftOOBAccessMessage(int oob_distance); +char* MallocAndMemsetString(size_t size, char ch); +char* MallocAndMemsetString(size_t size); + +extern char glob1[1]; +extern char glob2[2]; +extern char glob3[3]; +extern char glob4[4]; +extern char glob5[5]; +extern char glob6[6]; +extern char glob7[7]; +extern char glob8[8]; +extern char glob9[9]; +extern char glob10[10]; +extern char glob11[11]; +extern char glob12[12]; +extern char glob13[13]; +extern char glob14[14]; +extern char glob15[15]; +extern char glob16[16]; +extern char glob17[17]; +extern char glob1000[1000]; +extern char glob10000[10000]; +extern char glob100000[100000]; +extern int GlobalsTest(int x); + +#endif // ASAN_TEST_UTILS_H |