diff options
Diffstat (limited to 'contrib/compiler-rt/lib/sanitizer_common/sanitizer_mac.cc')
-rw-r--r-- | contrib/compiler-rt/lib/sanitizer_common/sanitizer_mac.cc | 69 |
1 file changed, 59 insertions, 10 deletions
diff --git a/contrib/compiler-rt/lib/sanitizer_common/sanitizer_mac.cc b/contrib/compiler-rt/lib/sanitizer_common/sanitizer_mac.cc index e1c51f580874..180d7c199aef 100644 --- a/contrib/compiler-rt/lib/sanitizer_common/sanitizer_mac.cc +++ b/contrib/compiler-rt/lib/sanitizer_common/sanitizer_mac.cc @@ -59,7 +59,9 @@ extern "C" { #include <libkern/OSAtomic.h> #include <mach-o/dyld.h> #include <mach/mach.h> +#include <mach/mach_time.h> #include <mach/vm_statistics.h> +#include <malloc/malloc.h> #include <pthread.h> #include <sched.h> #include <signal.h> @@ -100,9 +102,15 @@ extern "C" void *__mmap(void *addr, size_t len, int prot, int flags, int fildes, extern "C" int __munmap(void *, size_t) SANITIZER_WEAK_ATTRIBUTE; // ---------------------- sanitizer_libc.h + +// From <mach/vm_statistics.h>, but not on older OSs. +#ifndef VM_MEMORY_SANITIZER +#define VM_MEMORY_SANITIZER 99 +#endif + uptr internal_mmap(void *addr, size_t length, int prot, int flags, int fd, u64 offset) { - if (fd == -1) fd = VM_MAKE_TAG(VM_MEMORY_ANALYSIS_TOOL); + if (fd == -1) fd = VM_MAKE_TAG(VM_MEMORY_SANITIZER); if (&__mmap) return (uptr)__mmap(addr, length, prot, flags, fd, offset); return (uptr)mmap(addr, length, prot, flags, fd, offset); } @@ -338,10 +346,37 @@ void ReExec() { UNIMPLEMENTED(); } +void CheckASLR() { + // Do nothing +} + uptr GetPageSize() { return sysconf(_SC_PAGESIZE); } +extern "C" unsigned malloc_num_zones; +extern "C" malloc_zone_t **malloc_zones; +malloc_zone_t sanitizer_zone; + +// We need to make sure that sanitizer_zone is registered as malloc_zones[0]. If +// libmalloc tries to set up a different zone as malloc_zones[0], it will call +// mprotect(malloc_zones, ..., PROT_READ). This interceptor will catch that and +// make sure we are still the first (default) zone. 
+void MprotectMallocZones(void *addr, int prot) { + if (addr == malloc_zones && prot == PROT_READ) { + if (malloc_num_zones > 1 && malloc_zones[0] != &sanitizer_zone) { + for (unsigned i = 1; i < malloc_num_zones; i++) { + if (malloc_zones[i] == &sanitizer_zone) { + // Swap malloc_zones[0] and malloc_zones[i]. + malloc_zones[i] = malloc_zones[0]; + malloc_zones[0] = &sanitizer_zone; + break; + } + } + } + } +} + BlockingMutex::BlockingMutex() { internal_memset(this, 0, sizeof(*this)); } @@ -362,11 +397,17 @@ void BlockingMutex::CheckLocked() { } u64 NanoTime() { - return 0; + timeval tv; + internal_memset(&tv, 0, sizeof(tv)); + gettimeofday(&tv, 0); + return (u64)tv.tv_sec * 1000*1000*1000 + tv.tv_usec * 1000; } +// This needs to be called during initialization to avoid being racy. u64 MonotonicNanoTime() { - return 0; + static mach_timebase_info_data_t timebase_info; + if (timebase_info.denom == 0) mach_timebase_info(&timebase_info); + return (mach_absolute_time() * timebase_info.numer) / timebase_info.denom; } uptr GetTlsSize() { @@ -428,6 +469,8 @@ static HandleSignalMode GetHandleSignalModeImpl(int signum) { return common_flags()->handle_abort; case SIGILL: return common_flags()->handle_sigill; + case SIGTRAP: + return common_flags()->handle_sigtrap; case SIGFPE: return common_flags()->handle_sigfpe; case SIGSEGV: @@ -673,6 +716,9 @@ bool DyldNeedsEnvVariable() { } void MaybeReexec() { + // FIXME: This should really live in some "InitializePlatform" method. 
+ MonotonicNanoTime(); + if (ReexecDisabled()) return; // Make sure the dynamic runtime library is preloaded so that the @@ -875,10 +921,9 @@ uptr GetMaxVirtualAddress() { return GetMaxUserVirtualAddress(); } -uptr FindAvailableMemoryRange(uptr shadow_size, - uptr alignment, - uptr left_padding, - uptr *largest_gap_found) { +uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding, + uptr *largest_gap_found, + uptr *max_occupied_addr) { typedef vm_region_submap_short_info_data_64_t RegionInfo; enum { kRegionInfoSize = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64 }; // Start searching for available memory region past PAGEZERO, which is @@ -890,6 +935,7 @@ uptr FindAvailableMemoryRange(uptr shadow_size, mach_vm_address_t free_begin = start_address; kern_return_t kr = KERN_SUCCESS; if (largest_gap_found) *largest_gap_found = 0; + if (max_occupied_addr) *max_occupied_addr = 0; while (kr == KERN_SUCCESS) { mach_vm_size_t vmsize = 0; natural_t depth = 0; @@ -901,13 +947,15 @@ uptr FindAvailableMemoryRange(uptr shadow_size, // No more regions beyond "address", consider the gap at the end of VM. address = GetMaxVirtualAddress() + 1; vmsize = 0; + } else { + if (max_occupied_addr) *max_occupied_addr = address + vmsize; } if (free_begin != address) { // We found a free region [free_begin..address-1]. uptr gap_start = RoundUpTo((uptr)free_begin + left_padding, alignment); uptr gap_end = RoundDownTo((uptr)address, alignment); uptr gap_size = gap_end > gap_start ? 
gap_end - gap_start : 0; - if (shadow_size < gap_size) { + if (size < gap_size) { return gap_start; } @@ -994,9 +1042,10 @@ void FormatUUID(char *out, uptr size, const u8 *uuid) { void PrintModuleMap() { Printf("Process module map:\n"); MemoryMappingLayout memory_mapping(false); - InternalMmapVector<LoadedModule> modules(/*initial_capacity*/ 128); + InternalMmapVector<LoadedModule> modules; + modules.reserve(128); memory_mapping.DumpListOfModules(&modules); - InternalSort(&modules, modules.size(), CompareBaseAddress); + Sort(modules.data(), modules.size(), CompareBaseAddress); for (uptr i = 0; i < modules.size(); ++i) { char uuid_str[128]; FormatUUID(uuid_str, sizeof(uuid_str), modules[i].uuid()); |