Diffstat (limited to 'contrib/compiler-rt/lib/tsan/rtl/tsan_mman.cc')
-rw-r--r--   contrib/compiler-rt/lib/tsan/rtl/tsan_mman.cc   88
1 file changed, 73 insertions(+), 15 deletions(-)
diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_mman.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_mman.cc
index 7247c6e00035..7693077f622b 100644
--- a/contrib/compiler-rt/lib/tsan/rtl/tsan_mman.cc
+++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_mman.cc
@@ -63,18 +63,69 @@ Allocator *allocator() {
return reinterpret_cast<Allocator*>(&allocator_placeholder);
}
+struct GlobalProc {
+ Mutex mtx;
+ Processor *proc;
+
+ GlobalProc()
+ : mtx(MutexTypeGlobalProc, StatMtxGlobalProc)
+ , proc(ProcCreate()) {
+ }
+};
+
+static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64);
+GlobalProc *global_proc() {
+ return reinterpret_cast<GlobalProc*>(&global_proc_placeholder);
+}
+
+ScopedGlobalProcessor::ScopedGlobalProcessor() {
+ GlobalProc *gp = global_proc();
+ ThreadState *thr = cur_thread();
+ if (thr->proc())
+ return;
+ // If we don't have a proc, use the global one.
+ // There are currently only two known cases where this path is triggered:
+ // __interceptor_free
+ // __nptl_deallocate_tsd
+ // start_thread
+ // clone
+ // and:
+ // ResetRange
+ // __interceptor_munmap
+ // __deallocate_stack
+ // start_thread
+ // clone
+ // Ideally, we destroy thread state (and unwire proc) when a thread actually
+ // exits (i.e. when we join/wait it). Then we would not need the global proc.
+ gp->mtx.Lock();
+ ProcWire(gp->proc, thr);
+}
+
+ScopedGlobalProcessor::~ScopedGlobalProcessor() {
+ GlobalProc *gp = global_proc();
+ ThreadState *thr = cur_thread();
+ if (thr->proc() != gp->proc)
+ return;
+ ProcUnwire(gp->proc, thr);
+ gp->mtx.Unlock();
+}
+
void InitializeAllocator() {
allocator()->Init(common_flags()->allocator_may_return_null);
}
-void AllocatorThreadStart(ThreadState *thr) {
- allocator()->InitCache(&thr->alloc_cache);
- internal_allocator()->InitCache(&thr->internal_alloc_cache);
+void InitializeAllocatorLate() {
+ new(global_proc()) GlobalProc();
+}
+
+void AllocatorProcStart(Processor *proc) {
+ allocator()->InitCache(&proc->alloc_cache);
+ internal_allocator()->InitCache(&proc->internal_alloc_cache);
}
-void AllocatorThreadFinish(ThreadState *thr) {
- allocator()->DestroyCache(&thr->alloc_cache);
- internal_allocator()->DestroyCache(&thr->internal_alloc_cache);
+void AllocatorProcFinish(Processor *proc) {
+ allocator()->DestroyCache(&proc->alloc_cache);
+ internal_allocator()->DestroyCache(&proc->internal_alloc_cache);
}
void AllocatorPrintStats() {
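The hunk above uses the same idiom for global_proc() that allocator() already uses: the object lives in a raw, aligned static buffer and is constructed explicitly with placement new during runtime initialization (InitializeAllocatorLate), so the runtime never depends on C++ static constructors running in any particular order. A minimal, self-contained sketch of that idiom follows; Widget, widget() and InitWidget() are hypothetical names, not part of the runtime, and alignas(64) stands in for the runtime's ALIGNED(64) macro.

#include <new>
#include <cstdio>

struct Widget {
  int value;
  Widget() : value(42) {}
};

// Raw storage for the singleton; no static constructor runs for it.
alignas(64) static char widget_placeholder[sizeof(Widget)];

static Widget *widget() {
  return reinterpret_cast<Widget *>(&widget_placeholder);
}

// Explicit initialization point, mirroring InitializeAllocatorLate().
static void InitWidget() {
  new (widget()) Widget();
}

int main() {
  InitWidget();
  std::printf("%d\n", widget()->value);  // prints 42
  return 0;
}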
@@ -98,7 +149,7 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
return allocator()->ReturnNullOrDie();
- void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
+ void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
if (p == 0)
return 0;
if (ctx && ctx->initialized)
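The size/alignment guard in the user_alloc hunk above means a request of 2^40 bytes or more never reaches the allocator; it goes through ReturnNullOrDie(), which honors the allocator_may_return_null flag passed to Init() in InitializeAllocator(). A small sketch of the user-visible effect, assuming a program built with -fsanitize=thread and run with the flag set via TSAN_OPTIONS:

#include <cstdlib>
#include <cstdio>

int main() {
  // e.g. TSAN_OPTIONS=allocator_may_return_null=1 ./a.out
  void *p = std::malloc(1ull << 41);  // exceeds the 1ull << 40 limit in user_alloc
  std::printf("huge malloc -> %p\n", p);  // NULL when returning null is allowed,
                                          // otherwise the runtime reports an error and dies
  std::free(p);
  return 0;
}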
@@ -118,9 +169,10 @@ void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
}
void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
+ ScopedGlobalProcessor sgp;
if (ctx && ctx->initialized)
OnUserFree(thr, pc, (uptr)p, true);
- allocator()->Deallocate(&thr->alloc_cache, p);
+ allocator()->Deallocate(&thr->proc()->alloc_cache, p);
if (signal)
SignalUnsafeCall(thr, pc);
}
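The 'ScopedGlobalProcessor sgp;' line added to user_free above is the RAII pattern defined earlier: if the calling thread has no Processor of its own (for example a late free from a TSD destructor after the thread state was torn down), the guard locks the global Processor, wires it to the thread for the duration of the call, and unwires it on scope exit. A minimal, self-contained sketch of that pattern; FallbackProc, ScopedFallback and tls_proc are hypothetical stand-ins for the TSan internals, not the runtime's own types.

#include <mutex>

struct Proc { /* per-Processor allocator caches would live here */ };

struct FallbackProc {
  std::mutex mtx;
  Proc proc;
};

static FallbackProc g_fallback;
static thread_local Proc *tls_proc = nullptr;  // normally set at thread start

class ScopedFallback {
 public:
  ScopedFallback() {
    if (tls_proc)           // thread already has a proc: nothing to do
      return;
    g_fallback.mtx.lock();  // serialize users of the shared fallback proc
    tls_proc = &g_fallback.proc;
    wired_ = true;
  }
  ~ScopedFallback() {
    if (!wired_)
      return;
    tls_proc = nullptr;     // unwire before releasing the lock
    g_fallback.mtx.unlock();
  }
 private:
  bool wired_ = false;
};

void free_like_path(void *p) {
  ScopedFallback sgp;       // mirrors 'ScopedGlobalProcessor sgp;' in user_free
  // ... deallocate p using tls_proc's caches ...
  (void)p;
}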
@@ -136,7 +188,7 @@ void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
CHECK_NE(p, (void*)0);
- uptr sz = ctx->metamap.FreeBlock(thr, pc, p);
+ uptr sz = ctx->metamap.FreeBlock(thr->proc(), p);
DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
if (write && thr->ignore_reads_and_writes == 0)
MemoryRangeFreed(thr, pc, (uptr)p, sz);
@@ -164,7 +216,11 @@ uptr user_alloc_usable_size(const void *p) {
if (p == 0)
return 0;
MBlock *b = ctx->metamap.GetBlock((uptr)p);
- return b ? b->siz : 0;
+ if (!b)
+ return 0; // Not a valid pointer.
+ if (b->siz == 0)
+ return 1; // Zero-sized allocations are actually 1 byte.
+ return b->siz;
}
void invoke_malloc_hook(void *ptr, uptr size) {
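The user_alloc_usable_size hunk above distinguishes an invalid pointer (usable size 0) from a live zero-sized allocation (reported as 1 byte). A short sketch of the behavior a user would observe, assuming the public sanitizer headers are on the include path and the program is built with -fsanitize=thread:

#include <sanitizer/allocator_interface.h>
#include <cstdlib>
#include <cstdio>

int main() {
  void *p = std::malloc(0);
  // Per the hunk above: a non-heap pointer reports 0, while a live
  // zero-sized block reports 1.
  std::printf("usable size of malloc(0): %zu\n",
              __sanitizer_get_allocated_size(p));
  std::free(p);
  return 0;
}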
@@ -172,6 +228,7 @@ void invoke_malloc_hook(void *ptr, uptr size) {
if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
return;
__sanitizer_malloc_hook(ptr, size);
+ RunMallocHooks(ptr, size);
}
void invoke_free_hook(void *ptr) {
@@ -179,6 +236,7 @@ void invoke_free_hook(void *ptr) {
if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
return;
__sanitizer_free_hook(ptr);
+ RunFreeHooks(ptr);
}
void *internal_alloc(MBlockType typ, uptr sz) {
@@ -187,7 +245,7 @@ void *internal_alloc(MBlockType typ, uptr sz) {
thr->nomalloc = 0; // CHECK calls internal_malloc().
CHECK(0);
}
- return InternalAlloc(sz, &thr->internal_alloc_cache);
+ return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
}
void internal_free(void *p) {
@@ -196,7 +254,7 @@ void internal_free(void *p) {
thr->nomalloc = 0; // CHECK calls internal_malloc().
CHECK(0);
}
- InternalFree(p, &thr->internal_alloc_cache);
+ InternalFree(p, &thr->proc()->internal_alloc_cache);
}
} // namespace __tsan
@@ -238,8 +296,8 @@ uptr __sanitizer_get_allocated_size(const void *p) {
void __tsan_on_thread_idle() {
ThreadState *thr = cur_thread();
- allocator()->SwallowCache(&thr->alloc_cache);
- internal_allocator()->SwallowCache(&thr->internal_alloc_cache);
- ctx->metamap.OnThreadIdle(thr);
+ allocator()->SwallowCache(&thr->proc()->alloc_cache);
+ internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
+ ctx->metamap.OnProcIdle(thr->proc());
}
} // extern "C"
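With this change __tsan_on_thread_idle drains the caches of the thread's Processor rather than per-thread caches. A sketch of how a long-lived worker might use it between tasks to hand cached memory back to the central allocator; declaring the symbol locally is an assumption for the sketch, since it is exported by the TSan runtime but may not appear in the public sanitizer headers.

extern "C" void __tsan_on_thread_idle();  // assumed local declaration of the exported runtime symbol

void worker_loop_iteration() {
  // ... run one queued task, possibly allocating and freeing memory ...
  __tsan_on_thread_idle();  // drain this thread's per-Processor allocator caches
}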