Diffstat (limited to 'compiler-rt/lib/tsan')
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_defs.h1
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp47
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_mman.cpp32
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_mman.h2
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp143
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_rtl.cpp56
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp2
7 files changed, 192 insertions, 91 deletions
diff --git a/compiler-rt/lib/tsan/rtl/tsan_defs.h b/compiler-rt/lib/tsan/rtl/tsan_defs.h
index fe0c1da31599..4712c2be1813 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_defs.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_defs.h
@@ -228,6 +228,7 @@ enum MutexType {
MutexTypeFired,
MutexTypeRacy,
MutexTypeGlobalProc,
+ MutexTypeInternalAlloc,
};
} // namespace __tsan
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
index 25dbe487b280..cf3dc90d96a1 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
@@ -177,6 +177,7 @@ struct ThreadSignalContext {
struct AtExitCtx {
void (*f)();
void *arg;
+ uptr pc;
};
// InterceptorContext holds all global data required for interceptors.
@@ -367,7 +368,10 @@ TSAN_INTERCEPTOR(int, pause, int fake) {
return BLOCK_REAL(pause)(fake);
}
-static void at_exit_wrapper() {
+// Note: we deliberately name the function this way (with "installed_at")
+// because in reports it will appear between the callback frames and the
+// frame that installed the callback.
+static void at_exit_callback_installed_at() {
AtExitCtx *ctx;
{
// Ensure thread-safety.
@@ -379,15 +383,21 @@ static void at_exit_wrapper() {
interceptor_ctx()->AtExitStack.PopBack();
}
- Acquire(cur_thread(), (uptr)0, (uptr)ctx);
+ ThreadState *thr = cur_thread();
+ Acquire(thr, ctx->pc, (uptr)ctx);
+ FuncEntry(thr, ctx->pc);
((void(*)())ctx->f)();
+ FuncExit(thr);
Free(ctx);
}
-static void cxa_at_exit_wrapper(void *arg) {
- Acquire(cur_thread(), 0, (uptr)arg);
+static void cxa_at_exit_callback_installed_at(void *arg) {
+ ThreadState *thr = cur_thread();
AtExitCtx *ctx = (AtExitCtx*)arg;
+ Acquire(thr, ctx->pc, (uptr)arg);
+ FuncEntry(thr, ctx->pc);
((void(*)(void *arg))ctx->f)(ctx->arg);
+ FuncExit(thr);
Free(ctx);
}
@@ -401,7 +411,7 @@ TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
// We want to setup the atexit callback even if we are in ignored lib
// or after fork.
SCOPED_INTERCEPTOR_RAW(atexit, f);
- return setup_at_exit_wrapper(thr, pc, (void(*)())f, 0, 0);
+ return setup_at_exit_wrapper(thr, GET_CALLER_PC(), (void (*)())f, 0, 0);
}
#endif
@@ -409,7 +419,7 @@ TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
if (in_symbolizer())
return 0;
SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
- return setup_at_exit_wrapper(thr, pc, (void(*)())f, arg, dso);
+ return setup_at_exit_wrapper(thr, GET_CALLER_PC(), (void (*)())f, arg, dso);
}
static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
@@ -417,6 +427,7 @@ static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
auto *ctx = New<AtExitCtx>();
ctx->f = f;
ctx->arg = arg;
+ ctx->pc = pc;
Release(thr, pc, (uptr)ctx);
// Memory allocation in __cxa_atexit will race with free during exit,
// because we do not see synchronization around atexit callback list.
@@ -432,25 +443,27 @@ static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
// due to atexit_mu held on exit from the calloc interceptor.
ScopedIgnoreInterceptors ignore;
- res = REAL(__cxa_atexit)((void (*)(void *a))at_exit_wrapper, 0, 0);
+ res = REAL(__cxa_atexit)((void (*)(void *a))at_exit_callback_installed_at,
+ 0, 0);
// Push AtExitCtx on the top of the stack of callback functions
if (!res) {
interceptor_ctx()->AtExitStack.PushBack(ctx);
}
} else {
- res = REAL(__cxa_atexit)(cxa_at_exit_wrapper, ctx, dso);
+ res = REAL(__cxa_atexit)(cxa_at_exit_callback_installed_at, ctx, dso);
}
ThreadIgnoreEnd(thr);
return res;
}
#if !SANITIZER_MAC && !SANITIZER_NETBSD
-static void on_exit_wrapper(int status, void *arg) {
+static void on_exit_callback_installed_at(int status, void *arg) {
ThreadState *thr = cur_thread();
- uptr pc = 0;
- Acquire(thr, pc, (uptr)arg);
AtExitCtx *ctx = (AtExitCtx*)arg;
+ Acquire(thr, ctx->pc, (uptr)arg);
+ FuncEntry(thr, ctx->pc);
((void(*)(int status, void *arg))ctx->f)(status, ctx->arg);
+ FuncExit(thr);
Free(ctx);
}
@@ -461,11 +474,12 @@ TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
auto *ctx = New<AtExitCtx>();
ctx->f = (void(*)())f;
ctx->arg = arg;
+ ctx->pc = GET_CALLER_PC();
Release(thr, pc, (uptr)ctx);
// Memory allocation in __cxa_atexit will race with free during exit,
// because we do not see synchronization around atexit callback list.
ThreadIgnoreBegin(thr, pc);
- int res = REAL(on_exit)(on_exit_wrapper, ctx);
+ int res = REAL(on_exit)(on_exit_callback_installed_at, ctx);
ThreadIgnoreEnd(thr);
return res;
}
@@ -2363,6 +2377,15 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
if (fd >= 0) FdClose(thr, pc, fd); \
}
+#define COMMON_INTERCEPTOR_DLOPEN(filename, flag) \
+ ({ \
+ CheckNoDeepBind(filename, flag); \
+ ThreadIgnoreBegin(thr, 0); \
+ void *res = REAL(dlopen)(filename, flag); \
+ ThreadIgnoreEnd(thr); \
+ res; \
+ })
+
#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
libignore()->OnLibraryLoaded(filename)
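
The renaming and the stored pc above exist so that an atexit callback's stack trace can be attributed to the code that registered it. A minimal standalone sketch of the same idea, using plain libc atexit and hypothetical names rather than the TSan interceptor machinery:

// Hypothetical illustration only; TSan's real wrappers use AtExitCtx and
// FuncEntry/FuncExit instead of printing.
#include <cstdio>
#include <cstdlib>

struct ExitCtx {
  void (*f)();
  void *installed_at;  // return address of the my_atexit() call site
};

static ExitCtx g_ctx;  // a real implementation keeps a stack of contexts

static void exit_callback_installed_at() {
  // TSan calls FuncEntry(thr, ctx->pc) here so reports show the frame that
  // installed the callback; the sketch just prints the recorded address.
  std::printf("callback installed at %p\n", g_ctx.installed_at);
  g_ctx.f();
}

static int my_atexit(void (*f)()) {
  g_ctx.f = f;
  g_ctx.installed_at = __builtin_return_address(0);  // GCC/Clang builtin
  return std::atexit(exit_callback_installed_at);
}

static void bye() { std::puts("bye"); }
int main() { return my_atexit(bye); }
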
diff --git a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
index ef97ad0bc94e..a31bebcb6ba9 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
@@ -69,8 +69,17 @@ Allocator *allocator() {
struct GlobalProc {
Mutex mtx;
Processor *proc;
-
- GlobalProc() : mtx(MutexTypeGlobalProc), proc(ProcCreate()) {}
+ // This mutex stands in for all of the internal allocator's mutexes for the
+ // purposes of deadlock detection. The internal allocator uses multiple
+ // mutexes; moreover, they are locked only occasionally and they are spin
+ // mutexes which don't support deadlock detection, so we use this fake mutex
+ // as a substitute for them.
+ CheckedMutex internal_alloc_mtx;
+
+ GlobalProc()
+ : mtx(MutexTypeGlobalProc),
+ proc(ProcCreate()),
+ internal_alloc_mtx(MutexTypeInternalAlloc) {}
};
static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64);
@@ -78,6 +87,11 @@ GlobalProc *global_proc() {
return reinterpret_cast<GlobalProc*>(&global_proc_placeholder);
}
+static void InternalAllocAccess() {
+ global_proc()->internal_alloc_mtx.Lock();
+ global_proc()->internal_alloc_mtx.Unlock();
+}
+
ScopedGlobalProcessor::ScopedGlobalProcessor() {
GlobalProc *gp = global_proc();
ThreadState *thr = cur_thread();
@@ -110,6 +124,18 @@ ScopedGlobalProcessor::~ScopedGlobalProcessor() {
gp->mtx.Unlock();
}
+void AllocatorLock() NO_THREAD_SAFETY_ANALYSIS {
+ global_proc()->mtx.Lock();
+ global_proc()->internal_alloc_mtx.Lock();
+ InternalAllocatorLock();
+}
+
+void AllocatorUnlock() NO_THREAD_SAFETY_ANALYSIS {
+ InternalAllocatorUnlock();
+ global_proc()->internal_alloc_mtx.Unlock();
+ global_proc()->mtx.Unlock();
+}
+
static constexpr uptr kMaxAllowedMallocSize = 1ull << 40;
static uptr max_user_defined_malloc_size;
@@ -342,6 +368,7 @@ void *Alloc(uptr sz) {
thr->nomalloc = 0; // CHECK calls internal_malloc().
CHECK(0);
}
+ InternalAllocAccess();
return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
}
@@ -351,6 +378,7 @@ void FreeImpl(void *p) {
thr->nomalloc = 0; // CHECK calls internal_malloc().
CHECK(0);
}
+ InternalAllocAccess();
InternalFree(p, &thr->proc()->internal_alloc_cache);
}
diff --git a/compiler-rt/lib/tsan/rtl/tsan_mman.h b/compiler-rt/lib/tsan/rtl/tsan_mman.h
index efea5e5abdec..db8488eabbe2 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_mman.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_mman.h
@@ -24,6 +24,8 @@ void ReplaceSystemMalloc();
void AllocatorProcStart(Processor *proc);
void AllocatorProcFinish(Processor *proc);
void AllocatorPrintStats();
+void AllocatorLock();
+void AllocatorUnlock();
// For user allocations.
void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz,
diff --git a/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp b/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp
index 3faa2d0c6192..1465f9953c19 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp
@@ -25,6 +25,7 @@
#include "tsan_rtl.h"
#include "tsan_flags.h"
+#include <limits.h>
#include <mach/mach.h>
#include <pthread.h>
#include <signal.h>
@@ -45,70 +46,83 @@
namespace __tsan {
#if !SANITIZER_GO
-static void *SignalSafeGetOrAllocate(uptr *dst, uptr size) {
- atomic_uintptr_t *a = (atomic_uintptr_t *)dst;
- void *val = (void *)atomic_load_relaxed(a);
- atomic_signal_fence(memory_order_acquire); // Turns the previous load into
- // acquire wrt signals.
- if (UNLIKELY(val == nullptr)) {
- val = (void *)internal_mmap(nullptr, size, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANON, -1, 0);
- CHECK(val);
- void *cmp = nullptr;
- if (!atomic_compare_exchange_strong(a, (uintptr_t *)&cmp, (uintptr_t)val,
- memory_order_acq_rel)) {
- internal_munmap(val, size);
- val = cmp;
- }
- }
- return val;
+static char main_thread_state[sizeof(ThreadState)] ALIGNED(
+ SANITIZER_CACHE_LINE_SIZE);
+static ThreadState *dead_thread_state;
+static pthread_key_t thread_state_key;
+
+// We rely on the following documented, but Darwin-specific behavior to keep the
+// reference to the ThreadState object alive in TLS:
+// pthread_key_create man page:
+// If, after all the destructors have been called for all non-NULL values with
+// associated destructors, there are still some non-NULL values with
+// associated destructors, then the process is repeated. If, after at least
+// [PTHREAD_DESTRUCTOR_ITERATIONS] iterations of destructor calls for
+// outstanding non-NULL values, there are still some non-NULL values with
+// associated destructors, the implementation stops calling destructors.
+static_assert(PTHREAD_DESTRUCTOR_ITERATIONS == 4, "Small number of iterations");
+static void ThreadStateDestructor(void *thr) {
+ int res = pthread_setspecific(thread_state_key, thr);
+ CHECK_EQ(res, 0);
}
-// On OS X, accessing TLVs via __thread or manually by using pthread_key_* is
-// problematic, because there are several places where interceptors are called
-// when TLVs are not accessible (early process startup, thread cleanup, ...).
-// The following provides a "poor man's TLV" implementation, where we use the
-// shadow memory of the pointer returned by pthread_self() to store a pointer to
-// the ThreadState object. The main thread's ThreadState is stored separately
-// in a static variable, because we need to access it even before the
-// shadow memory is set up.
-static uptr main_thread_identity = 0;
-ALIGNED(64) static char main_thread_state[sizeof(ThreadState)];
-static ThreadState *main_thread_state_loc = (ThreadState *)main_thread_state;
-
-// We cannot use pthread_self() before libpthread has been initialized. Our
-// current heuristic for guarding this is checking `main_thread_identity` which
-// is only assigned in `__tsan::InitializePlatform`.
-static ThreadState **cur_thread_location() {
- if (main_thread_identity == 0)
- return &main_thread_state_loc;
- uptr thread_identity = (uptr)pthread_self();
- if (thread_identity == main_thread_identity)
- return &main_thread_state_loc;
- return (ThreadState **)MemToShadow(thread_identity);
+static void InitializeThreadStateStorage() {
+ int res;
+ CHECK_EQ(thread_state_key, 0);
+ res = pthread_key_create(&thread_state_key, ThreadStateDestructor);
+ CHECK_EQ(res, 0);
+ res = pthread_setspecific(thread_state_key, main_thread_state);
+ CHECK_EQ(res, 0);
+
+ auto dts = (ThreadState *)MmapOrDie(sizeof(ThreadState), "ThreadState");
+ dts->fast_state.SetIgnoreBit();
+ dts->ignore_interceptors = 1;
+ dts->is_dead = true;
+ const_cast<Tid &>(dts->tid) = kInvalidTid;
+ res = internal_mprotect(dts, sizeof(ThreadState), PROT_READ); // immutable
+ CHECK_EQ(res, 0);
+ dead_thread_state = dts;
}
ThreadState *cur_thread() {
- return (ThreadState *)SignalSafeGetOrAllocate(
- (uptr *)cur_thread_location(), sizeof(ThreadState));
+ // Some interceptors get called before libpthread has been initialized and in
+ // these cases we must avoid calling any pthread APIs.
+ if (UNLIKELY(!thread_state_key)) {
+ return (ThreadState *)main_thread_state;
+ }
+
+ // We only reach this line after InitializeThreadStateStorage() ran, i.e.,
+ // after TSan (and therefore libpthread) has been initialized.
+ ThreadState *thr = (ThreadState *)pthread_getspecific(thread_state_key);
+ if (UNLIKELY(!thr)) {
+ thr = (ThreadState *)MmapOrDie(sizeof(ThreadState), "ThreadState");
+ int res = pthread_setspecific(thread_state_key, thr);
+ CHECK_EQ(res, 0);
+ }
+ return thr;
}
void set_cur_thread(ThreadState *thr) {
- *cur_thread_location() = thr;
+ int res = pthread_setspecific(thread_state_key, thr);
+ CHECK_EQ(res, 0);
}
-// TODO(kuba.brecka): This is not async-signal-safe. In particular, we call
-// munmap first and then clear `fake_tls`; if we receive a signal in between,
-// handler will try to access the unmapped ThreadState.
void cur_thread_finalize() {
- ThreadState **thr_state_loc = cur_thread_location();
- if (thr_state_loc == &main_thread_state_loc) {
+ ThreadState *thr = (ThreadState *)pthread_getspecific(thread_state_key);
+ CHECK(thr);
+ if (thr == (ThreadState *)main_thread_state) {
// Calling dispatch_main() or xpc_main() actually invokes pthread_exit to
// exit the main thread. Let's keep the main thread's ThreadState.
return;
}
- internal_munmap(*thr_state_loc, sizeof(ThreadState));
- *thr_state_loc = nullptr;
+ // Intercepted functions can still get called after cur_thread_finalize()
+ // (called from DestroyThreadState()), so put a fake thread state for "dead"
+ // threads. An alternative solution would be to release the ThreadState
+ // object from THREAD_DESTROY (which is delivered later and on the parent
+ // thread) instead of THREAD_TERMINATE.
+ int res = pthread_setspecific(thread_state_key, dead_thread_state);
+ CHECK_EQ(res, 0);
+ UnmapOrDie(thr, sizeof(ThreadState));
}
#endif
@@ -222,11 +236,10 @@ static void my_pthread_introspection_hook(unsigned int event, pthread_t thread,
ThreadStart(thr, tid, GetTid(), ThreadType::Worker);
}
} else if (event == PTHREAD_INTROSPECTION_THREAD_TERMINATE) {
- if (thread == pthread_self()) {
- ThreadState *thr = cur_thread();
- if (thr->tctx) {
- DestroyThreadState();
- }
+ CHECK_EQ(thread, pthread_self());
+ ThreadState *thr = cur_thread();
+ if (thr->tctx) {
+ DestroyThreadState();
}
}
@@ -253,8 +266,7 @@ void InitializePlatform() {
#if !SANITIZER_GO
CheckAndProtect();
- CHECK_EQ(main_thread_identity, 0);
- main_thread_identity = (uptr)pthread_self();
+ InitializeThreadStateStorage();
prev_pthread_introspection_hook =
pthread_introspection_hook_install(&my_pthread_introspection_hook);
@@ -286,24 +298,11 @@ uptr ExtractLongJmpSp(uptr *env) {
extern "C" void __tsan_tls_initialization() {}
void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) {
- // The pointer to the ThreadState object is stored in the shadow memory
- // of the tls.
- uptr tls_end = tls_addr + tls_size;
- uptr thread_identity = (uptr)pthread_self();
const uptr pc = StackTrace::GetNextInstructionPc(
reinterpret_cast<uptr>(__tsan_tls_initialization));
- if (thread_identity == main_thread_identity) {
- MemoryRangeImitateWrite(thr, pc, tls_addr, tls_size);
- } else {
- uptr thr_state_start = thread_identity;
- uptr thr_state_end = thr_state_start + sizeof(uptr);
- CHECK_GE(thr_state_start, tls_addr);
- CHECK_LE(thr_state_start, tls_addr + tls_size);
- CHECK_GE(thr_state_end, tls_addr);
- CHECK_LE(thr_state_end, tls_addr + tls_size);
- MemoryRangeImitateWrite(thr, pc, tls_addr, thr_state_start - tls_addr);
- MemoryRangeImitateWrite(thr, pc, thr_state_end, tls_end - thr_state_end);
- }
+ // Unlike Linux, we only store a pointer to the ThreadState object in TLS;
+ // just mark the entire range as written to.
+ MemoryRangeImitateWrite(thr, pc, tls_addr, tls_size);
}
#endif
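
The pthread_key scheme above relies on the destructor re-installing the value so it stays readable during late thread teardown (bounded by PTHREAD_DESTRUCTOR_ITERATIONS, as the static_assert documents). A minimal standalone demonstration of that behavior, unrelated to TSan's ThreadState:

// Demonstrates keeping a pthread-specific value alive through destructor
// iterations; names and the 0x1234 value are illustrative only.
#include <pthread.h>
#include <cstdio>

static pthread_key_t key;

// Re-installing the value makes the slot non-NULL again, so the value stays
// reachable for code (e.g. interceptors) that runs late in thread teardown.
static void destructor(void *value) {
  pthread_setspecific(key, value);
}

static void *worker(void *) {
  pthread_setspecific(key, (void *)0x1234);
  return nullptr;
}

int main() {
  pthread_key_create(&key, destructor);
  pthread_t t;
  pthread_create(&t, nullptr, worker, nullptr);
  pthread_join(t, nullptr);
  std::puts("done");
  return 0;
}
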
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
index ff7726ef0608..c14af9788e32 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
@@ -34,6 +34,9 @@ extern "C" void __tsan_resume() {
__tsan_resumed = 1;
}
+SANITIZER_WEAK_DEFAULT_IMPL
+void __tsan_test_only_on_fork() {}
+
namespace __tsan {
#if !SANITIZER_GO
@@ -271,8 +274,39 @@ void DontNeedShadowFor(uptr addr, uptr size) {
}
#if !SANITIZER_GO
+// We call UnmapShadow before the actual munmap; at that point we don't yet
+// know if the provided address/size are sane. We can't call UnmapShadow
+// after the actual munmap because at that point the memory range can
+// already be reused for something else, so we can't rely on the munmap
+// return value to tell whether the values were sane.
+// While calling munmap with insane values (non-canonical address, negative
+// size, etc.) is an error, the kernel won't crash. We must also try not to
+// crash, as the failure mode is very confusing (a page fault inside the
+// runtime on some derived shadow address).
+static bool IsValidMmapRange(uptr addr, uptr size) {
+ if (size == 0)
+ return true;
+ if (static_cast<sptr>(size) < 0)
+ return false;
+ if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
+ return false;
+ // Check that if the start of the region belongs to one of the app ranges,
+ // the end of the region belongs to the same range.
+ const uptr ranges[][2] = {
+ {LoAppMemBeg(), LoAppMemEnd()},
+ {MidAppMemBeg(), MidAppMemEnd()},
+ {HiAppMemBeg(), HiAppMemEnd()},
+ };
+ for (auto range : ranges) {
+ if (addr >= range[0] && addr < range[1])
+ return addr + size <= range[1];
+ }
+ return false;
+}
+
void UnmapShadow(ThreadState *thr, uptr addr, uptr size) {
- if (size == 0) return;
+ if (size == 0 || !IsValidMmapRange(addr, size))
+ return;
DontNeedShadowFor(addr, size);
ScopedGlobalProcessor sgp;
ctx->metamap.ResetRange(thr->proc(), addr, size);
@@ -491,6 +525,7 @@ void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
ctx->thread_registry.Lock();
ctx->report_mtx.Lock();
ScopedErrorReportLock::Lock();
+ AllocatorLock();
// Suppress all reports in the pthread_atfork callbacks.
// Reports will deadlock on the report_mtx.
// We could ignore sync operations as well,
@@ -499,12 +534,20 @@ void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
thr->suppress_reports++;
// On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and
// we'll assert in CheckNoLocks() unless we ignore interceptors.
+ // On OS X, the libSystem_atfork_prepare/parent/child callbacks are called
+ // after/before our callbacks, and they call free.
thr->ignore_interceptors++;
+ // Disables memory write in OnUserAlloc/Free.
+ thr->ignore_reads_and_writes++;
+
+ __tsan_test_only_on_fork();
}
void ForkParentAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
thr->suppress_reports--; // Enabled in ForkBefore.
thr->ignore_interceptors--;
+ thr->ignore_reads_and_writes--;
+ AllocatorUnlock();
ScopedErrorReportLock::Unlock();
ctx->report_mtx.Unlock();
ctx->thread_registry.Unlock();
@@ -514,6 +557,8 @@ void ForkChildAfter(ThreadState *thr, uptr pc,
bool start_thread) NO_THREAD_SAFETY_ANALYSIS {
thr->suppress_reports--; // Enabled in ForkBefore.
thr->ignore_interceptors--;
+ thr->ignore_reads_and_writes--;
+ AllocatorUnlock();
ScopedErrorReportLock::Unlock();
ctx->report_mtx.Unlock();
ctx->thread_registry.Unlock();
@@ -747,14 +792,17 @@ using namespace __tsan;
MutexMeta mutex_meta[] = {
{MutexInvalid, "Invalid", {}},
{MutexThreadRegistry, "ThreadRegistry", {}},
- {MutexTypeTrace, "Trace", {MutexLeaf}},
- {MutexTypeReport, "Report", {MutexTypeSyncVar}},
- {MutexTypeSyncVar, "SyncVar", {}},
+ {MutexTypeTrace, "Trace", {}},
+ {MutexTypeReport,
+ "Report",
+ {MutexTypeSyncVar, MutexTypeGlobalProc, MutexTypeTrace}},
+ {MutexTypeSyncVar, "SyncVar", {MutexTypeTrace}},
{MutexTypeAnnotations, "Annotations", {}},
{MutexTypeAtExit, "AtExit", {MutexTypeSyncVar}},
{MutexTypeFired, "Fired", {MutexLeaf}},
{MutexTypeRacy, "Racy", {MutexLeaf}},
{MutexTypeGlobalProc, "GlobalProc", {}},
+ {MutexTypeInternalAlloc, "InternalAlloc", {MutexLeaf}},
{},
};
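
The ForkBefore/ForkParentAfter/ForkChildAfter changes follow the usual pre-fork locking discipline: acquire every runtime lock in a fixed order before fork() so no lock is snapshotted in a held state, then release in both parent and child. A generic sketch of that discipline via pthread_atfork (mutex names are illustrative; TSan wires this up through its fork interceptor rather than pthread_atfork):

// Generic pre/post-fork locking sketch; not TSan's actual mutexes.
#include <pthread.h>
#include <mutex>

static std::mutex registry_mtx, report_mtx, allocator_mtx;

static void ForkPrepare() {  // runs in the forking thread before fork()
  registry_mtx.lock();       // same fixed order as regular acquisition
  report_mtx.lock();
  allocator_mtx.lock();      // allocator last, mirroring AllocatorLock()
}

static void ForkRelease() {  // runs in both parent and child after fork()
  allocator_mtx.unlock();    // reverse order
  report_mtx.unlock();
  registry_mtx.unlock();
}

int InstallForkHooks() {
  return pthread_atfork(ForkPrepare, ForkRelease, ForkRelease);
}
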
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
index 811695d144c5..f332a6a8d1d8 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
@@ -346,7 +346,7 @@ void ScopedReportBase::AddLocation(uptr addr, uptr size) {
ThreadContext *tctx = FindThreadByTidLocked(b->tid);
auto *loc = New<ReportLocation>();
loc->type = ReportLocationHeap;
- loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr);
+ loc->heap_chunk_start = block_begin;
loc->heap_chunk_size = b->siz;
loc->external_tag = b->tag;
loc->tid = tctx ? tctx->tid : b->tid;