Diffstat (limited to 'contrib/compiler-rt/lib/tsan/rtl')
27 files changed, 528 insertions, 174 deletions
diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan.syms.extra b/contrib/compiler-rt/lib/tsan/rtl/tsan.syms.extra index 22dfde914136..ab5b5a4fcbae 100644 --- a/contrib/compiler-rt/lib/tsan/rtl/tsan.syms.extra +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan.syms.extra @@ -9,6 +9,16 @@ __tsan_java* __tsan_unaligned* __tsan_release __tsan_acquire +__tsan_mutex_create +__tsan_mutex_destroy +__tsan_mutex_pre_lock +__tsan_mutex_post_lock +__tsan_mutex_pre_unlock +__tsan_mutex_post_unlock +__tsan_mutex_pre_signal +__tsan_mutex_post_signal +__tsan_mutex_pre_divert +__tsan_mutex_post_divert __ubsan_* Annotate* WTFAnnotate* diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_debugging.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_debugging.cc index d9fb6861bc0c..a44b13632c61 100644 --- a/contrib/compiler-rt/lib/tsan/rtl/tsan_debugging.cc +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_debugging.cc @@ -24,6 +24,7 @@ static const char *ReportTypeDescription(ReportType typ) { if (typ == ReportTypeVptrRace) return "data-race-vptr"; if (typ == ReportTypeUseAfterFree) return "heap-use-after-free"; if (typ == ReportTypeVptrUseAfterFree) return "heap-use-after-free-vptr"; + if (typ == ReportTypeExternalRace) return "external-race"; if (typ == ReportTypeThreadLeak) return "thread-leak"; if (typ == ReportTypeMutexDestroyLocked) return "locked-mutex-destroy"; if (typ == ReportTypeMutexDoubleLock) return "mutex-double-lock"; @@ -127,6 +128,16 @@ int __tsan_get_report_loc(void *report, uptr idx, const char **type, } SANITIZER_INTERFACE_ATTRIBUTE +int __tsan_get_report_loc_object_type(void *report, uptr idx, + const char **object_type) { + const ReportDesc *rep = (ReportDesc *)report; + CHECK_LT(idx, rep->locs.Size()); + ReportLocation *loc = rep->locs[idx]; + *object_type = GetObjectTypeFromTag(loc->external_tag); + return 1; +} + +SANITIZER_INTERFACE_ATTRIBUTE int __tsan_get_report_mutex(void *report, uptr idx, uptr *mutex_id, void **addr, int *destroyed, void **trace, uptr trace_size) { const ReportDesc *rep = (ReportDesc *)report; @@ -140,7 +151,7 @@ int __tsan_get_report_mutex(void *report, uptr idx, uptr *mutex_id, void **addr, } SANITIZER_INTERFACE_ATTRIBUTE -int __tsan_get_report_thread(void *report, uptr idx, int *tid, uptr *os_id, +int __tsan_get_report_thread(void *report, uptr idx, int *tid, tid_t *os_id, int *running, const char **name, int *parent_tid, void **trace, uptr trace_size) { const ReportDesc *rep = (ReportDesc *)report; @@ -217,7 +228,7 @@ const char *__tsan_locate_address(uptr addr, char *name, uptr name_size, SANITIZER_INTERFACE_ATTRIBUTE int __tsan_get_alloc_stack(uptr addr, uptr *trace, uptr size, int *thread_id, - uptr *os_id) { + tid_t *os_id) { MBlock *b = 0; Allocator *a = allocator(); if (a->PointerIsMine((void *)addr)) { diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_defs.h b/contrib/compiler-rt/lib/tsan/rtl/tsan_defs.h index 55580a5c4436..8a0381e61ab0 100644 --- a/contrib/compiler-rt/lib/tsan/rtl/tsan_defs.h +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_defs.h @@ -149,7 +149,8 @@ class RegionAlloc; // Descriptor of user's memory block. 
struct MBlock { - u64 siz; + u64 siz : 48; + u64 tag : 16; u32 stk; u16 tid; }; diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_external.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_external.cc new file mode 100644 index 000000000000..dc8ec62322ce --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_external.cc @@ -0,0 +1,78 @@ +//===-- tsan_external.cc --------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +//===----------------------------------------------------------------------===// +#include "tsan_rtl.h" + +namespace __tsan { + +#define CALLERPC ((uptr)__builtin_return_address(0)) + +const uptr kMaxTag = 128; // Limited to 65,536, since MBlock only stores tags + // as 16-bit values, see tsan_defs.h. + +const char *registered_tags[kMaxTag]; +static atomic_uint32_t used_tags{1}; // Tag 0 means "no tag". NOLINT + +const char *GetObjectTypeFromTag(uptr tag) { + if (tag == 0) return nullptr; + // Invalid/corrupted tag? Better return NULL and let the caller deal with it. + if (tag >= atomic_load(&used_tags, memory_order_relaxed)) return nullptr; + return registered_tags[tag]; +} + +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE +void *__tsan_external_register_tag(const char *object_type) { + uptr new_tag = atomic_fetch_add(&used_tags, 1, memory_order_relaxed); + CHECK_LT(new_tag, kMaxTag); + registered_tags[new_tag] = internal_strdup(object_type); + return (void *)new_tag; +} + +SANITIZER_INTERFACE_ATTRIBUTE +void __tsan_external_assign_tag(void *addr, void *tag) { + CHECK_LT(tag, atomic_load(&used_tags, memory_order_relaxed)); + Allocator *a = allocator(); + MBlock *b = nullptr; + if (a->PointerIsMine((void *)addr)) { + void *block_begin = a->GetBlockBegin((void *)addr); + if (block_begin) b = ctx->metamap.GetBlock((uptr)block_begin); + } + if (b) { + b->tag = (uptr)tag; + } +} + +SANITIZER_INTERFACE_ATTRIBUTE +void __tsan_external_read(void *addr, void *caller_pc, void *tag) { + CHECK_LT(tag, atomic_load(&used_tags, memory_order_relaxed)); + ThreadState *thr = cur_thread(); + thr->external_tag = (uptr)tag; + FuncEntry(thr, (uptr)caller_pc); + MemoryRead(thr, CALLERPC, (uptr)addr, kSizeLog8); + FuncExit(thr); + thr->external_tag = 0; +} + +SANITIZER_INTERFACE_ATTRIBUTE +void __tsan_external_write(void *addr, void *caller_pc, void *tag) { + CHECK_LT(tag, atomic_load(&used_tags, memory_order_relaxed)); + ThreadState *thr = cur_thread(); + thr->external_tag = (uptr)tag; + FuncEntry(thr, (uptr)caller_pc); + MemoryWrite(thr, CALLERPC, (uptr)addr, kSizeLog8); + FuncExit(thr); + thr->external_tag = 0; +} +} // extern "C" + +} // namespace __tsan diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_flags.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_flags.cc index d8d4746ab59b..89e22a132786 100644 --- a/contrib/compiler-rt/lib/tsan/rtl/tsan_flags.cc +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_flags.cc @@ -21,10 +21,6 @@ namespace __tsan { -Flags *flags() { - return &ctx->flags; -} - // Can be overriden in frontend. 
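For reference, a minimal sketch (not part of the patch) of how a library could drive the external-access interface implemented above in the new tsan_external.cc. The "MyCollection" type name and the wrapper functions are hypothetical; the prototypes are copied from the declarations this patch adds to tsan_interface.h.

#include <stdlib.h>

void *__tsan_external_register_tag(const char *object_type);
void __tsan_external_assign_tag(void *addr, void *tag);
void __tsan_external_read(void *addr, void *caller_pc, void *tag);
void __tsan_external_write(void *addr, void *caller_pc, void *tag);

static void *collection_tag;   // registered once per object type

void collection_module_init(void) {
  collection_tag = __tsan_external_register_tag("MyCollection");
}

void *collection_create(void) {
  void *c = malloc(64);                           // heap block owned by the library
  __tsan_external_assign_tag(c, collection_tag);  // race reports now name the object type
  return c;
}

void collection_put(void *c, int v) {
  // Logical write to the object; caller_pc makes the report point at the
  // user's call site rather than at library internals.
  __tsan_external_write(c, __builtin_return_address(0), collection_tag);
  (void)v;  // ... real mutation elided ...
}

int collection_get(void *c) {
  __tsan_external_read(c, __builtin_return_address(0), collection_tag);
  return 0;  // ... real lookup elided ...
}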
#ifdef TSAN_EXTERNAL_HOOKS extern "C" const char* __tsan_default_options(); diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_flags.h b/contrib/compiler-rt/lib/tsan/rtl/tsan_flags.h index e2f6b3c9f021..66740def52fa 100644 --- a/contrib/compiler-rt/lib/tsan/rtl/tsan_flags.h +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_flags.h @@ -28,7 +28,6 @@ struct Flags : DDFlags { void ParseFromString(const char *str); }; -Flags *flags(); void InitializeFlags(Flags *flags, const char *env); } // namespace __tsan diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_flags.inc b/contrib/compiler-rt/lib/tsan/rtl/tsan_flags.inc index a48545c433ba..e9b3e35f07e5 100644 --- a/contrib/compiler-rt/lib/tsan/rtl/tsan_flags.inc +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_flags.inc @@ -79,7 +79,7 @@ TSAN_FLAG(bool, die_after_fork, true, TSAN_FLAG(const char *, suppressions, "", "Suppressions file name.") TSAN_FLAG(bool, ignore_interceptors_accesses, false, "Ignore reads and writes from all interceptors.") -TSAN_FLAG(bool, ignore_noninstrumented_modules, false, +TSAN_FLAG(bool, ignore_noninstrumented_modules, SANITIZER_MAC ? true : false, "Interceptors should only detect races when called from instrumented " "modules.") TSAN_FLAG(bool, shared_ptr_interceptor, true, diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_interceptors.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_interceptors.cc index 898f32df182b..d0fd91aec234 100644 --- a/contrib/compiler-rt/lib/tsan/rtl/tsan_interceptors.cc +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_interceptors.cc @@ -18,6 +18,7 @@ #include "sanitizer_common/sanitizer_linux.h" #include "sanitizer_common/sanitizer_platform_limits_posix.h" #include "sanitizer_common/sanitizer_placement_new.h" +#include "sanitizer_common/sanitizer_posix.h" #include "sanitizer_common/sanitizer_stacktrace.h" #include "sanitizer_common/sanitizer_tls_get_addr.h" #include "interception/interception.h" @@ -29,9 +30,6 @@ #include "tsan_mman.h" #include "tsan_fd.h" -#if SANITIZER_POSIX -#include "sanitizer_common/sanitizer_posix.h" -#endif using namespace __tsan; // NOLINT @@ -46,13 +44,6 @@ using namespace __tsan; // NOLINT #define mallopt(a, b) #endif -#if SANITIZER_LINUX || SANITIZER_FREEBSD -#define PTHREAD_CREATE_DETACHED 1 -#elif SANITIZER_MAC -#define PTHREAD_CREATE_DETACHED 2 -#endif - - #ifdef __mips__ const int kSigCount = 129; #else @@ -277,7 +268,7 @@ ScopedInterceptor::~ScopedInterceptor() { void ScopedInterceptor::EnableIgnores() { if (ignoring_) { - ThreadIgnoreBegin(thr_, pc_); + ThreadIgnoreBegin(thr_, pc_, false); if (in_ignored_lib_) { DCHECK(!thr_->in_ignored_lib); thr_->in_ignored_lib = true; @@ -881,7 +872,7 @@ extern "C" void *__tsan_thread_start_func(void *arg) { internal_sched_yield(); Processor *proc = ProcCreate(); ProcWire(proc, thr); - ThreadStart(thr, tid, GetTid()); + ThreadStart(thr, tid, GetTid(), /*workerthread*/ false); atomic_store(&p->tid, 0, memory_order_release); } void *res = callback(param); @@ -928,8 +919,7 @@ TSAN_INTERCEPTOR(int, pthread_create, ThreadIgnoreEnd(thr, pc); } if (res == 0) { - int tid = ThreadCreate(thr, pc, *(uptr*)th, - detached == PTHREAD_CREATE_DETACHED); + int tid = ThreadCreate(thr, pc, *(uptr*)th, IsStateDetached(detached)); CHECK_NE(tid, 0); // Synchronization on p.tid serves two purposes: // 1. ThreadCreate must finish before the new thread starts. 
@@ -1025,7 +1015,7 @@ static void cond_mutex_unlock(CondMutexUnlockCtx *arg) { ThreadSignalContext *ctx = SigCtx(arg->thr); CHECK_EQ(atomic_load(&ctx->in_blocking_func, memory_order_relaxed), 1); atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed); - MutexLock(arg->thr, arg->pc, (uptr)arg->m); + MutexPostLock(arg->thr, arg->pc, (uptr)arg->m, MutexFlagDoPreLockOnPostLock); // Undo BlockingCall ctor effects. arg->thr->ignore_interceptors--; arg->si->~ScopedInterceptor(); @@ -1054,7 +1044,7 @@ static int cond_wait(ThreadState *thr, uptr pc, ScopedInterceptor *si, fn, c, m, t, (void (*)(void *arg))cond_mutex_unlock, &arg); } if (res == errno_EOWNERDEAD) MutexRepair(thr, pc, (uptr)m); - MutexLock(thr, pc, (uptr)m); + MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock); return res; } @@ -1114,14 +1104,15 @@ TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) { SCOPED_TSAN_INTERCEPTOR(pthread_mutex_init, m, a); int res = REAL(pthread_mutex_init)(m, a); if (res == 0) { - bool recursive = false; + u32 flagz = 0; if (a) { int type = 0; if (REAL(pthread_mutexattr_gettype)(a, &type) == 0) - recursive = (type == PTHREAD_MUTEX_RECURSIVE - || type == PTHREAD_MUTEX_RECURSIVE_NP); + if (type == PTHREAD_MUTEX_RECURSIVE || + type == PTHREAD_MUTEX_RECURSIVE_NP) + flagz |= MutexFlagWriteReentrant; } - MutexCreate(thr, pc, (uptr)m, false, recursive, false); + MutexCreate(thr, pc, (uptr)m, flagz); } return res; } @@ -1141,7 +1132,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) { if (res == EOWNERDEAD) MutexRepair(thr, pc, (uptr)m); if (res == 0 || res == EOWNERDEAD) - MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true); + MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock); return res; } @@ -1150,7 +1141,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) { SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime); int res = REAL(pthread_mutex_timedlock)(m, abstime); if (res == 0) { - MutexLock(thr, pc, (uptr)m); + MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock); } return res; } @@ -1161,7 +1152,7 @@ TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) { SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared); int res = REAL(pthread_spin_init)(m, pshared); if (res == 0) { - MutexCreate(thr, pc, (uptr)m, false, false, false); + MutexCreate(thr, pc, (uptr)m); } return res; } @@ -1177,9 +1168,10 @@ TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) { TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) { SCOPED_TSAN_INTERCEPTOR(pthread_spin_lock, m); + MutexPreLock(thr, pc, (uptr)m); int res = REAL(pthread_spin_lock)(m); if (res == 0) { - MutexLock(thr, pc, (uptr)m); + MutexPostLock(thr, pc, (uptr)m); } return res; } @@ -1188,7 +1180,7 @@ TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) { SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m); int res = REAL(pthread_spin_trylock)(m); if (res == 0) { - MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true); + MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock); } return res; } @@ -1205,7 +1197,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) { SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_init, m, a); int res = REAL(pthread_rwlock_init)(m, a); if (res == 0) { - MutexCreate(thr, pc, (uptr)m, true, false, false); + MutexCreate(thr, pc, (uptr)m); } return res; } @@ -1221,9 +1213,10 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_destroy, void *m) { TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) { SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_rdlock, m); + MutexPreReadLock(thr, pc, 
(uptr)m); int res = REAL(pthread_rwlock_rdlock)(m); if (res == 0) { - MutexReadLock(thr, pc, (uptr)m); + MutexPostReadLock(thr, pc, (uptr)m); } return res; } @@ -1232,7 +1225,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) { SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m); int res = REAL(pthread_rwlock_tryrdlock)(m); if (res == 0) { - MutexReadLock(thr, pc, (uptr)m, /*try_lock=*/true); + MutexPostReadLock(thr, pc, (uptr)m, MutexFlagTryLock); } return res; } @@ -1242,7 +1235,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) { SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime); int res = REAL(pthread_rwlock_timedrdlock)(m, abstime); if (res == 0) { - MutexReadLock(thr, pc, (uptr)m); + MutexPostReadLock(thr, pc, (uptr)m); } return res; } @@ -1250,9 +1243,10 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) { TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) { SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m); + MutexPreLock(thr, pc, (uptr)m); int res = REAL(pthread_rwlock_wrlock)(m); if (res == 0) { - MutexLock(thr, pc, (uptr)m); + MutexPostLock(thr, pc, (uptr)m); } return res; } @@ -1261,7 +1255,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) { SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m); int res = REAL(pthread_rwlock_trywrlock)(m); if (res == 0) { - MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true); + MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock); } return res; } @@ -1271,7 +1265,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) { SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime); int res = REAL(pthread_rwlock_timedwrlock)(m, abstime); if (res == 0) { - MutexLock(thr, pc, (uptr)m); + MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock); } return res; } @@ -1644,24 +1638,6 @@ TSAN_INTERCEPTOR(void*, tmpfile64, int fake) { #define TSAN_MAYBE_INTERCEPT_TMPFILE64 #endif -TSAN_INTERCEPTOR(uptr, fread, void *ptr, uptr size, uptr nmemb, void *f) { - // libc file streams can call user-supplied functions, see fopencookie. - { - SCOPED_TSAN_INTERCEPTOR(fread, ptr, size, nmemb, f); - MemoryAccessRange(thr, pc, (uptr)ptr, size * nmemb, true); - } - return REAL(fread)(ptr, size, nmemb, f); -} - -TSAN_INTERCEPTOR(uptr, fwrite, const void *p, uptr size, uptr nmemb, void *f) { - // libc file streams can call user-supplied functions, see fopencookie. - { - SCOPED_TSAN_INTERCEPTOR(fwrite, p, size, nmemb, f); - MemoryAccessRange(thr, pc, (uptr)p, size * nmemb, false); - } - return REAL(fwrite)(p, size, nmemb, f); -} - static void FlushStreams() { // Flushing all the streams here may freeze the process if a child thread is // performing file stream operations at the same time. 
@@ -2251,8 +2227,12 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc, #define COMMON_INTERCEPTOR_ON_EXIT(ctx) \ OnExit(((TsanInterceptorContext *) ctx)->thr) -#define COMMON_INTERCEPTOR_MUTEX_LOCK(ctx, m) \ - MutexLock(((TsanInterceptorContext *)ctx)->thr, \ +#define COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m) \ + MutexPreLock(((TsanInterceptorContext *)ctx)->thr, \ + ((TsanInterceptorContext *)ctx)->pc, (uptr)m) + +#define COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m) \ + MutexPostLock(((TsanInterceptorContext *)ctx)->thr, \ ((TsanInterceptorContext *)ctx)->pc, (uptr)m) #define COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m) \ diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cc index fc5eb0499076..f6bf8a0e586b 100644 --- a/contrib/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cc +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cc @@ -281,6 +281,12 @@ TSAN_INTERCEPTOR(void, xpc_connection_send_message_with_reply, (connection, message, replyq, new_handler); } +TSAN_INTERCEPTOR(void, xpc_connection_cancel, xpc_connection_t connection) { + SCOPED_TSAN_INTERCEPTOR(xpc_connection_cancel, connection); + Release(thr, pc, (uptr)connection); + REAL(xpc_connection_cancel)(connection); +} + // On macOS, libc++ is always linked dynamically, so intercepting works the // usual way. #define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_interface.h b/contrib/compiler-rt/lib/tsan/rtl/tsan_interface.h index 4e342a58a066..71986283ee17 100644 --- a/contrib/compiler-rt/lib/tsan/rtl/tsan_interface.h +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_interface.h @@ -18,6 +18,7 @@ #include <sanitizer_common/sanitizer_internal_defs.h> using __sanitizer::uptr; +using __sanitizer::tid_t; // This header should NOT include any other headers. // All functions in this header are extern "C" and start with __tsan_. @@ -79,6 +80,15 @@ SANITIZER_INTERFACE_ATTRIBUTE void __tsan_ignore_thread_begin(); SANITIZER_INTERFACE_ATTRIBUTE void __tsan_ignore_thread_end(); SANITIZER_INTERFACE_ATTRIBUTE +void *__tsan_external_register_tag(const char *object_type); +SANITIZER_INTERFACE_ATTRIBUTE +void __tsan_external_assign_tag(void *addr, void *tag); +SANITIZER_INTERFACE_ATTRIBUTE +void __tsan_external_read(void *addr, void *caller_pc, void *tag); +SANITIZER_INTERFACE_ATTRIBUTE +void __tsan_external_write(void *addr, void *caller_pc, void *tag); + +SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read_range(void *addr, unsigned long size); // NOLINT SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write_range(void *addr, unsigned long size); // NOLINT @@ -123,6 +133,10 @@ int __tsan_get_report_loc(void *report, uptr idx, const char **type, int *fd, int *suppressable, void **trace, uptr trace_size); +SANITIZER_INTERFACE_ATTRIBUTE +int __tsan_get_report_loc_object_type(void *report, uptr idx, + const char **object_type); + // Returns information about mutexes included in the report. SANITIZER_INTERFACE_ATTRIBUTE int __tsan_get_report_mutex(void *report, uptr idx, uptr *mutex_id, void **addr, @@ -130,7 +144,7 @@ int __tsan_get_report_mutex(void *report, uptr idx, uptr *mutex_id, void **addr, // Returns information about threads included in the report. 
SANITIZER_INTERFACE_ATTRIBUTE -int __tsan_get_report_thread(void *report, uptr idx, int *tid, uptr *os_id, +int __tsan_get_report_thread(void *report, uptr idx, int *tid, tid_t *os_id, int *running, const char **name, int *parent_tid, void **trace, uptr trace_size); @@ -147,7 +161,7 @@ const char *__tsan_locate_address(uptr addr, char *name, uptr name_size, // Returns the allocation stack for a heap pointer. SANITIZER_INTERFACE_ATTRIBUTE int __tsan_get_alloc_stack(uptr addr, uptr *trace, uptr size, int *thread_id, - uptr *os_id); + tid_t *os_id); #endif // SANITIZER_GO diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cc index 62db79661625..810c84025f23 100644 --- a/contrib/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cc +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cc @@ -31,11 +31,10 @@ namespace __tsan { class ScopedAnnotation { public: - ScopedAnnotation(ThreadState *thr, const char *aname, const char *f, int l, - uptr pc) + ScopedAnnotation(ThreadState *thr, const char *aname, uptr pc) : thr_(thr) { FuncEntry(thr_, pc); - DPrintf("#%d: annotation %s() %s:%d\n", thr_->tid, aname, f, l); + DPrintf("#%d: annotation %s()\n", thr_->tid, aname); } ~ScopedAnnotation() { @@ -46,18 +45,20 @@ class ScopedAnnotation { ThreadState *const thr_; }; -#define SCOPED_ANNOTATION(typ) \ +#define SCOPED_ANNOTATION_RET(typ, ret) \ if (!flags()->enable_annotations) \ - return; \ + return ret; \ ThreadState *thr = cur_thread(); \ const uptr caller_pc = (uptr)__builtin_return_address(0); \ StatInc(thr, StatAnnotation); \ StatInc(thr, Stat##typ); \ - ScopedAnnotation sa(thr, __func__, f, l, caller_pc); \ + ScopedAnnotation sa(thr, __func__, caller_pc); \ const uptr pc = StackTrace::GetCurrentPc(); \ (void)pc; \ /**/ +#define SCOPED_ANNOTATION(typ) SCOPED_ANNOTATION_RET(typ, ) + static const int kMaxDescLen = 128; struct ExpectRace { @@ -252,12 +253,12 @@ void INTERFACE_ATTRIBUTE AnnotateCondVarWait(char *f, int l, uptr cv, void INTERFACE_ATTRIBUTE AnnotateRWLockCreate(char *f, int l, uptr m) { SCOPED_ANNOTATION(AnnotateRWLockCreate); - MutexCreate(thr, pc, m, true, true, false); + MutexCreate(thr, pc, m, MutexFlagWriteReentrant); } void INTERFACE_ATTRIBUTE AnnotateRWLockCreateStatic(char *f, int l, uptr m) { SCOPED_ANNOTATION(AnnotateRWLockCreateStatic); - MutexCreate(thr, pc, m, true, true, true); + MutexCreate(thr, pc, m, MutexFlagWriteReentrant | MutexFlagLinkerInit); } void INTERFACE_ATTRIBUTE AnnotateRWLockDestroy(char *f, int l, uptr m) { @@ -269,9 +270,9 @@ void INTERFACE_ATTRIBUTE AnnotateRWLockAcquired(char *f, int l, uptr m, uptr is_w) { SCOPED_ANNOTATION(AnnotateRWLockAcquired); if (is_w) - MutexLock(thr, pc, m); + MutexPostLock(thr, pc, m, MutexFlagDoPreLockOnPostLock); else - MutexReadLock(thr, pc, m); + MutexPostReadLock(thr, pc, m, MutexFlagDoPreLockOnPostLock); } void INTERFACE_ATTRIBUTE AnnotateRWLockReleased(char *f, int l, uptr m, @@ -458,4 +459,95 @@ void INTERFACE_ATTRIBUTE AnnotateMemoryIsInitialized(char *f, int l, uptr mem, uptr sz) {} void INTERFACE_ATTRIBUTE AnnotateMemoryIsUninitialized(char *f, int l, uptr mem, uptr sz) {} + +// Note: the parameter is called flagz, because flags is already taken +// by the global function that returns flags. 
+INTERFACE_ATTRIBUTE +void __tsan_mutex_create(void *m, unsigned flagz) { + SCOPED_ANNOTATION(__tsan_mutex_create); + MutexCreate(thr, pc, (uptr)m, flagz & MutexCreationFlagMask); +} + +INTERFACE_ATTRIBUTE +void __tsan_mutex_destroy(void *m, unsigned flagz) { + SCOPED_ANNOTATION(__tsan_mutex_destroy); + MutexDestroy(thr, pc, (uptr)m); +} + +INTERFACE_ATTRIBUTE +void __tsan_mutex_pre_lock(void *m, unsigned flagz) { + SCOPED_ANNOTATION(__tsan_mutex_pre_lock); + if (!(flagz & MutexFlagTryLock)) { + if (flagz & MutexFlagReadLock) + MutexPreReadLock(thr, pc, (uptr)m); + else + MutexPreLock(thr, pc, (uptr)m); + } + ThreadIgnoreBegin(thr, pc, false); + ThreadIgnoreSyncBegin(thr, pc, false); +} + +INTERFACE_ATTRIBUTE +void __tsan_mutex_post_lock(void *m, unsigned flagz, int rec) { + SCOPED_ANNOTATION(__tsan_mutex_post_lock); + ThreadIgnoreSyncEnd(thr, pc); + ThreadIgnoreEnd(thr, pc); + if (!(flagz & MutexFlagTryLockFailed)) { + if (flagz & MutexFlagReadLock) + MutexPostReadLock(thr, pc, (uptr)m, flagz); + else + MutexPostLock(thr, pc, (uptr)m, flagz, rec); + } +} + +INTERFACE_ATTRIBUTE +int __tsan_mutex_pre_unlock(void *m, unsigned flagz) { + SCOPED_ANNOTATION_RET(__tsan_mutex_pre_unlock, 0); + int ret = 0; + if (flagz & MutexFlagReadLock) { + CHECK(!(flagz & MutexFlagRecursiveUnlock)); + MutexReadUnlock(thr, pc, (uptr)m); + } else { + ret = MutexUnlock(thr, pc, (uptr)m, flagz); + } + ThreadIgnoreBegin(thr, pc, false); + ThreadIgnoreSyncBegin(thr, pc, false); + return ret; +} + +INTERFACE_ATTRIBUTE +void __tsan_mutex_post_unlock(void *m, unsigned flagz) { + SCOPED_ANNOTATION(__tsan_mutex_post_unlock); + ThreadIgnoreSyncEnd(thr, pc); + ThreadIgnoreEnd(thr, pc); +} + +INTERFACE_ATTRIBUTE +void __tsan_mutex_pre_signal(void *addr, unsigned flagz) { + SCOPED_ANNOTATION(__tsan_mutex_pre_signal); + ThreadIgnoreBegin(thr, pc, false); + ThreadIgnoreSyncBegin(thr, pc, false); +} + +INTERFACE_ATTRIBUTE +void __tsan_mutex_post_signal(void *addr, unsigned flagz) { + SCOPED_ANNOTATION(__tsan_mutex_post_signal); + ThreadIgnoreSyncEnd(thr, pc); + ThreadIgnoreEnd(thr, pc); +} + +INTERFACE_ATTRIBUTE +void __tsan_mutex_pre_divert(void *addr, unsigned flagz) { + SCOPED_ANNOTATION(__tsan_mutex_pre_divert); + // Exit from ignore region started in __tsan_mutex_pre_lock/unlock/signal. + ThreadIgnoreSyncEnd(thr, pc); + ThreadIgnoreEnd(thr, pc); +} + +INTERFACE_ATTRIBUTE +void __tsan_mutex_post_divert(void *addr, unsigned flagz) { + SCOPED_ANNOTATION(__tsan_mutex_post_divert); + ThreadIgnoreBegin(thr, pc, false); + ThreadIgnoreSyncBegin(thr, pc, false); +} } // extern "C" diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc index 5238b66a2e51..b22d5c1ecef8 100644 --- a/contrib/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc @@ -450,13 +450,32 @@ static void AtomicFence(ThreadState *thr, uptr pc, morder mo) { // C/C++ +static morder convert_morder(morder mo) { + if (flags()->force_seq_cst_atomics) + return (morder)mo_seq_cst; + + // Filter out additional memory order flags: + // MEMMODEL_SYNC = 1 << 15 + // __ATOMIC_HLE_ACQUIRE = 1 << 16 + // __ATOMIC_HLE_RELEASE = 1 << 17 + // + // HLE is an optimization, and we pretend that elision always fails. + // MEMMODEL_SYNC is used when lowering __sync_ atomics, + // since we use __sync_ atomics for actual atomic operations, + // we can safely ignore it as well. It also subtly affects semantics, + // but we don't model the difference. 
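The __tsan_mutex_* entry points defined above let a custom synchronization primitive describe itself to the runtime. A minimal sketch of the intended call pattern follows (not part of the patch; the spinlock and its atomics are hypothetical, flagz is left at 0 for brevity, and the prototypes mirror the definitions above).

void __tsan_mutex_create(void *m, unsigned flagz);
void __tsan_mutex_destroy(void *m, unsigned flagz);
void __tsan_mutex_pre_lock(void *m, unsigned flagz);
void __tsan_mutex_post_lock(void *m, unsigned flagz, int rec);
int  __tsan_mutex_pre_unlock(void *m, unsigned flagz);
void __tsan_mutex_post_unlock(void *m, unsigned flagz);

typedef struct { volatile int locked; } spinlock_t;

void spinlock_init(spinlock_t *l) {
  l->locked = 0;
  __tsan_mutex_create(l, 0);
}

void spinlock_lock(spinlock_t *l) {
  __tsan_mutex_pre_lock(l, 0);      // also opens the ignore region (see above)
  while (__sync_lock_test_and_set(&l->locked, 1)) { /* spin */ }
  __tsan_mutex_post_lock(l, 0, 0);  // closes the ignore region, records the acquire
}

void spinlock_unlock(spinlock_t *l) {
  __tsan_mutex_pre_unlock(l, 0);    // records the release, opens an ignore region
  __sync_lock_release(&l->locked);
  __tsan_mutex_post_unlock(l, 0);   // closes the ignore region
}

void spinlock_destroy(spinlock_t *l) {
  __tsan_mutex_destroy(l, 0);
}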
+ return (morder)(mo & 0x7fff); +} + #define SCOPED_ATOMIC(func, ...) \ - const uptr callpc = (uptr)__builtin_return_address(0); \ - uptr pc = StackTrace::GetCurrentPc(); \ - mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \ ThreadState *const thr = cur_thread(); \ - if (thr->ignore_interceptors) \ + if (thr->ignore_sync || thr->ignore_interceptors) { \ + ProcessPendingSignals(thr); \ return NoTsanAtomic##func(__VA_ARGS__); \ + } \ + const uptr callpc = (uptr)__builtin_return_address(0); \ + uptr pc = StackTrace::GetCurrentPc(); \ + mo = convert_morder(mo); \ AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \ ScopedAtomic sa(thr, callpc, a, mo, __func__); \ return Atomic##func(thr, pc, __VA_ARGS__); \ diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_interface_java.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_interface_java.cc index 5bdc04f07567..75e960e629f9 100644 --- a/contrib/compiler-rt/lib/tsan/rtl/tsan_interface_java.cc +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_interface_java.cc @@ -180,8 +180,8 @@ void __tsan_java_mutex_lock(jptr addr) { CHECK_GE(addr, jctx->heap_begin); CHECK_LT(addr, jctx->heap_begin + jctx->heap_size); - MutexCreate(thr, pc, addr, true, true, true); - MutexLock(thr, pc, addr); + MutexPostLock(thr, pc, addr, MutexFlagLinkerInit | MutexFlagWriteReentrant | + MutexFlagDoPreLockOnPostLock); } void __tsan_java_mutex_unlock(jptr addr) { @@ -201,8 +201,8 @@ void __tsan_java_mutex_read_lock(jptr addr) { CHECK_GE(addr, jctx->heap_begin); CHECK_LT(addr, jctx->heap_begin + jctx->heap_size); - MutexCreate(thr, pc, addr, true, true, true); - MutexReadLock(thr, pc, addr); + MutexPostReadLock(thr, pc, addr, MutexFlagLinkerInit | + MutexFlagWriteReentrant | MutexFlagDoPreLockOnPostLock); } void __tsan_java_mutex_read_unlock(jptr addr) { @@ -223,8 +223,8 @@ void __tsan_java_mutex_lock_rec(jptr addr, int rec) { CHECK_LT(addr, jctx->heap_begin + jctx->heap_size); CHECK_GT(rec, 0); - MutexCreate(thr, pc, addr, true, true, true); - MutexLock(thr, pc, addr, rec); + MutexPostLock(thr, pc, addr, MutexFlagLinkerInit | MutexFlagWriteReentrant | + MutexFlagDoPreLockOnPostLock | MutexFlagRecursiveLock, rec); } int __tsan_java_mutex_unlock_rec(jptr addr) { @@ -234,7 +234,7 @@ int __tsan_java_mutex_unlock_rec(jptr addr) { CHECK_GE(addr, jctx->heap_begin); CHECK_LT(addr, jctx->heap_begin + jctx->heap_size); - return MutexUnlock(thr, pc, addr, true); + return MutexUnlock(thr, pc, addr, MutexFlagRecursiveUnlock); } void __tsan_java_acquire(jptr addr) { diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_libdispatch_mac.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_libdispatch_mac.cc index d8c689ebb5fc..8c759a3be4e1 100644 --- a/contrib/compiler-rt/lib/tsan/rtl/tsan_libdispatch_mac.cc +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_libdispatch_mac.cc @@ -93,14 +93,15 @@ static tsan_block_context_t *AllocContext(ThreadState *thr, uptr pc, new_context->free_context_in_callback = true; new_context->submitted_synchronously = false; new_context->is_barrier_block = false; + new_context->non_queue_sync_object = 0; return new_context; } -#define GET_QUEUE_SYNC_VARS(context, q) \ - bool is_queue_serial = q && IsQueueSerial(q); \ - uptr sync_ptr = (uptr)q ?: context->non_queue_sync_object; \ - uptr serial_sync = (uptr)sync_ptr; \ - uptr concurrent_sync = ((uptr)sync_ptr) + sizeof(uptr); \ +#define GET_QUEUE_SYNC_VARS(context, q) \ + bool is_queue_serial = q && IsQueueSerial(q); \ + uptr sync_ptr = (uptr)q ?: context->non_queue_sync_object; \ + uptr serial_sync = (uptr)sync_ptr; \ + uptr 
concurrent_sync = sync_ptr ? ((uptr)sync_ptr) + sizeof(uptr) : 0; \ bool serial_task = context->is_barrier_block || is_queue_serial static void dispatch_sync_pre_execute(ThreadState *thr, uptr pc, @@ -111,8 +112,8 @@ static void dispatch_sync_pre_execute(ThreadState *thr, uptr pc, dispatch_queue_t q = context->queue; do { GET_QUEUE_SYNC_VARS(context, q); - Acquire(thr, pc, serial_sync); - if (serial_task) Acquire(thr, pc, concurrent_sync); + if (serial_sync) Acquire(thr, pc, serial_sync); + if (serial_task && concurrent_sync) Acquire(thr, pc, concurrent_sync); if (q) q = GetTargetQueueFromQueue(q); } while (q); @@ -126,7 +127,8 @@ static void dispatch_sync_post_execute(ThreadState *thr, uptr pc, dispatch_queue_t q = context->queue; do { GET_QUEUE_SYNC_VARS(context, q); - Release(thr, pc, serial_task ? serial_sync : concurrent_sync); + if (serial_task && serial_sync) Release(thr, pc, serial_sync); + if (!serial_task && concurrent_sync) Release(thr, pc, concurrent_sync); if (q) q = GetTargetQueueFromQueue(q); } while (q); diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cc index 25dd241d826f..b8d3d5528bb5 100644 --- a/contrib/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cc +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cc @@ -207,7 +207,7 @@ static void my_pthread_introspection_hook(unsigned int event, pthread_t thread, ThreadState *parent_thread_state = nullptr; // No parent. int tid = ThreadCreate(parent_thread_state, 0, (uptr)thread, true); CHECK_NE(tid, 0); - ThreadStart(thr, tid, GetTid()); + ThreadStart(thr, tid, GetTid(), /*workerthread*/ true); } } else if (event == PTHREAD_INTROSPECTION_THREAD_TERMINATE) { if (thread == pthread_self()) { diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_report.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_report.cc index 07fd41208eb7..7de00840cdbc 100644 --- a/contrib/compiler-rt/lib/tsan/rtl/tsan_report.cc +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_report.cc @@ -90,6 +90,8 @@ static const char *ReportTypeString(ReportType typ) { return "heap-use-after-free"; if (typ == ReportTypeVptrUseAfterFree) return "heap-use-after-free (virtual call vs free)"; + if (typ == ReportTypeExternalRace) + return "race on a library object"; if (typ == ReportTypeThreadLeak) return "thread leak"; if (typ == ReportTypeMutexDestroyLocked) @@ -152,14 +154,25 @@ static const char *MopDesc(bool first, bool write, bool atomic) { : (write ? "Previous write" : "Previous read")); } +static const char *ExternalMopDesc(bool first, bool write) { + return first ? (write ? "Mutating" : "Read-only") + : (write ? 
"Previous mutating" : "Previous read-only"); +} + static void PrintMop(const ReportMop *mop, bool first) { Decorator d; char thrbuf[kThreadBufSize]; Printf("%s", d.Access()); - Printf(" %s of size %d at %p by %s", - MopDesc(first, mop->write, mop->atomic), - mop->size, (void*)mop->addr, - thread_name(thrbuf, mop->tid)); + const char *object_type = GetObjectTypeFromTag(mop->external_tag); + if (!object_type) { + Printf(" %s of size %d at %p by %s", + MopDesc(first, mop->write, mop->atomic), mop->size, + (void *)mop->addr, thread_name(thrbuf, mop->tid)); + } else { + Printf(" %s access of object %s at %p by %s", + ExternalMopDesc(first, mop->write), object_type, + (void *)mop->addr, thread_name(thrbuf, mop->tid)); + } PrintMutexSet(mop->mset); Printf(":\n"); Printf("%s", d.EndAccess()); @@ -183,9 +196,16 @@ static void PrintLocation(const ReportLocation *loc) { global.module_offset); } else if (loc->type == ReportLocationHeap) { char thrbuf[kThreadBufSize]; - Printf(" Location is heap block of size %zu at %p allocated by %s:\n", - loc->heap_chunk_size, loc->heap_chunk_start, - thread_name(thrbuf, loc->tid)); + const char *object_type = GetObjectTypeFromTag(loc->external_tag); + if (!object_type) { + Printf(" Location is heap block of size %zu at %p allocated by %s:\n", + loc->heap_chunk_size, loc->heap_chunk_start, + thread_name(thrbuf, loc->tid)); + } else { + Printf(" Location is %s object of size %zu at %p allocated by %s:\n", + object_type, loc->heap_chunk_size, loc->heap_chunk_start, + thread_name(thrbuf, loc->tid)); + } print_stack = true; } else if (loc->type == ReportLocationStack) { Printf(" Location is stack of %s.\n\n", thread_name(thrbuf, loc->tid)); @@ -235,9 +255,15 @@ static void PrintThread(const ReportThread *rt) { if (rt->name && rt->name[0] != '\0') Printf(" '%s'", rt->name); char thrbuf[kThreadBufSize]; - Printf(" (tid=%zu, %s) created by %s", - rt->os_id, rt->running ? "running" : "finished", - thread_name(thrbuf, rt->parent_tid)); + const char *thread_status = rt->running ? 
"running" : "finished"; + if (rt->workerthread) { + Printf(" (tid=%zu, %s) is a GCD worker thread\n", rt->os_id, thread_status); + Printf("\n"); + Printf("%s", d.EndThreadDescription()); + return; + } + Printf(" (tid=%zu, %s) created by %s", rt->os_id, thread_status, + thread_name(thrbuf, rt->parent_tid)); if (rt->stack) Printf(" at:"); Printf("\n"); diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_report.h b/contrib/compiler-rt/lib/tsan/rtl/tsan_report.h index d0b9d7458bf8..a0473e8dbdad 100644 --- a/contrib/compiler-rt/lib/tsan/rtl/tsan_report.h +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_report.h @@ -24,6 +24,7 @@ enum ReportType { ReportTypeVptrRace, ReportTypeUseAfterFree, ReportTypeVptrUseAfterFree, + ReportTypeExternalRace, ReportTypeThreadLeak, ReportTypeMutexDestroyLocked, ReportTypeMutexDoubleLock, @@ -56,6 +57,7 @@ struct ReportMop { int size; bool write; bool atomic; + uptr external_tag; Vector<ReportMopMutex> mset; ReportStack *stack; @@ -75,6 +77,7 @@ struct ReportLocation { DataInfo global; uptr heap_chunk_start; uptr heap_chunk_size; + uptr external_tag; int tid; int fd; bool suppressable; @@ -87,10 +90,11 @@ struct ReportLocation { struct ReportThread { int id; - uptr os_id; + tid_t os_id; bool running; + bool workerthread; char *name; - int parent_tid; + u32 parent_tid; ReportStack *stack; }; diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl.cc index bfb835889c7a..70393037e786 100644 --- a/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl.cc +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl.cc @@ -381,7 +381,7 @@ void Initialize(ThreadState *thr) { // Initialize thread 0. int tid = ThreadCreate(thr, 0, 0, true); CHECK_EQ(tid, 0); - ThreadStart(thr, tid, internal_getpid()); + ThreadStart(thr, tid, GetTid(), /*workerthread*/ false); #if TSAN_CONTAINS_UBSAN __ubsan::InitAsPlugin(); #endif @@ -980,21 +980,21 @@ void FuncExit(ThreadState *thr) { thr->shadow_stack_pos--; } -void ThreadIgnoreBegin(ThreadState *thr, uptr pc) { +void ThreadIgnoreBegin(ThreadState *thr, uptr pc, bool save_stack) { DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid); thr->ignore_reads_and_writes++; CHECK_GT(thr->ignore_reads_and_writes, 0); thr->fast_state.SetIgnoreBit(); #if !SANITIZER_GO - if (!ctx->after_multithreaded_fork) + if (save_stack && !ctx->after_multithreaded_fork) thr->mop_ignore_set.Add(CurrentStackId(thr, pc)); #endif } void ThreadIgnoreEnd(ThreadState *thr, uptr pc) { DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid); + CHECK_GT(thr->ignore_reads_and_writes, 0); thr->ignore_reads_and_writes--; - CHECK_GE(thr->ignore_reads_and_writes, 0); if (thr->ignore_reads_and_writes == 0) { thr->fast_state.ClearIgnoreBit(); #if !SANITIZER_GO @@ -1011,20 +1011,20 @@ uptr __tsan_testonly_shadow_stack_current_size() { } #endif -void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) { +void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc, bool save_stack) { DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid); thr->ignore_sync++; CHECK_GT(thr->ignore_sync, 0); #if !SANITIZER_GO - if (!ctx->after_multithreaded_fork) + if (save_stack && !ctx->after_multithreaded_fork) thr->sync_ignore_set.Add(CurrentStackId(thr, pc)); #endif } void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) { DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid); + CHECK_GT(thr->ignore_sync, 0); thr->ignore_sync--; - CHECK_GE(thr->ignore_sync, 0); #if !SANITIZER_GO if (thr->ignore_sync == 0) thr->sync_ignore_set.Reset(); diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl.h 
b/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl.h index 7fcb9d48e038..3481c31ebb1c 100644 --- a/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl.h +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl.h @@ -410,6 +410,7 @@ struct ThreadState { bool is_dead; bool is_freeing; bool is_vptr_access; + uptr external_tag; const uptr stk_addr; const uptr stk_size; const uptr tls_addr; @@ -545,6 +546,10 @@ struct Context { extern Context *ctx; // The one and the only global runtime context. +ALWAYS_INLINE Flags *flags() { + return &ctx->flags; +} + struct ScopedIgnoreInterceptors { ScopedIgnoreInterceptors() { #if !SANITIZER_GO @@ -564,7 +569,7 @@ class ScopedReport { explicit ScopedReport(ReportType typ); ~ScopedReport(); - void AddMemoryAccess(uptr addr, Shadow s, StackTrace stack, + void AddMemoryAccess(uptr addr, uptr external_tag, Shadow s, StackTrace stack, const MutexSet *mset); void AddStack(StackTrace stack, bool suppressable = false); void AddThread(const ThreadContext *tctx, bool suppressable = false); @@ -640,6 +645,8 @@ bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace); bool IsExpectedReport(uptr addr, uptr size); void PrintMatchedBenignRaces(); +const char *GetObjectTypeFromTag(uptr tag); + #if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1 # define DPrintf Printf #else @@ -704,16 +711,16 @@ void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size); void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size); void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size); -void ThreadIgnoreBegin(ThreadState *thr, uptr pc); +void ThreadIgnoreBegin(ThreadState *thr, uptr pc, bool save_stack = true); void ThreadIgnoreEnd(ThreadState *thr, uptr pc); -void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc); +void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc, bool save_stack = true); void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc); void FuncEntry(ThreadState *thr, uptr pc); void FuncExit(ThreadState *thr); int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached); -void ThreadStart(ThreadState *thr, int tid, uptr os_id); +void ThreadStart(ThreadState *thr, int tid, tid_t os_id, bool workerthread); void ThreadFinish(ThreadState *thr); int ThreadTid(ThreadState *thr, uptr pc, uptr uid); void ThreadJoin(ThreadState *thr, uptr pc, int tid); @@ -728,13 +735,16 @@ void ProcDestroy(Processor *proc); void ProcWire(Processor *proc, ThreadState *thr); void ProcUnwire(Processor *proc, ThreadState *thr); -void MutexCreate(ThreadState *thr, uptr pc, uptr addr, - bool rw, bool recursive, bool linker_init); +// Note: the parameter is called flagz, because flags is already taken +// by the global function that returns flags. 
+void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0); void MutexDestroy(ThreadState *thr, uptr pc, uptr addr); -void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec = 1, - bool try_lock = false); -int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all = false); -void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool try_lock = false); +void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0); +void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0, + int rec = 1); +int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0); +void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0); +void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0); void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr); void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr); void MutexRepair(ThreadState *thr, uptr pc, uptr addr); // call on EOWNERDEAD diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cc index f3b51c30faff..086b28927919 100644 --- a/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cc +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cc @@ -62,20 +62,17 @@ static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ, OutputReport(thr, rep); } -void MutexCreate(ThreadState *thr, uptr pc, uptr addr, - bool rw, bool recursive, bool linker_init) { - DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr); +void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) { + DPrintf("#%d: MutexCreate %zx flagz=0x%x\n", thr->tid, addr, flagz); StatInc(thr, StatMutexCreate); - if (!linker_init && IsAppMem(addr)) { + if (!(flagz & MutexFlagLinkerInit) && IsAppMem(addr)) { CHECK(!thr->is_freeing); thr->is_freeing = true; MemoryWrite(thr, pc, addr, kSizeLog1); thr->is_freeing = false; } SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true); - s->is_rw = rw; - s->is_recursive = recursive; - s->is_linker_init = linker_init; + s->SetFlags(flagz & MutexCreationFlagMask); if (!SANITIZER_GO && s->creation_stack_id == 0) s->creation_stack_id = CurrentStackId(thr, pc); s->mtx.Unlock(); @@ -87,7 +84,7 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) { SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true); if (s == 0) return; - if (s->is_linker_init) { + if (s->IsFlagSet(MutexFlagLinkerInit)) { // Destroy is no-op for linker-initialized mutexes. s->mtx.Unlock(); return; @@ -100,8 +97,8 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) { bool unlock_locked = false; if (flags()->report_destroy_locked && s->owner_tid != SyncVar::kInvalidTid - && !s->is_broken) { - s->is_broken = true; + && !s->IsFlagSet(MutexFlagBroken)) { + s->SetFlags(MutexFlagBroken); unlock_locked = true; } u64 mid = s->GetId(); @@ -141,12 +138,33 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) { // s will be destroyed and freed in MetaMap::FreeBlock. 
} -void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec, bool try_lock) { - DPrintf("#%d: MutexLock %zx rec=%d\n", thr->tid, addr, rec); - CHECK_GT(rec, 0); +void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) { + DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz); + if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) { + SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false); + s->UpdateFlags(flagz); + if (s->owner_tid != thr->tid) { + Callback cb(thr, pc); + ctx->dd->MutexBeforeLock(&cb, &s->dd, true); + s->mtx.ReadUnlock(); + ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb)); + } else { + s->mtx.ReadUnlock(); + } + } +} + +void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) { + DPrintf("#%d: MutexPostLock %zx flag=0x%x rec=%d\n", + thr->tid, addr, flagz, rec); + if (flagz & MutexFlagRecursiveLock) + CHECK_GT(rec, 0); + else + rec = 1; if (IsAppMem(addr)) MemoryReadAtomic(thr, pc, addr, kSizeLog1); SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true); + s->UpdateFlags(flagz); thr->fast_state.IncrementEpoch(); TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId()); bool report_double_lock = false; @@ -156,38 +174,43 @@ void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec, bool try_lock) { s->last_lock = thr->fast_state.raw(); } else if (s->owner_tid == thr->tid) { CHECK_GT(s->recursion, 0); - } else if (flags()->report_mutex_bugs && !s->is_broken) { - s->is_broken = true; + } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) { + s->SetFlags(MutexFlagBroken); report_double_lock = true; } - if (s->recursion == 0) { + const bool first = s->recursion == 0; + s->recursion += rec; + if (first) { StatInc(thr, StatMutexLock); AcquireImpl(thr, pc, &s->clock); AcquireImpl(thr, pc, &s->read_clock); - } else if (!s->is_recursive) { + } else if (!s->IsFlagSet(MutexFlagWriteReentrant)) { StatInc(thr, StatMutexRecLock); } - s->recursion += rec; thr->mset.Add(s->GetId(), true, thr->fast_state.epoch()); - if (common_flags()->detect_deadlocks && (s->recursion - rec) == 0) { + bool pre_lock = false; + if (first && common_flags()->detect_deadlocks) { + pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) && + !(flagz & MutexFlagTryLock); Callback cb(thr, pc); - if (!try_lock) + if (pre_lock) ctx->dd->MutexBeforeLock(&cb, &s->dd, true); - ctx->dd->MutexAfterLock(&cb, &s->dd, true, try_lock); + ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock); } u64 mid = s->GetId(); s->mtx.Unlock(); // Can't touch s after this point. 
+ s = 0; if (report_double_lock) ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid); - if (common_flags()->detect_deadlocks) { + if (first && pre_lock && common_flags()->detect_deadlocks) { Callback cb(thr, pc); ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb)); } } -int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) { - DPrintf("#%d: MutexUnlock %zx all=%d\n", thr->tid, addr, all); +int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) { + DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz); if (IsAppMem(addr)) MemoryReadAtomic(thr, pc, addr, kSizeLog1); SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true); @@ -196,12 +219,12 @@ int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) { int rec = 0; bool report_bad_unlock = false; if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) { - if (flags()->report_mutex_bugs && !s->is_broken) { - s->is_broken = true; + if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) { + s->SetFlags(MutexFlagBroken); report_bad_unlock = true; } } else { - rec = all ? s->recursion : 1; + rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1; s->recursion -= rec; if (s->recursion == 0) { StatInc(thr, StatMutexUnlock); @@ -229,36 +252,53 @@ int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) { return rec; } -void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool trylock) { - DPrintf("#%d: MutexReadLock %zx\n", thr->tid, addr); +void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) { + DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz); + if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) { + SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false); + s->UpdateFlags(flagz); + Callback cb(thr, pc); + ctx->dd->MutexBeforeLock(&cb, &s->dd, false); + s->mtx.ReadUnlock(); + ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb)); + } +} + +void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) { + DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz); StatInc(thr, StatMutexReadLock); if (IsAppMem(addr)) MemoryReadAtomic(thr, pc, addr, kSizeLog1); SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false); + s->UpdateFlags(flagz); thr->fast_state.IncrementEpoch(); TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId()); bool report_bad_lock = false; if (s->owner_tid != SyncVar::kInvalidTid) { - if (flags()->report_mutex_bugs && !s->is_broken) { - s->is_broken = true; + if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) { + s->SetFlags(MutexFlagBroken); report_bad_lock = true; } } AcquireImpl(thr, pc, &s->clock); s->last_lock = thr->fast_state.raw(); thr->mset.Add(s->GetId(), false, thr->fast_state.epoch()); - if (common_flags()->detect_deadlocks && s->recursion == 0) { + bool pre_lock = false; + if (common_flags()->detect_deadlocks) { + pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) && + !(flagz & MutexFlagTryLock); Callback cb(thr, pc); - if (!trylock) + if (pre_lock) ctx->dd->MutexBeforeLock(&cb, &s->dd, false); - ctx->dd->MutexAfterLock(&cb, &s->dd, false, trylock); + ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock); } u64 mid = s->GetId(); s->mtx.ReadUnlock(); // Can't touch s after this point. 
+ s = 0; if (report_bad_lock) ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid); - if (common_flags()->detect_deadlocks) { + if (pre_lock && common_flags()->detect_deadlocks) { Callback cb(thr, pc); ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb)); } @@ -274,8 +314,8 @@ void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) { TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId()); bool report_bad_unlock = false; if (s->owner_tid != SyncVar::kInvalidTid) { - if (flags()->report_mutex_bugs && !s->is_broken) { - s->is_broken = true; + if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) { + s->SetFlags(MutexFlagBroken); report_bad_unlock = true; } } @@ -323,8 +363,8 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) { } else { StatInc(thr, StatMutexRecUnlock); } - } else if (!s->is_broken) { - s->is_broken = true; + } else if (!s->IsFlagSet(MutexFlagBroken)) { + s->SetFlags(MutexFlagBroken); report_bad_unlock = true; } thr->mset.Del(s->GetId(), write); diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cc index bc8944fbfb58..31b9e97898b0 100644 --- a/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cc +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cc @@ -164,8 +164,8 @@ void ScopedReport::AddStack(StackTrace stack, bool suppressable) { (*rs)->suppressable = suppressable; } -void ScopedReport::AddMemoryAccess(uptr addr, Shadow s, StackTrace stack, - const MutexSet *mset) { +void ScopedReport::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s, + StackTrace stack, const MutexSet *mset) { void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop)); ReportMop *mop = new(mem) ReportMop; rep_->mops.PushBack(mop); @@ -175,6 +175,7 @@ void ScopedReport::AddMemoryAccess(uptr addr, Shadow s, StackTrace stack, mop->write = s.IsWrite(); mop->atomic = s.IsAtomic(); mop->stack = SymbolizeStack(stack); + mop->external_tag = external_tag; if (mop->stack) mop->stack->suppressable = true; for (uptr i = 0; i < mset->Size(); i++) { @@ -202,6 +203,7 @@ void ScopedReport::AddThread(const ThreadContext *tctx, bool suppressable) { rt->running = (tctx->status == ThreadStatusRunning); rt->name = internal_strdup(tctx->name); rt->parent_tid = tctx->parent_tid; + rt->workerthread = tctx->workerthread; rt->stack = 0; rt->stack = SymbolizeStackId(tctx->creation_stack_id); if (rt->stack) @@ -336,6 +338,7 @@ void ScopedReport::AddLocation(uptr addr, uptr size) { ReportLocation *loc = ReportLocation::New(ReportLocationHeap); loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr); loc->heap_chunk_size = b->siz; + loc->external_tag = b->tag; loc->tid = tctx ? tctx->tid : b->tid; loc->stack = SymbolizeStackId(b->stk); rep_->locs.PushBack(loc); @@ -622,6 +625,8 @@ void ReportRace(ThreadState *thr) { typ = ReportTypeVptrRace; else if (freed) typ = ReportTypeUseAfterFree; + else if (thr->external_tag > 0) + typ = ReportTypeExternalRace; if (IsFiredSuppression(ctx, typ, addr)) return; @@ -650,7 +655,8 @@ void ReportRace(ThreadState *thr) { ScopedReport rep(typ); for (uptr i = 0; i < kMop; i++) { Shadow s(thr->racy_state[i]); - rep.AddMemoryAccess(addr, s, traces[i], i == 0 ? &thr->mset : mset2); + rep.AddMemoryAccess(addr, thr->external_tag, s, traces[i], + i == 0 ? 
&thr->mset : mset2); } for (uptr i = 0; i < kMop; i++) { diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cc index 5b17dc60bcbe..6a0943c49588 100644 --- a/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cc +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cc @@ -236,7 +236,7 @@ int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) { return tid; } -void ThreadStart(ThreadState *thr, int tid, uptr os_id) { +void ThreadStart(ThreadState *thr, int tid, tid_t os_id, bool workerthread) { uptr stk_addr = 0; uptr stk_size = 0; uptr tls_addr = 0; @@ -266,7 +266,7 @@ void ThreadStart(ThreadState *thr, int tid, uptr os_id) { ThreadRegistry *tr = ctx->thread_registry; OnStartedArgs args = { thr, stk_addr, stk_size, tls_addr, tls_size }; - tr->StartThread(tid, os_id, &args); + tr->StartThread(tid, os_id, workerthread, &args); tr->Lock(); thr->tctx = (ThreadContext*)tr->GetThreadLocked(tid); diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_stat.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_stat.cc index d1d6ed24d991..2ee688bf5771 100644 --- a/contrib/compiler-rt/lib/tsan/rtl/tsan_stat.cc +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_stat.cc @@ -153,6 +153,16 @@ void StatOutput(u64 *stat) { name[StatAnnotatePublishMemoryRange] = " PublishMemoryRange "; name[StatAnnotateUnpublishMemoryRange] = " UnpublishMemoryRange "; name[StatAnnotateThreadName] = " ThreadName "; + name[Stat__tsan_mutex_create] = " __tsan_mutex_create "; + name[Stat__tsan_mutex_destroy] = " __tsan_mutex_destroy "; + name[Stat__tsan_mutex_pre_lock] = " __tsan_mutex_pre_lock "; + name[Stat__tsan_mutex_post_lock] = " __tsan_mutex_post_lock "; + name[Stat__tsan_mutex_pre_unlock] = " __tsan_mutex_pre_unlock "; + name[Stat__tsan_mutex_post_unlock] = " __tsan_mutex_post_unlock "; + name[Stat__tsan_mutex_pre_signal] = " __tsan_mutex_pre_signal "; + name[Stat__tsan_mutex_post_signal] = " __tsan_mutex_post_signal "; + name[Stat__tsan_mutex_pre_divert] = " __tsan_mutex_pre_divert "; + name[Stat__tsan_mutex_post_divert] = " __tsan_mutex_post_divert "; name[StatMtxTotal] = "Contentionz "; name[StatMtxTrace] = " Trace "; diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_stat.h b/contrib/compiler-rt/lib/tsan/rtl/tsan_stat.h index 8447dd84fc17..7d2791ebbfcc 100644 --- a/contrib/compiler-rt/lib/tsan/rtl/tsan_stat.h +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_stat.h @@ -157,6 +157,16 @@ enum StatType { StatAnnotatePublishMemoryRange, StatAnnotateUnpublishMemoryRange, StatAnnotateThreadName, + Stat__tsan_mutex_create, + Stat__tsan_mutex_destroy, + Stat__tsan_mutex_pre_lock, + Stat__tsan_mutex_post_lock, + Stat__tsan_mutex_pre_unlock, + Stat__tsan_mutex_post_unlock, + Stat__tsan_mutex_pre_signal, + Stat__tsan_mutex_post_signal, + Stat__tsan_mutex_pre_divert, + Stat__tsan_mutex_post_divert, // Internal mutex contentionz. 
StatMtxTotal, diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_suppressions.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_suppressions.cc index bfb64e0018fb..e39702b7d22a 100644 --- a/contrib/compiler-rt/lib/tsan/rtl/tsan_suppressions.cc +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_suppressions.cc @@ -74,6 +74,8 @@ static const char *conv(ReportType typ) { return kSuppressionRace; else if (typ == ReportTypeVptrUseAfterFree) return kSuppressionRace; + else if (typ == ReportTypeExternalRace) + return kSuppressionRace; else if (typ == ReportTypeThreadLeak) return kSuppressionThread; else if (typ == ReportTypeMutexDestroyLocked) diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_sync.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_sync.cc index 44c6a26a1e8e..4cc3cb89c34f 100644 --- a/contrib/compiler-rt/lib/tsan/rtl/tsan_sync.cc +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_sync.cc @@ -42,10 +42,7 @@ void SyncVar::Reset(Processor *proc) { owner_tid = kInvalidTid; last_lock = 0; recursion = 0; - is_rw = 0; - is_recursive = 0; - is_broken = 0; - is_linker_init = 0; + atomic_store_relaxed(&flags, 0); if (proc == 0) { CHECK_EQ(clock.size(), 0); @@ -64,6 +61,7 @@ void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) { u32 idx = block_alloc_.Alloc(&thr->proc()->block_cache); MBlock *b = block_alloc_.Map(idx); b->siz = sz; + b->tag = 0; b->tid = thr->tid; b->stk = CurrentStackId(thr, pc); u32 *meta = MemToMeta(p); diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_sync.h b/contrib/compiler-rt/lib/tsan/rtl/tsan_sync.h index 86e6bbd55bac..d24d69762171 100644 --- a/contrib/compiler-rt/lib/tsan/rtl/tsan_sync.h +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_sync.h @@ -23,6 +23,29 @@ namespace __tsan { +// These need to match __tsan_mutex_* flags defined in tsan_interface.h. +// See documentation there as well. +enum MutexFlags { + MutexFlagLinkerInit = 1 << 0, // __tsan_mutex_linker_init + MutexFlagWriteReentrant = 1 << 1, // __tsan_mutex_write_reentrant + MutexFlagReadReentrant = 1 << 2, // __tsan_mutex_read_reentrant + MutexFlagReadLock = 1 << 3, // __tsan_mutex_read_lock + MutexFlagTryLock = 1 << 4, // __tsan_mutex_try_lock + MutexFlagTryLockFailed = 1 << 5, // __tsan_mutex_try_lock_failed + MutexFlagRecursiveLock = 1 << 6, // __tsan_mutex_recursive_lock + MutexFlagRecursiveUnlock = 1 << 7, // __tsan_mutex_recursive_unlock + + // The following flags are runtime private. + // Mutex API misuse was detected, so don't report any more. + MutexFlagBroken = 1 << 30, + // We did not intercept pre lock event, so handle it on post lock. + MutexFlagDoPreLockOnPostLock = 1 << 29, + // Must list all mutex creation flags. + MutexCreationFlagMask = MutexFlagLinkerInit | + MutexFlagWriteReentrant | + MutexFlagReadReentrant, +}; + struct SyncVar { SyncVar(); @@ -35,10 +58,7 @@ struct SyncVar { int owner_tid; // Set only by exclusive owners. u64 last_lock; int recursion; - bool is_rw; - bool is_recursive; - bool is_broken; - bool is_linker_init; + atomic_uint32_t flags; u32 next; // in MetaMap DDMutex dd; SyncClock read_clock; // Used for rw mutexes only. @@ -61,6 +81,26 @@ struct SyncVar { *uid = id >> 48; return (uptr)GetLsb(id, 48); } + + bool IsFlagSet(u32 f) const { + return atomic_load_relaxed(&flags); + } + + void SetFlags(u32 f) { + atomic_store_relaxed(&flags, atomic_load_relaxed(&flags) | f); + } + + void UpdateFlags(u32 flagz) { + // Filter out operation flags. 
+ if (!(flagz & MutexCreationFlagMask)) + return; + u32 current = atomic_load_relaxed(&flags); + if (current & MutexCreationFlagMask) + return; + // Note: this can be called from MutexPostReadLock which holds only read + // lock on the SyncVar. + atomic_store_relaxed(&flags, current | (flagz & MutexCreationFlagMask)); + } }; /* MetaMap allows to map arbitrary user pointers onto various descriptors.
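For completeness, a sketch of how a debugger or report hook could consume the new __tsan_get_report_loc_object_type entry point added in tsan_debugging.cc (not part of the patch). It assumes the report pointer comes from an existing debugging hook such as __tsan_get_current_report, and spells the interface's uptr parameter as unsigned long.

#include <stdio.h>

int __tsan_get_report_loc_object_type(void *report, unsigned long idx,
                                      const char **object_type);

// Hypothetical consumer: print the object type of the first report location.
// object_type stays NULL when the location carries no external tag.
void print_first_loc_object_type(void *report) {
  const char *object_type = NULL;
  if (__tsan_get_report_loc_object_type(report, 0, &object_type) &&
      object_type != NULL)
    printf("location 0 is a '%s' object\n", object_type);
}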