-rw-r--r-- | compiler-rt/lib/tsan/rtl/tsan_defs.h          |   1
-rw-r--r-- | compiler-rt/lib/tsan/rtl/tsan_interceptors.cc |   2
-rw-r--r-- | compiler-rt/lib/tsan/rtl/tsan_mman.cc         |   2
-rw-r--r-- | compiler-rt/lib/tsan/rtl/tsan_report.cc       |   2
-rw-r--r-- | compiler-rt/lib/tsan/rtl/tsan_rtl.cc          |  64
-rw-r--r-- | compiler-rt/lib/tsan/rtl/tsan_rtl.h           |  49
-rw-r--r-- | compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cc    |  48
-rw-r--r-- | compiler-rt/lib/tsan/rtl/tsan_rtl_report.cc   |  34
-rw-r--r-- | compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cc   | 363
9 files changed, 230 insertions, 335 deletions
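This commit drops TSan's hand-rolled thread bookkeeping (the ctx->thread_mtx mutex, the threads[kMaxTid] array, the dead-thread list and the local ThreadStatus enum) and moves it onto the generic ThreadRegistry from sanitizer_common. ThreadContext now derives from ThreadContextBase, per-thread setup and teardown move into the OnCreated/OnStarted/OnFinished/OnJoined/OnDead/OnReset callbacks, and the call sites that used to lock thread_mtx and walk the thread array by hand now take a ThreadRegistryLock and iterate through RunCallbackForEachThreadLocked.

The sketch below is a toy model of that iteration pattern, not the sanitizer_common implementation; ToyContext, ToyRegistry, ForEachThreadLocked and UpdateClock are invented names used only for illustration. It shows the shape the patch relies on: one lock owned by the registry, and a C-style callback (context pointer plus an opaque void* argument) invoked for every thread while that lock is held, mirroring UpdateClockCallback/AcquireGlobal in tsan_rtl_mutex.cc below.

// Toy model only -- ToyContext/ToyRegistry stand in for ThreadContextBase/
// ThreadRegistry; the real classes live in sanitizer_common.
#include <cstdio>
#include <mutex>
#include <vector>

struct ToyContext {
  int tid;
  bool running;
  unsigned long epoch;
};

class ToyRegistry {
 public:
  int CreateThread() {
    std::lock_guard<std::mutex> l(mtx_);
    threads_.push_back(ToyContext{(int)threads_.size(), true, 0});
    return threads_.back().tid;
  }
  // Analogue of RunCallbackForEachThreadLocked: the callback sees every
  // context while the registry lock is held, so callers no longer need
  // their own thread table or mutex.
  typedef void (*ThreadCallback)(ToyContext *tctx, void *arg);
  void ForEachThreadLocked(ThreadCallback cb, void *arg) {
    std::lock_guard<std::mutex> l(mtx_);
    for (ToyContext &t : threads_) cb(&t, arg);
  }
 private:
  std::mutex mtx_;
  std::vector<ToyContext> threads_;
};

// Analogue of UpdateClockCallback: fold each thread's epoch into a clock.
static void UpdateClock(ToyContext *tctx, void *arg) {
  unsigned long *clock = static_cast<unsigned long *>(arg);
  clock[tctx->tid] = tctx->epoch;
}

int main() {
  ToyRegistry registry;
  registry.CreateThread();
  registry.CreateThread();
  unsigned long clock[2] = {0, 0};
  registry.ForEachThreadLocked(UpdateClock, clock);  // like AcquireGlobal()
  std::printf("clock[0]=%lu clock[1]=%lu\n", clock[0], clock[1]);
  return 0;
}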
diff --git a/compiler-rt/lib/tsan/rtl/tsan_defs.h b/compiler-rt/lib/tsan/rtl/tsan_defs.h
index 5c5ab9ead6f..27f1db38f3a 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_defs.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_defs.h
@@ -156,7 +156,6 @@ struct MD5Hash {
 MD5Hash md5_hash(const void *data, uptr size);
 
 struct ThreadState;
-struct ThreadContext;
 struct Context;
 struct ReportStack;
 class ReportDesc;
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors.cc b/compiler-rt/lib/tsan/rtl/tsan_interceptors.cc
index 7935c2ff72e..352c7bb12b9 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interceptors.cc
+++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors.cc
@@ -1800,7 +1800,7 @@ void ProcessPendingSignals(ThreadState *thr) {
           (uptr)sigactions[sig].sa_sigaction :
           (uptr)sigactions[sig].sa_handler;
       stack.Init(&pc, 1);
-      Lock l(&ctx->thread_mtx);
+      ThreadRegistryLock l(ctx->thread_registry);
       ScopedReport rep(ReportTypeErrnoInSignal);
       if (!IsFiredSuppression(ctx, rep, stack)) {
         rep.AddStack(&stack);
diff --git a/compiler-rt/lib/tsan/rtl/tsan_mman.cc b/compiler-rt/lib/tsan/rtl/tsan_mman.cc
index fb324834e72..10b6ca5ff82 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_mman.cc
+++ b/compiler-rt/lib/tsan/rtl/tsan_mman.cc
@@ -56,7 +56,7 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
   Context *ctx = CTX();
   StackTrace stack;
   stack.ObtainCurrent(thr, pc);
-  Lock l(&ctx->thread_mtx);
+  ThreadRegistryLock l(ctx->thread_registry);
   ScopedReport rep(ReportTypeSignalUnsafe);
   if (!IsFiredSuppression(ctx, rep, stack)) {
     rep.AddStack(&stack);
diff --git a/compiler-rt/lib/tsan/rtl/tsan_report.cc b/compiler-rt/lib/tsan/rtl/tsan_report.cc
index f52f4560fdb..b394c406dcc 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_report.cc
+++ b/compiler-rt/lib/tsan/rtl/tsan_report.cc
@@ -138,7 +138,7 @@ static void PrintThread(const ReportThread *rt) {
   if (rt->id == 0)  // Little sense in describing the main thread.
     return;
   Printf("  Thread T%d", rt->id);
-  if (rt->name)
+  if (rt->name && rt->name[0] != '\0')
     Printf(" '%s'", rt->name);
   char thrbuf[kThreadBufSize];
   Printf(" (tid=%zu, %s) created by %s",
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.cc b/compiler-rt/lib/tsan/rtl/tsan_rtl.cc
index e533a9ca01e..1a23eda779c 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.cc
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.cc
@@ -47,12 +47,29 @@ Context *CTX() {
   return ctx;
 }
 
+static char thread_registry_placeholder[sizeof(ThreadRegistry)];
+
+static ThreadContextBase *CreateThreadContext(u32 tid) {
+  StatInc(cur_thread(), StatThreadMaxTid);
+  // Map thread trace when context is created.
+  MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event));
+  void *mem = MmapOrDie(sizeof(ThreadContext), "ThreadContext");
+  return new(mem) ThreadContext(tid);
+}
+
+#ifndef TSAN_GO
+static const u32 kThreadQuarantineSize = 16;
+#else
+static const u32 kThreadQuarantineSize = 64;
+#endif
+
 Context::Context()
   : initialized()
   , report_mtx(MutexTypeReport, StatMtxReport)
   , nreported()
   , nmissed_expected()
-  , thread_mtx(MutexTypeThreads, StatMtxThreads)
+  , thread_registry(new(thread_registry_placeholder) ThreadRegistry(
+      CreateThreadContext, kMaxTid, kThreadQuarantineSize))
   , racy_stacks(MBlockRacyStacks)
   , racy_addresses(MBlockRacyAddresses)
   , fired_suppressions(MBlockRacyAddresses) {
@@ -77,43 +94,14 @@ ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
   , tls_size(tls_size) {
 }
 
-ThreadContext::ThreadContext(int tid)
-  : tid(tid)
-  , unique_id()
-  , os_id()
-  , user_id()
-  , thr()
-  , status(ThreadStatusInvalid)
-  , detached()
-  , reuse_count()
-  , epoch0()
-  , epoch1()
-  , dead_info()
-  , dead_next()
-  , name() {
-}
-
 static void WriteMemoryProfile(char *buf, uptr buf_size, int num) {
   uptr shadow = GetShadowMemoryConsumption();
-  int nthread = 0;
-  int nlivethread = 0;
-  uptr threadmem = 0;
-  {
-    Lock l(&ctx->thread_mtx);
-    for (unsigned i = 0; i < kMaxTid; i++) {
-      ThreadContext *tctx = ctx->threads[i];
-      if (tctx == 0)
-        continue;
-      nthread += 1;
-      threadmem += sizeof(ThreadContext);
-      if (tctx->status != ThreadStatusRunning)
-        continue;
-      nlivethread += 1;
-      threadmem += sizeof(ThreadState);
-    }
-  }
-
+  uptr n_threads;
+  uptr n_running_threads;
+  ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
+  uptr threadmem = n_threads * sizeof(ThreadContext) +
+      n_running_threads * sizeof(ThreadState);
   uptr nsync = 0;
   uptr syncmem = CTX()->synctab.GetMemoryConsumption(&nsync);
 
@@ -122,7 +110,7 @@ static void WriteMemoryProfile(char *buf, uptr buf_size, int num) {
       " sync=%zuMB(cnt=%zu)\n",
       num,
       shadow >> 20,
-      threadmem >> 20, nthread, nlivethread,
+      threadmem >> 20, n_threads, n_running_threads,
       syncmem >> 20, nsync);
 }
 
@@ -203,9 +191,6 @@ void Initialize(ThreadState *thr) {
 #ifndef TSAN_GO
   InitializeShadowMemory();
 #endif
-  ctx->dead_list_size = 0;
-  ctx->dead_list_head = 0;
-  ctx->dead_list_tail = 0;
   InitializeFlags(&ctx->flags, env);
   // Setup correct file descriptor for error reports.
   if (internal_strcmp(flags()->log_path, "stdout") == 0)
@@ -234,7 +219,6 @@ void Initialize(ThreadState *thr) {
       GetPid());
 
   // Initialize thread 0.
-  ctx->thread_seq = 0;
   int tid = ThreadCreate(thr, 0, 0, true);
   CHECK_EQ(tid, 0);
   ThreadStart(thr, tid, GetPid());
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.h b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
index 64526360233..dbcede0b659 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
@@ -26,8 +26,9 @@
 #ifndef TSAN_RTL_H
 #define TSAN_RTL_H
 
-#include "sanitizer_common/sanitizer_common.h"
 #include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_thread_registry.h"
 #include "tsan_clock.h"
 #include "tsan_defs.h"
 #include "tsan_flags.h"
@@ -410,41 +411,31 @@ INLINE ThreadState *cur_thread() {
 }
 #endif
 
-enum ThreadStatus {
-  ThreadStatusInvalid,   // Non-existent thread, data is invalid.
-  ThreadStatusCreated,   // Created but not yet running.
-  ThreadStatusRunning,   // The thread is currently running.
-  ThreadStatusFinished,  // Joinable thread is finished but not yet joined.
-  ThreadStatusDead       // Joined, but some info (trace) is still alive.
-};
-
 // An info about a thread that is hold for some time after its termination.
 struct ThreadDeadInfo {
   Trace trace;
 };
 
-struct ThreadContext {
-  const int tid;
-  int unique_id;  // Non-rolling thread id.
-  uptr os_id;  // pid
-  uptr user_id;  // Some opaque user thread id (e.g. pthread_t).
+class ThreadContext : public ThreadContextBase {
+ public:
+  explicit ThreadContext(int tid);
   ThreadState *thr;
-  ThreadStatus status;
-  bool detached;
-  int reuse_count;
+  StackTrace creation_stack;
   SyncClock sync;
   // Epoch at which the thread had started.
   // If we see an event from the thread stamped by an older epoch,
   // the event is from a dead thread that shared tid with this thread.
   u64 epoch0;
   u64 epoch1;
-  StackTrace creation_stack;
-  int creation_tid;
   ThreadDeadInfo *dead_info;
-  ThreadContext *dead_next;  // In dead thread list.
-  char *name;  // As annotated by user.
-
-  explicit ThreadContext(int tid);
+
+  // Override superclass callbacks.
+  void OnDead();
+  void OnJoined(void *arg);
+  void OnFinished();
+  void OnStarted(void *arg);
+  void OnCreated(void *arg);
+  void OnReset(void *arg);
 };
 
 struct RacyStacks {
@@ -479,15 +470,7 @@ struct Context {
   int nreported;
   int nmissed_expected;
 
-  Mutex thread_mtx;
-  unsigned thread_seq;
-  unsigned unique_thread_seq;
-  int alive_threads;
-  int max_alive_threads;
-  ThreadContext *threads[kMaxTid];
-  int dead_list_size;
-  ThreadContext* dead_list_head;
-  ThreadContext* dead_list_tail;
+  ThreadRegistry *thread_registry;
 
   Vector<RacyStacks> racy_stacks;
   Vector<RacyAddress> racy_addresses;
@@ -543,6 +526,10 @@ void ALWAYS_INLINE INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
   if (kCollectStats)
     thr->stat[typ] += n;
 }
+void ALWAYS_INLINE INLINE StatSet(ThreadState *thr, StatType typ, u64 n) {
+  if (kCollectStats)
+    thr->stat[typ] = n;
+}
 
 void MapShadow(uptr addr, uptr size);
 void MapThreadTrace(uptr addr, uptr size);
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cc b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cc
index a07f6a2a0bb..1f3c7ac9d25 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cc
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cc
@@ -63,7 +63,7 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
       && s->owner_tid != SyncVar::kInvalidTid
       && !s->is_broken) {
     s->is_broken = true;
-    Lock l(&ctx->thread_mtx);
+    ThreadRegistryLock l(ctx->thread_registry);
     ScopedReport rep(ReportTypeMutexDestroyLocked);
     rep.AddMutex(s);
     StackTrace trace;
@@ -248,18 +248,19 @@ void Acquire(ThreadState *thr, uptr pc, uptr addr) {
   s->mtx.ReadUnlock();
 }
 
+static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
+  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
+  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
+  if (tctx->status == ThreadStatusRunning)
+    thr->clock.set(tctx->tid, tctx->thr->fast_state.epoch());
+  else
+    thr->clock.set(tctx->tid, tctx->epoch1);
+}
+
 void AcquireGlobal(ThreadState *thr, uptr pc) {
-  Context *ctx = CTX();
-  Lock l(&ctx->thread_mtx);
-  for (unsigned i = 0; i < kMaxTid; i++) {
-    ThreadContext *tctx = ctx->threads[i];
-    if (tctx == 0)
-      continue;
-    if (tctx->status == ThreadStatusRunning)
-      thr->clock.set(i, tctx->thr->fast_state.epoch());
-    else
-      thr->clock.set(i, tctx->epoch1);
-  }
+  ThreadRegistryLock l(CTX()->thread_registry);
+  CTX()->thread_registry->RunCallbackForEachThreadLocked(
+      UpdateClockCallback, thr);
 }
 
 void Release(ThreadState *thr, uptr pc, uptr addr) {
@@ -283,19 +284,20 @@ void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
 }
 
 #ifndef TSAN_GO
+static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
+  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
+  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
+  if (tctx->status == ThreadStatusRunning)
+    thr->last_sleep_clock.set(tctx->tid, tctx->thr->fast_state.epoch());
+  else
+    thr->last_sleep_clock.set(tctx->tid, tctx->epoch1);
+}
+
 void AfterSleep(ThreadState *thr, uptr pc) {
-  Context *ctx = CTX();
   thr->last_sleep_stack_id = CurrentStackId(thr, pc);
-  Lock l(&ctx->thread_mtx);
-  for (unsigned i = 0; i < kMaxTid; i++) {
-    ThreadContext *tctx = ctx->threads[i];
-    if (tctx == 0)
-      continue;
-    if (tctx->status == ThreadStatusRunning)
-      thr->last_sleep_clock.set(i, tctx->thr->fast_state.epoch());
-    else
-      thr->last_sleep_clock.set(i, tctx->epoch1);
-  }
+  ThreadRegistryLock l(CTX()->thread_registry);
+  CTX()->thread_registry->RunCallbackForEachThreadLocked(
+      UpdateSleepClockCallback, thr);
 }
 #endif
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cc b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cc
index de9a0e94cd3..69fd16bbd4c 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cc
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cc
@@ -125,7 +125,7 @@ static ReportStack *SymbolizeStack(const StackTrace& trace) {
 
 ScopedReport::ScopedReport(ReportType typ) {
   ctx_ = CTX();
-  ctx_->thread_mtx.CheckLocked();
+  ctx_->thread_registry->CheckLocked();
   void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
   rep_ = new(mem) ReportDesc;
   rep_->typ = typ;
@@ -177,7 +177,7 @@ void ScopedReport::AddMemoryAccess(uptr addr, Shadow s,
 
 void ScopedReport::AddThread(const ThreadContext *tctx) {
   for (uptr i = 0; i < rep_->threads.Size(); i++) {
-    if (rep_->threads[i]->id == tctx->tid)
+    if ((u32)rep_->threads[i]->id == tctx->tid)
       return;
   }
   void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
@@ -187,17 +187,18 @@ void ScopedReport::AddThread(const ThreadContext *tctx) {
   rt->pid = tctx->os_id;
   rt->running = (tctx->status == ThreadStatusRunning);
   rt->name = tctx->name ? internal_strdup(tctx->name) : 0;
-  rt->parent_tid = tctx->creation_tid;
+  rt->parent_tid = tctx->parent_tid;
   rt->stack = SymbolizeStack(tctx->creation_stack);
 }
 
 #ifndef TSAN_GO
-static ThreadContext *FindThread(int unique_id) {
+static ThreadContext *FindThreadLocked(int unique_id) {
   Context *ctx = CTX();
-  ctx->thread_mtx.CheckLocked();
+  ctx->thread_registry->CheckLocked();
   for (unsigned i = 0; i < kMaxTid; i++) {
-    ThreadContext *tctx = ctx->threads[i];
-    if (tctx && tctx->unique_id == unique_id) {
+    ThreadContext *tctx = static_cast<ThreadContext*>(
+        ctx->thread_registry->GetThreadLocked(i));
+    if (tctx && tctx->unique_id == (u32)unique_id) {
       return tctx;
     }
   }
@@ -206,9 +207,10 @@ static ThreadContext *FindThread(int unique_id) {
 
 ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
   Context *ctx = CTX();
-  ctx->thread_mtx.CheckLocked();
+  ctx->thread_registry->CheckLocked();
   for (unsigned i = 0; i < kMaxTid; i++) {
-    ThreadContext *tctx = ctx->threads[i];
+    ThreadContext *tctx = static_cast<ThreadContext*>(
+        ctx->thread_registry->GetThreadLocked(i));
     if (tctx == 0 || tctx->status != ThreadStatusRunning)
       continue;
     ThreadState *thr = tctx->thr;
@@ -274,14 +276,14 @@ void ScopedReport::AddLocation(uptr addr, uptr size) {
       trace.Init(stack, ssz);
       loc->stack = SymbolizeStack(trace);
     }
-    ThreadContext *tctx = FindThread(creat_tid);
+    ThreadContext *tctx = FindThreadLocked(creat_tid);
     if (tctx)
       AddThread(tctx);
     return;
   }
   if (allocator()->PointerIsMine((void*)addr)) {
     MBlock *b = user_mblock(0, (void*)addr);
-    ThreadContext *tctx = FindThread(b->alloc_tid);
+    ThreadContext *tctx = FindThreadLocked(b->alloc_tid);
     void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
     ReportLocation *loc = new(mem) ReportLocation();
     rep_->locs.PushBack(loc);
@@ -341,7 +343,10 @@ void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset) {
   // This function restores stack trace and mutex set for the thread/epoch.
   // It does so by getting stack trace and mutex set at the beginning of
   // trace part, and then replaying the trace till the given epoch.
-  ThreadContext *tctx = CTX()->threads[tid];
+  Context *ctx = CTX();
+  ctx->thread_registry->CheckLocked();
+  ThreadContext *tctx = static_cast<ThreadContext*>(
+      ctx->thread_registry->GetThreadLocked(tid));
   if (tctx == 0)
     return;
   Trace* trace = 0;
@@ -585,7 +590,7 @@ void ReportRace(ThreadState *thr) {
   }
 
   Context *ctx = CTX();
-  Lock l0(&ctx->thread_mtx);
+  ThreadRegistryLock l0(ctx->thread_registry);
 
   ScopedReport rep(freed ? ReportTypeUseAfterFree : ReportTypeRace);
   const uptr kMop = 2;
@@ -613,7 +618,8 @@ void ReportRace(ThreadState *thr) {
 
   for (uptr i = 0; i < kMop; i++) {
     FastState s(thr->racy_state[i]);
-    ThreadContext *tctx = ctx->threads[s.tid()];
+    ThreadContext *tctx = static_cast<ThreadContext*>(
+        ctx->thread_registry->GetThreadLocked(s.tid()));
     if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
       continue;
     rep.AddThread(tctx);
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cc b/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cc
index f25fb412854..ad7d505695a 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cc
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cc
@@ -20,13 +20,124 @@
 
 namespace __tsan {
 
+// ThreadContext implementation.
+
+ThreadContext::ThreadContext(int tid)
+  : ThreadContextBase(tid)
+  , thr()
+  , sync()
+  , epoch0()
+  , epoch1()
+  , dead_info() {
+}
+
+void ThreadContext::OnDead() {
+  sync.Reset();
+}
+
+void ThreadContext::OnJoined(void *arg) {
+  ThreadState *caller_thr = static_cast<ThreadState *>(arg);
+  caller_thr->clock.acquire(&sync);
+  StatInc(caller_thr, StatSyncAcquire);
+}
+
+struct OnCreatedArgs {
+  ThreadState *thr;
+  uptr pc;
+};
+
+void ThreadContext::OnCreated(void *arg) {
+  thr = 0;
+  if (tid != 0) {
+    OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg);
+    args->thr->fast_state.IncrementEpoch();
+    // Can't increment epoch w/o writing to the trace as well.
+    TraceAddEvent(args->thr, args->thr->fast_state, EventTypeMop, 0);
+    args->thr->clock.set(args->thr->tid, args->thr->fast_state.epoch());
+    args->thr->fast_synch_epoch = args->thr->fast_state.epoch();
+    args->thr->clock.release(&sync);
+    StatInc(args->thr, StatSyncRelease);
+    creation_stack.ObtainCurrent(args->thr, args->pc);
+  }
+}
+
+void ThreadContext::OnReset(void *arg) {
+  OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg);
+  StatInc(args->thr, StatThreadReuse);
+  sync.Reset();
+}
+
+struct OnStartedArgs {
+  ThreadState *thr;
+  uptr stk_addr;
+  uptr stk_size;
+  uptr tls_addr;
+  uptr tls_size;
+};
+
+void ThreadContext::OnStarted(void *arg) {
+  OnStartedArgs *args = static_cast<OnStartedArgs*>(arg);
+  // RoundUp so that one trace part does not contain events
+  // from different threads.
+  epoch0 = RoundUp(epoch1 + 1, kTracePartSize);
+  epoch1 = (u64)-1;
+  new(args->thr) ThreadState(CTX(), tid, unique_id,
+      epoch0, args->stk_addr, args->stk_size, args->tls_addr, args->tls_size);
+#ifdef TSAN_GO
+  // Setup dynamic shadow stack.
+  const int kInitStackSize = 8;
+  args->thr->shadow_stack = (uptr*)internal_alloc(MBlockShadowStack,
+      kInitStackSize * sizeof(uptr));
+  args->thr->shadow_stack_pos = thr->shadow_stack;
+  args->thr->shadow_stack_end = thr->shadow_stack + kInitStackSize;
+#endif
+#ifndef TSAN_GO
+  AllocatorThreadStart(args->thr);
+#endif
+  thr = args->thr;
+  thr->fast_synch_epoch = epoch0;
+  thr->clock.set(tid, epoch0);
+  thr->clock.acquire(&sync);
+  thr->fast_state.SetHistorySize(flags()->history_size);
+  const uptr trace = (epoch0 / kTracePartSize) % TraceParts();
+  thr->trace.headers[trace].epoch0 = epoch0;
+  StatInc(thr, StatSyncAcquire);
+  DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
+          "tls_addr=%zx tls_size=%zx\n",
+          tid, (uptr)epoch0, stk_addr, stk_size, tls_addr, tls_size);
+  thr->is_alive = true;
+}
+
+void ThreadContext::OnFinished() {
+  if (!detached) {
+    thr->fast_state.IncrementEpoch();
+    // Can't increment epoch w/o writing to the trace as well.
+    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+    thr->clock.set(thr->tid, thr->fast_state.epoch());
+    thr->fast_synch_epoch = thr->fast_state.epoch();
+    thr->clock.release(&sync);
+    StatInc(thr, StatSyncRelease);
+  }
+  // Save from info about the thread.
+  dead_info = new(internal_alloc(MBlockDeadInfo, sizeof(ThreadDeadInfo)))
+      ThreadDeadInfo();
+  for (uptr i = 0; i < TraceParts(); i++) {
+    dead_info->trace.headers[i].epoch0 = thr->trace.headers[i].epoch0;
+    dead_info->trace.headers[i].stack0.CopyFrom(
+        thr->trace.headers[i].stack0);
+  }
+  epoch1 = thr->fast_state.epoch();
+
 #ifndef TSAN_GO
-const int kThreadQuarantineSize = 16;
-#else
-const int kThreadQuarantineSize = 64;
+  AllocatorThreadFinish(thr);
 #endif
+  thr->~ThreadState();
+  StatAggregate(CTX()->stat, thr->stat);
+  thr = 0;
+}
 
-static void MaybeReportThreadLeak(ThreadContext *tctx) {
+static void MaybeReportThreadLeak(ThreadContextBase *tctx_base, void *unused) {
+  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
   if (tctx->detached)
     return;
   if (tctx->status != ThreadStatusCreated
@@ -42,122 +153,27 @@ void ThreadFinalize(ThreadState *thr) {
   CHECK_GT(thr->in_rtl, 0);
   if (!flags()->report_thread_leaks)
     return;
-  Context *ctx = CTX();
-  Lock l(&ctx->thread_mtx);
-  for (unsigned i = 0; i < kMaxTid; i++) {
-    ThreadContext *tctx = ctx->threads[i];
-    if (tctx == 0)
-      continue;
-    MaybeReportThreadLeak(tctx);
-  }
+  ThreadRegistryLock l(CTX()->thread_registry);
+  CTX()->thread_registry->RunCallbackForEachThreadLocked(
+      MaybeReportThreadLeak, 0);
 }
 
 int ThreadCount(ThreadState *thr) {
   CHECK_GT(thr->in_rtl, 0);
   Context *ctx = CTX();
-  Lock l(&ctx->thread_mtx);
-  int cnt = 0;
-  for (unsigned i = 0; i < kMaxTid; i++) {
-    ThreadContext *tctx = ctx->threads[i];
-    if (tctx == 0)
-      continue;
-    if (tctx->status != ThreadStatusCreated
-        && tctx->status != ThreadStatusRunning)
-      continue;
-    cnt++;
-  }
-  return cnt;
-}
-
-static void ThreadDead(ThreadState *thr, ThreadContext *tctx) {
-  Context *ctx = CTX();
-  CHECK_GT(thr->in_rtl, 0);
-  CHECK(tctx->status == ThreadStatusRunning
-      || tctx->status == ThreadStatusFinished);
-  DPrintf("#%d: ThreadDead uid=%zu\n", thr->tid, tctx->user_id);
-  tctx->status = ThreadStatusDead;
-  tctx->user_id = 0;
-  tctx->sync.Reset();
-
-  // Put to dead list.
-  tctx->dead_next = 0;
-  if (ctx->dead_list_size == 0)
-    ctx->dead_list_head = tctx;
-  else
-    ctx->dead_list_tail->dead_next = tctx;
-  ctx->dead_list_tail = tctx;
-  ctx->dead_list_size++;
+  uptr result;
+  ctx->thread_registry->GetNumberOfThreads(0, 0, &result);
+  return (int)result;
 }
 
 int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
   CHECK_GT(thr->in_rtl, 0);
-  Context *ctx = CTX();
-  Lock l(&ctx->thread_mtx);
   StatInc(thr, StatThreadCreate);
-  int tid = -1;
-  ThreadContext *tctx = 0;
-  if (ctx->dead_list_size > kThreadQuarantineSize
-      || ctx->thread_seq >= kMaxTid) {
-    // Reusing old thread descriptor and tid.
-    if (ctx->dead_list_size == 0) {
-      Printf("ThreadSanitizer: %d thread limit exceeded. Dying.\n",
-          kMaxTid);
-      Die();
-    }
-    StatInc(thr, StatThreadReuse);
-    tctx = ctx->dead_list_head;
-    ctx->dead_list_head = tctx->dead_next;
-    ctx->dead_list_size--;
-    if (ctx->dead_list_size == 0) {
-      CHECK_EQ(tctx->dead_next, 0);
-      ctx->dead_list_head = 0;
-    }
-    CHECK_EQ(tctx->status, ThreadStatusDead);
-    tctx->status = ThreadStatusInvalid;
-    tctx->reuse_count++;
-    tctx->sync.Reset();
-    tid = tctx->tid;
-    DestroyAndFree(tctx->dead_info);
-    if (tctx->name) {
-      internal_free(tctx->name);
-      tctx->name = 0;
-    }
-  } else {
-    // Allocating new thread descriptor and tid.
-    StatInc(thr, StatThreadMaxTid);
-    tid = ctx->thread_seq++;
-    void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
-    tctx = new(mem) ThreadContext(tid);
-    ctx->threads[tid] = tctx;
-    MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event));
-  }
-  CHECK_NE(tctx, 0);
-  CHECK_GE(tid, 0);
-  CHECK_LT(tid, kMaxTid);
+  Context *ctx = CTX();
+  OnCreatedArgs args = { thr, pc };
+  int tid = ctx->thread_registry->CreateThread(uid, detached, thr->tid, &args);
   DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", thr->tid, tid, uid);
-  CHECK_EQ(tctx->status, ThreadStatusInvalid);
-  ctx->alive_threads++;
-  if (ctx->max_alive_threads < ctx->alive_threads) {
-    ctx->max_alive_threads++;
-    CHECK_EQ(ctx->max_alive_threads, ctx->alive_threads);
-    StatInc(thr, StatThreadMaxAlive);
-  }
-  tctx->status = ThreadStatusCreated;
-  tctx->thr = 0;
-  tctx->user_id = uid;
-  tctx->unique_id = ctx->unique_thread_seq++;
-  tctx->detached = detached;
-  if (tid) {
-    thr->fast_state.IncrementEpoch();
-    // Can't increment epoch w/o writing to the trace as well.
-    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
-    thr->clock.set(thr->tid, thr->fast_state.epoch());
-    thr->fast_synch_epoch = thr->fast_state.epoch();
-    thr->clock.release(&tctx->sync);
-    StatInc(thr, StatSyncRelease);
-    tctx->creation_stack.ObtainCurrent(thr, pc);
-    tctx->creation_tid = thr->tid;
-  }
+  StatSet(thr, StatThreadMaxAlive, ctx->thread_registry->GetMaxAliveThreads());
  return tid;
 }
 
@@ -188,42 +204,8 @@ void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
     }
   }
 
-  Lock l(&CTX()->thread_mtx);
-  ThreadContext *tctx = CTX()->threads[tid];
-  CHECK_NE(tctx, 0);
-  CHECK_EQ(tctx->status, ThreadStatusCreated);
-  tctx->status = ThreadStatusRunning;
-  tctx->os_id = os_id;
-  // RoundUp so that one trace part does not contain events
-  // from different threads.
-  tctx->epoch0 = RoundUp(tctx->epoch1 + 1, kTracePartSize);
-  tctx->epoch1 = (u64)-1;
-  new(thr) ThreadState(CTX(), tid, tctx->unique_id,
-      tctx->epoch0, stk_addr, stk_size,
-      tls_addr, tls_size);
-#ifdef TSAN_GO
-  // Setup dynamic shadow stack.
-  const int kInitStackSize = 8;
-  thr->shadow_stack = (uptr*)internal_alloc(MBlockShadowStack,
-      kInitStackSize * sizeof(uptr));
-  thr->shadow_stack_pos = thr->shadow_stack;
-  thr->shadow_stack_end = thr->shadow_stack + kInitStackSize;
-#endif
-#ifndef TSAN_GO
-  AllocatorThreadStart(thr);
-#endif
-  tctx->thr = thr;
-  thr->fast_synch_epoch = tctx->epoch0;
-  thr->clock.set(tid, tctx->epoch0);
-  thr->clock.acquire(&tctx->sync);
-  thr->fast_state.SetHistorySize(flags()->history_size);
-  const uptr trace = (tctx->epoch0 / kTracePartSize) % TraceParts();
-  thr->trace.headers[trace].epoch0 = tctx->epoch0;
-  StatInc(thr, StatSyncAcquire);
-  DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
-          "tls_addr=%zx tls_size=%zx\n",
-          tid, (uptr)tctx->epoch0, stk_addr, stk_size, tls_addr, tls_size);
-  thr->is_alive = true;
+  OnStartedArgs args = { thr, stk_addr, stk_size, tls_addr, tls_size };
+  CTX()->thread_registry->StartThread(tid, os_id, &args);
 }
 
 void ThreadFinish(ThreadState *thr) {
@@ -242,57 +224,22 @@ void ThreadFinish(ThreadState *thr) {
   }
   thr->is_alive = false;
   Context *ctx = CTX();
-  Lock l(&ctx->thread_mtx);
-  ThreadContext *tctx = ctx->threads[thr->tid];
-  CHECK_NE(tctx, 0);
-  CHECK_EQ(tctx->status, ThreadStatusRunning);
-  CHECK_GT(ctx->alive_threads, 0);
-  ctx->alive_threads--;
-  if (tctx->detached) {
-    ThreadDead(thr, tctx);
-  } else {
-    thr->fast_state.IncrementEpoch();
-    // Can't increment epoch w/o writing to the trace as well.
-    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
-    thr->clock.set(thr->tid, thr->fast_state.epoch());
-    thr->fast_synch_epoch = thr->fast_state.epoch();
-    thr->clock.release(&tctx->sync);
-    StatInc(thr, StatSyncRelease);
-    tctx->status = ThreadStatusFinished;
-  }
+  ctx->thread_registry->FinishThread(thr->tid);
+}
 
-  // Save from info about the thread.
-  tctx->dead_info = new(internal_alloc(MBlockDeadInfo, sizeof(ThreadDeadInfo)))
-      ThreadDeadInfo();
-  for (uptr i = 0; i < TraceParts(); i++) {
-    tctx->dead_info->trace.headers[i].epoch0 = thr->trace.headers[i].epoch0;
-    tctx->dead_info->trace.headers[i].stack0.CopyFrom(
-        thr->trace.headers[i].stack0);
+static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) {
+  uptr uid = (uptr)arg;
+  if (tctx->user_id == uid && tctx->status != ThreadStatusInvalid) {
+    tctx->user_id = 0;
+    return true;
   }
-  tctx->epoch1 = thr->fast_state.epoch();
-
-#ifndef TSAN_GO
-  AllocatorThreadFinish(thr);
-#endif
-  thr->~ThreadState();
-  StatAggregate(ctx->stat, thr->stat);
-  tctx->thr = 0;
+  return false;
 }
 
 int ThreadTid(ThreadState *thr, uptr pc, uptr uid) {
   CHECK_GT(thr->in_rtl, 0);
   Context *ctx = CTX();
-  Lock l(&ctx->thread_mtx);
-  int res = -1;
-  for (unsigned tid = 0; tid < kMaxTid; tid++) {
-    ThreadContext *tctx = ctx->threads[tid];
-    if (tctx != 0 && tctx->user_id == uid
-        && tctx->status != ThreadStatusInvalid) {
-      tctx->user_id = 0;
-      res = tid;
-      break;
-    }
-  }
+  int res = ctx->thread_registry->FindThread(FindThreadByUid, (void*)uid);
   DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, res);
   return res;
 }
@@ -303,18 +250,7 @@ void ThreadJoin(ThreadState *thr, uptr pc, int tid) {
   CHECK_LT(tid, kMaxTid);
   DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
   Context *ctx = CTX();
-  Lock l(&ctx->thread_mtx);
-  ThreadContext *tctx = ctx->threads[tid];
-  if (tctx->status == ThreadStatusInvalid) {
-    Printf("ThreadSanitizer: join of non-existent thread\n");
-    return;
-  }
-  // FIXME(dvyukov): print message and continue (it's user error).
-  CHECK_EQ(tctx->detached, false);
-  CHECK_EQ(tctx->status, ThreadStatusFinished);
-  thr->clock.acquire(&tctx->sync);
-  StatInc(thr, StatSyncAcquire);
-  ThreadDead(thr, tctx);
+  ctx->thread_registry->JoinThread(tid, thr);
 }
 
 void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
@@ -322,31 +258,12 @@ void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
   CHECK_GT(tid, 0);
   CHECK_LT(tid, kMaxTid);
   Context *ctx = CTX();
-  Lock l(&ctx->thread_mtx);
-  ThreadContext *tctx = ctx->threads[tid];
-  if (tctx->status == ThreadStatusInvalid) {
-    Printf("ThreadSanitizer: detach of non-existent thread\n");
-    return;
-  }
-  if (tctx->status == ThreadStatusFinished) {
-    ThreadDead(thr, tctx);
-  } else {
-    tctx->detached = true;
-  }
+  ctx->thread_registry->DetachThread(tid);
 }
 
 void ThreadSetName(ThreadState *thr, const char *name) {
-  Context *ctx = CTX();
-  Lock l(&ctx->thread_mtx);
-  ThreadContext *tctx = ctx->threads[thr->tid];
-  CHECK_NE(tctx, 0);
-  CHECK_EQ(tctx->status, ThreadStatusRunning);
-  if (tctx->name) {
-    internal_free(tctx->name);
-    tctx->name = 0;
-  }
-  if (name)
-    tctx->name = internal_strdup(name);
+  CHECK_GT(thr->in_rtl, 0);
+  CTX()->thread_registry->SetThreadName(thr->tid, name);
 }
 
 void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
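A second idiom worth calling out in tsan_rtl_thread.cc: the per-thread work that used to sit inline in ThreadCreate/ThreadStart/ThreadFinish now lives in virtual hooks on the ThreadContext subclass (OnCreated, OnStarted, OnFinished, OnJoined), and the call sites pass their extra parameters through the registry as a small on-stack struct cast to void* (OnCreatedArgs, OnStartedArgs above). The following is a minimal, self-contained sketch of that mechanism under the assumption of a toy registry; Base, TsanLikeContext and ToyRegistry are illustrative names, not the sanitizer_common API.

// Toy model only -- shows lifecycle hooks plus void*-packed arguments,
// as used by OnCreatedArgs/OnStartedArgs in the patch above.
#include <cstdio>

struct Base {                       // stands in for ThreadContextBase
  int tid = -1;
  bool detached = false;
  virtual void OnCreated(void *arg) {}
  virtual void OnFinished() {}
  virtual ~Base() {}
};

struct OnCreatedArgs { int parent_tid; unsigned long pc; };  // mirrors the patch

struct TsanLikeContext : Base {     // stands in for __tsan::ThreadContext
  unsigned long creation_pc = 0;
  int parent_tid = -1;
  void OnCreated(void *arg) override {
    // The registry hands back whatever the creating thread packed in.
    OnCreatedArgs *a = static_cast<OnCreatedArgs *>(arg);
    creation_pc = a->pc;
    parent_tid = a->parent_tid;
  }
  void OnFinished() override { std::printf("T%d finished\n", tid); }
};

struct ToyRegistry {                // invokes the hooks at the right moments
  TsanLikeContext ctx;
  int CreateThread(bool detached, void *arg) {
    ctx.tid = 1;
    ctx.detached = detached;
    ctx.OnCreated(arg);             // like ThreadRegistry::CreateThread
    return ctx.tid;
  }
  void FinishThread(int /*tid*/) { ctx.OnFinished(); }
};

int main() {
  ToyRegistry registry;
  OnCreatedArgs args = {0, 0x1234};  // like ThreadCreate() packing { thr, pc }
  int tid = registry.CreateThread(false, &args);
  registry.FinishThread(tid);
  std::printf("T%d created by T%d at pc=%#lx\n", tid,
              registry.ctx.parent_tid, registry.ctx.creation_pc);
  return 0;
}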