Diffstat (limited to 'compiler-rt/lib')
-rw-r--r-- | compiler-rt/lib/tsan/lit_tests/ignore_sync.cc     |  30
-rw-r--r-- | compiler-rt/lib/tsan/rtl/tsan_interface_ann.cc    |  10
-rw-r--r-- | compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc |  33
-rw-r--r-- | compiler-rt/lib/tsan/rtl/tsan_rtl.cc              |  14
-rw-r--r-- | compiler-rt/lib/tsan/rtl/tsan_rtl.h               |   8
-rw-r--r-- | compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cc        | 106
-rw-r--r-- | compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cc       |  20
-rw-r--r-- | compiler-rt/lib/tsan/rtl/tsan_stat.cc             |   2
-rw-r--r-- | compiler-rt/lib/tsan/rtl/tsan_stat.h              |   2
9 files changed, 161 insertions, 64 deletions
diff --git a/compiler-rt/lib/tsan/lit_tests/ignore_sync.cc b/compiler-rt/lib/tsan/lit_tests/ignore_sync.cc
new file mode 100644
index 00000000000..67f2d906d9c
--- /dev/null
+++ b/compiler-rt/lib/tsan/lit_tests/ignore_sync.cc
@@ -0,0 +1,30 @@
+// RUN: %clangxx_tsan -O1 %s -o %t && not %t 2>&1 | FileCheck %s
+#include <pthread.h>
+#include <stdio.h>
+
+extern "C" void AnnotateIgnoreSyncBegin(const char*, int);
+extern "C" void AnnotateIgnoreSyncEnd(const char*, int);
+
+int Global;
+pthread_mutex_t Mutex = PTHREAD_MUTEX_INITIALIZER;
+
+void *Thread(void *x) {
+  AnnotateIgnoreSyncBegin(0, 0);
+  pthread_mutex_lock(&Mutex);
+  Global++;
+  pthread_mutex_unlock(&Mutex);
+  AnnotateIgnoreSyncEnd(0, 0);
+  return 0;
+}
+
+int main() {
+  pthread_t t;
+  pthread_create(&t, 0, Thread, 0);
+  pthread_mutex_lock(&Mutex);
+  Global++;
+  pthread_mutex_unlock(&Mutex);
+  pthread_join(t, 0);
+}
+
+// CHECK: WARNING: ThreadSanitizer: data race
+
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cc b/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cc
index 954a53163be..cacbc0281d0 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cc
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cc
@@ -401,6 +401,16 @@ void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesEnd(char *f, int l) {
   ThreadIgnoreEnd(thr);
 }
 
+void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncBegin(char *f, int l) {
+  SCOPED_ANNOTATION(AnnotateIgnoreSyncBegin);
+  ThreadIgnoreSyncBegin(thr);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncEnd(char *f, int l) {
+  SCOPED_ANNOTATION(AnnotateIgnoreSyncEnd);
+  ThreadIgnoreSyncEnd(thr);
+}
+
 void INTERFACE_ATTRIBUTE AnnotatePublishMemoryRange(
     char *f, int l, uptr addr, uptr size) {
   SCOPED_ANNOTATION(AnnotatePublishMemoryRange);
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc
index 24ce428839d..d9f8cdf5b10 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc
@@ -251,11 +251,10 @@ static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
   // Assume the access is atomic.
   if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a)) {
     MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
-    return *a;
+    return *a;  // as if atomic
   }
   SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, false);
-  thr->clock.set(thr->tid, thr->fast_state.epoch());
-  thr->clock.acquire(&s->clock);
+  AcquireImpl(thr, pc, &s->clock);
   T v = *a;
   s->mtx.ReadUnlock();
   __sync_synchronize();
@@ -273,13 +272,15 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
   // Strictly saying even relaxed store cuts off release sequence,
   // so must reset the clock.
   if (!IsReleaseOrder(mo) && sizeof(T) <= sizeof(a)) {
-    *a = v;
+    *a = v;  // as if atomic
     return;
   }
   __sync_synchronize();
   SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
-  thr->clock.set(thr->tid, thr->fast_state.epoch());
-  thr->clock.ReleaseStore(&s->clock);
+  thr->fast_state.IncrementEpoch();
+  // Can't increment epoch w/o writing to the trace as well.
+  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+  ReleaseImpl(thr, pc, &s->clock);
   *a = v;
   s->mtx.Unlock();
   // Trainling memory barrier to provide sequential consistency
@@ -293,13 +294,15 @@ static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
   SyncVar *s = 0;
   if (mo != mo_relaxed) {
     s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
-    thr->clock.set(thr->tid, thr->fast_state.epoch());
+    thr->fast_state.IncrementEpoch();
+    // Can't increment epoch w/o writing to the trace as well.
+    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
     if (IsAcqRelOrder(mo))
-      thr->clock.acq_rel(&s->clock);
+      AcquireReleaseImpl(thr, pc, &s->clock);
     else if (IsReleaseOrder(mo))
-      thr->clock.release(&s->clock);
+      ReleaseImpl(thr, pc, &s->clock);
     else if (IsAcquireOrder(mo))
-      thr->clock.acquire(&s->clock);
+      AcquireImpl(thr, pc, &s->clock);
   }
   v = F(a, v);
   if (s)
@@ -357,13 +360,15 @@ static bool AtomicCAS(ThreadState *thr, uptr pc,
   SyncVar *s = 0;
   if (mo != mo_relaxed) {
     s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
-    thr->clock.set(thr->tid, thr->fast_state.epoch());
+    thr->fast_state.IncrementEpoch();
+    // Can't increment epoch w/o writing to the trace as well.
+    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
     if (IsAcqRelOrder(mo))
-      thr->clock.acq_rel(&s->clock);
+      AcquireReleaseImpl(thr, pc, &s->clock);
     else if (IsReleaseOrder(mo))
-      thr->clock.release(&s->clock);
+      ReleaseImpl(thr, pc, &s->clock);
     else if (IsAcquireOrder(mo))
-      thr->clock.acquire(&s->clock);
+      AcquireImpl(thr, pc, &s->clock);
   }
   T cc = *c;
   T pr = func_cas(a, cc, v);
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.cc b/compiler-rt/lib/tsan/rtl/tsan_rtl.cc
index cb95df4e90a..9ecfd590201 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.cc
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.cc
@@ -699,7 +699,7 @@ void FuncExit(ThreadState *thr) {
 void ThreadIgnoreBegin(ThreadState *thr) {
   DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
   thr->ignore_reads_and_writes++;
-  CHECK_GE(thr->ignore_reads_and_writes, 0);
+  CHECK_GT(thr->ignore_reads_and_writes, 0);
   thr->fast_state.SetIgnoreBit();
 }
 
@@ -711,6 +711,18 @@ void ThreadIgnoreEnd(ThreadState *thr) {
   thr->fast_state.ClearIgnoreBit();
 }
 
+void ThreadIgnoreSyncBegin(ThreadState *thr) {
+  DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
+  thr->ignore_sync++;
+  CHECK_GT(thr->ignore_sync, 0);
+}
+
+void ThreadIgnoreSyncEnd(ThreadState *thr) {
+  DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
+  thr->ignore_sync--;
+  CHECK_GE(thr->ignore_sync, 0);
+}
+
 bool MD5Hash::operator==(const MD5Hash &other) const {
   return hash[0] == other.hash[0] && hash[1] == other.hash[1];
 }
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.h b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
index 9169aeb400c..fc12f5d765d 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
@@ -412,6 +412,7 @@ struct ThreadState {
   // We do not distinguish beteween ignoring reads and writes
   // for better performance.
   int ignore_reads_and_writes;
+  int ignore_sync;
   uptr *shadow_stack_pos;
   u64 *racy_shadow_addr;
   u64 racy_state[2];
@@ -680,8 +681,11 @@ void ALWAYS_INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc,
 void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
 void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
 void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);
+
 void ThreadIgnoreBegin(ThreadState *thr);
 void ThreadIgnoreEnd(ThreadState *thr);
+void ThreadIgnoreSyncBegin(ThreadState *thr);
+void ThreadIgnoreSyncEnd(ThreadState *thr);
 
 void FuncEntry(ThreadState *thr, uptr pc);
 void FuncExit(ThreadState *thr);
@@ -711,6 +715,10 @@ void AcquireGlobal(ThreadState *thr, uptr pc);
 void Release(ThreadState *thr, uptr pc, uptr addr);
 void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
 void AfterSleep(ThreadState *thr, uptr pc);
+void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
+void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
+void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c);
+void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
 
 // The hacky call uses custom calling convention and an assembly thunk.
 // It is considerably faster that a normal call for the caller
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cc b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cc
index c4bb0442dbb..98f32c2ad0e 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cc
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cc
@@ -100,11 +100,8 @@ void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec) {
   }
   if (s->recursion == 0) {
     StatInc(thr, StatMutexLock);
-    thr->clock.set(thr->tid, thr->fast_state.epoch());
-    thr->clock.acquire(&s->clock);
-    StatInc(thr, StatSyncAcquire);
-    thr->clock.acquire(&s->read_clock);
-    StatInc(thr, StatSyncAcquire);
+    AcquireImpl(thr, pc, &s->clock);
+    AcquireImpl(thr, pc, &s->read_clock);
   } else if (!s->is_recursive) {
     StatInc(thr, StatMutexRecLock);
   }
@@ -141,10 +138,12 @@ int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
   if (s->recursion == 0) {
     StatInc(thr, StatMutexUnlock);
     s->owner_tid = SyncVar::kInvalidTid;
-    thr->clock.set(thr->tid, thr->fast_state.epoch());
-    thr->fast_synch_epoch = thr->fast_state.epoch();
-    thr->clock.ReleaseStore(&s->clock);
-    StatInc(thr, StatSyncRelease);
+    if (thr->ignore_sync == 0) {
+      thr->clock.set(thr->tid, thr->fast_state.epoch());
+      thr->fast_synch_epoch = thr->fast_state.epoch();
+      thr->clock.ReleaseStore(&s->clock);
+      StatInc(thr, StatSyncRelease);
+    }
   } else {
     StatInc(thr, StatMutexRecUnlock);
   }
@@ -168,10 +167,8 @@ void MutexReadLock(ThreadState *thr, uptr pc, uptr addr) {
         addr);
     PrintCurrentStack(thr, pc);
   }
-  thr->clock.set(thr->tid, thr->fast_state.epoch());
-  thr->clock.acquire(&s->clock);
+  AcquireImpl(thr, pc, &s->clock);
   s->last_lock = thr->fast_state.raw();
-  StatInc(thr, StatSyncAcquire);
   thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
   s->mtx.ReadUnlock();
 }
@@ -190,10 +187,7 @@ void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
         addr);
     PrintCurrentStack(thr, pc);
   }
-  thr->clock.set(thr->tid, thr->fast_state.epoch());
-  thr->fast_synch_epoch = thr->fast_state.epoch();
-  thr->clock.release(&s->read_clock);
-  StatInc(thr, StatSyncRelease);
+  ReleaseImpl(thr, pc, &s->read_clock);
   s->mtx.Unlock();
   thr->mset.Del(s->GetId(), false);
 }
@@ -211,10 +205,7 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
     StatInc(thr, StatMutexReadUnlock);
     thr->fast_state.IncrementEpoch();
     TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
-    thr->clock.set(thr->tid, thr->fast_state.epoch());
-    thr->fast_synch_epoch = thr->fast_state.epoch();
-    thr->clock.release(&s->read_clock);
-    StatInc(thr, StatSyncRelease);
+    ReleaseImpl(thr, pc, &s->read_clock);
   } else if (s->owner_tid == thr->tid) {
     // Seems to be write unlock.
     thr->fast_state.IncrementEpoch();
@@ -224,14 +215,7 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
     if (s->recursion == 0) {
       StatInc(thr, StatMutexUnlock);
       s->owner_tid = SyncVar::kInvalidTid;
-      // FIXME: Refactor me, plz.
-      // The sequence of events is quite tricky and doubled in several places.
-      // First, it's a bug to increment the epoch w/o writing to the trace.
-      // Then, the acquire/release logic can be factored out as well.
-      thr->clock.set(thr->tid, thr->fast_state.epoch());
-      thr->fast_synch_epoch = thr->fast_state.epoch();
-      thr->clock.ReleaseStore(&s->clock);
-      StatInc(thr, StatSyncRelease);
+      ReleaseImpl(thr, pc, &s->clock);
     } else {
       StatInc(thr, StatMutexRecUnlock);
     }
@@ -248,10 +232,10 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
 void Acquire(ThreadState *thr, uptr pc, uptr addr) {
   CHECK_GT(thr->in_rtl, 0);
   DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
+  if (thr->ignore_sync)
+    return;
   SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, false);
-  thr->clock.set(thr->tid, thr->fast_state.epoch());
-  thr->clock.acquire(&s->clock);
-  StatInc(thr, StatSyncAcquire);
+  AcquireImpl(thr, pc, &s->clock);
   s->mtx.ReadUnlock();
 }
 
@@ -265,6 +249,9 @@ static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
 }
 
 void AcquireGlobal(ThreadState *thr, uptr pc) {
+  DPrintf("#%d: AcquireGlobal\n", thr->tid);
+  if (thr->ignore_sync)
+    return;
   ThreadRegistryLock l(CTX()->thread_registry);
   CTX()->thread_registry->RunCallbackForEachThreadLocked(
       UpdateClockCallback, thr);
@@ -273,20 +260,26 @@ void Release(ThreadState *thr, uptr pc, uptr addr) {
   CHECK_GT(thr->in_rtl, 0);
   DPrintf("#%d: Release %zx\n", thr->tid, addr);
+  if (thr->ignore_sync)
+    return;
   SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
-  thr->clock.set(thr->tid, thr->fast_state.epoch());
-  thr->clock.release(&s->clock);
-  StatInc(thr, StatSyncRelease);
+  thr->fast_state.IncrementEpoch();
+  // Can't increment epoch w/o writing to the trace as well.
+  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+  ReleaseImpl(thr, pc, &s->clock);
   s->mtx.Unlock();
 }
 
 void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
   CHECK_GT(thr->in_rtl, 0);
   DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
+  if (thr->ignore_sync)
+    return;
   SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
-  thr->clock.set(thr->tid, thr->fast_state.epoch());
-  thr->clock.ReleaseStore(&s->clock);
-  StatInc(thr, StatSyncRelease);
+  thr->fast_state.IncrementEpoch();
+  // Can't increment epoch w/o writing to the trace as well.
+  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+  ReleaseStoreImpl(thr, pc, &s->clock);
   s->mtx.Unlock();
 }
 
@@ -301,6 +294,9 @@ static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
 }
 
 void AfterSleep(ThreadState *thr, uptr pc) {
+  DPrintf("#%d: AfterSleep %zx\n", thr->tid);
+  if (thr->ignore_sync)
+    return;
   thr->last_sleep_stack_id = CurrentStackId(thr, pc);
   ThreadRegistryLock l(CTX()->thread_registry);
   CTX()->thread_registry->RunCallbackForEachThreadLocked(
@@ -308,4 +304,40 @@ void AfterSleep(ThreadState *thr, uptr pc) {
       UpdateSleepClockCallback, thr);
 }
 #endif
+void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
+  if (thr->ignore_sync)
+    return;
+  thr->clock.set(thr->tid, thr->fast_state.epoch());
+  thr->clock.acquire(c);
+  StatInc(thr, StatSyncAcquire);
+}
+
+void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
+  if (thr->ignore_sync)
+    return;
+  thr->clock.set(thr->tid, thr->fast_state.epoch());
+  thr->fast_synch_epoch = thr->fast_state.epoch();
+  thr->clock.release(c);
+  StatInc(thr, StatSyncRelease);
+}
+
+void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
+  if (thr->ignore_sync)
+    return;
+  thr->clock.set(thr->tid, thr->fast_state.epoch());
+  thr->fast_synch_epoch = thr->fast_state.epoch();
+  thr->clock.ReleaseStore(c);
+  StatInc(thr, StatSyncRelease);
+}
+
+void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
+  if (thr->ignore_sync)
+    return;
+  thr->clock.set(thr->tid, thr->fast_state.epoch());
+  thr->fast_synch_epoch = thr->fast_state.epoch();
+  thr->clock.acq_rel(c);
+  StatInc(thr, StatSyncAcquire);
+  StatInc(thr, StatSyncRelease);
+}
+
 }  // namespace __tsan
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cc b/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cc
index 81e1b0a2acb..3ed1457ef1e 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cc
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cc
@@ -41,8 +41,7 @@ void ThreadContext::OnDead() {
 
 void ThreadContext::OnJoined(void *arg) {
   ThreadState *caller_thr = static_cast<ThreadState *>(arg);
-  caller_thr->clock.acquire(&sync);
-  StatInc(caller_thr, StatSyncAcquire);
+  AcquireImpl(caller_thr, 0, &sync);
   sync.Reset();
 }
 
@@ -59,10 +58,7 @@ void ThreadContext::OnCreated(void *arg) {
   args->thr->fast_state.IncrementEpoch();
   // Can't increment epoch w/o writing to the trace as well.
   TraceAddEvent(args->thr, args->thr->fast_state, EventTypeMop, 0);
-  args->thr->clock.set(args->thr->tid, args->thr->fast_state.epoch());
-  args->thr->fast_synch_epoch = args->thr->fast_state.epoch();
-  args->thr->clock.release(&sync);
-  StatInc(args->thr, StatSyncRelease);
+  ReleaseImpl(args->thr, 0, &sync);
 #ifdef TSAN_GO
   creation_stack.ObtainCurrent(args->thr, args->pc);
 #else
@@ -108,8 +104,7 @@ void ThreadContext::OnStarted(void *arg) {
 #endif
   thr = args->thr;
   thr->fast_synch_epoch = epoch0;
-  thr->clock.set(tid, epoch0);
-  thr->clock.acquire(&sync);
+  AcquireImpl(thr, 0, &sync);
   thr->fast_state.SetHistorySize(flags()->history_size);
   const uptr trace = (epoch0 / kTracePartSize) % TraceParts();
   Trace *thr_trace = ThreadTrace(thr->tid);
@@ -128,10 +123,7 @@ void ThreadContext::OnFinished() {
     thr->fast_state.IncrementEpoch();
     // Can't increment epoch w/o writing to the trace as well.
     TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
-    thr->clock.set(thr->tid, thr->fast_state.epoch());
-    thr->fast_synch_epoch = thr->fast_state.epoch();
-    thr->clock.release(&sync);
-    StatInc(thr, StatSyncRelease);
+    ReleaseImpl(thr, 0, &sync);
   }
   epoch1 = thr->fast_state.epoch();
 
@@ -170,6 +162,10 @@ static void ThreadCheckIgnore(ThreadState *thr) {
     Printf("ThreadSanitizer: thread T%d finished with ignores enabled.\n",
         thr->tid);
   }
+  if (thr->ignore_sync) {
+    Printf("ThreadSanitizer: thread T%d finished with sync ignores enabled.\n",
+        thr->tid);
+  }
 }
 
 void ThreadFinalize(ThreadState *thr) {
diff --git a/compiler-rt/lib/tsan/rtl/tsan_stat.cc b/compiler-rt/lib/tsan/rtl/tsan_stat.cc
index 4fa91a69de3..30b4a9529b6 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_stat.cc
+++ b/compiler-rt/lib/tsan/rtl/tsan_stat.cc
@@ -405,6 +405,8 @@ void StatOutput(u64 *stat) {
   name[StatAnnotateIgnoreReadsEnd]       = "  IgnoreReadsEnd                  ";
   name[StatAnnotateIgnoreWritesBegin]    = "  IgnoreWritesBegin               ";
   name[StatAnnotateIgnoreWritesEnd]      = "  IgnoreWritesEnd                 ";
+  name[StatAnnotateIgnoreSyncBegin]      = "  IgnoreSyncBegin                 ";
+  name[StatAnnotateIgnoreSyncEnd]        = "  IgnoreSyncEnd                   ";
   name[StatAnnotatePublishMemoryRange]   = "  PublishMemoryRange              ";
   name[StatAnnotateUnpublishMemoryRange] = "  UnpublishMemoryRange            ";
   name[StatAnnotateThreadName]           = "  ThreadName                      ";
diff --git a/compiler-rt/lib/tsan/rtl/tsan_stat.h b/compiler-rt/lib/tsan/rtl/tsan_stat.h
index b97aee80a37..9129b221b48 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_stat.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_stat.h
@@ -401,6 +401,8 @@ enum StatType {
   StatAnnotateIgnoreReadsEnd,
   StatAnnotateIgnoreWritesBegin,
   StatAnnotateIgnoreWritesEnd,
+  StatAnnotateIgnoreSyncBegin,
+  StatAnnotateIgnoreSyncEnd,
   StatAnnotatePublishMemoryRange,
   StatAnnotateUnpublishMemoryRange,
   StatAnnotateThreadName,
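The two new entry points, AnnotateIgnoreSyncBegin and AnnotateIgnoreSyncEnd, are plain C functions, so callers must keep begin/end calls balanced themselves; ThreadCheckIgnore above only warns after the fact when a thread exits with sync ignores still enabled. A minimal sketch of how client code might use them follows. The annotation declarations match the lit test in this patch; the ScopedIgnoreSync RAII guard is a hypothetical convenience wrapper, not part of compiler-rt, and the program links only when built with -fsanitize=thread so the TSan runtime provides the annotation symbols.

#include <pthread.h>

// Entry points added by this patch, declared as in the lit test above.
extern "C" void AnnotateIgnoreSyncBegin(const char *file, int line);
extern "C" void AnnotateIgnoreSyncEnd(const char *file, int line);

// Hypothetical RAII guard (not in compiler-rt): keeps the Begin/End pair
// balanced even on early return or exception.
class ScopedIgnoreSync {
 public:
  ScopedIgnoreSync() { AnnotateIgnoreSyncBegin(__FILE__, __LINE__); }
  ~ScopedIgnoreSync() { AnnotateIgnoreSyncEnd(__FILE__, __LINE__); }
};

int Global;
pthread_mutex_t Mutex = PTHREAD_MUTEX_INITIALIZER;

void *Thread(void *x) {
  // Inside this scope TSan still instruments memory accesses, but the mutex
  // contributes no happens-before edges (thr->ignore_sync makes the
  // acquire/release in MutexLock/MutexUnlock no-ops), so the race on Global
  // is still reported -- the behavior ignore_sync.cc verifies.
  ScopedIgnoreSync ignore;
  pthread_mutex_lock(&Mutex);
  Global++;
  pthread_mutex_unlock(&Mutex);
  return 0;
}

int main() {
  pthread_t t;
  pthread_create(&t, 0, Thread, 0);
  pthread_mutex_lock(&Mutex);
  Global++;
  pthread_mutex_unlock(&Mutex);
  pthread_join(t, 0);
  return 0;
}

The guard pattern mirrors SCOPED_ANNOTATION's spirit on the runtime side: because ThreadIgnoreSyncBegin/End maintain a counter rather than a flag, these scopes nest safely.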