author | Dmitry Vyukov <dvyukov@google.com> | 2014-05-29 13:50:54 +0000
committer | Dmitry Vyukov <dvyukov@google.com> | 2014-05-29 13:50:54 +0000
commit | bde4c9c773eed5993d70dcc1f164ad033019ce83
tree | f69ffcafcdf1a95d97d24271f12640b4fcf926e9
parent | 53ae251a1714858e9d268da7a74988b0d24032c0
tsan: refactor storage of meta information for heap blocks and sync objects
The new storage (MetaMap) is based on a direct shadow mapping (instead of a hashmap + per-block lists); a short illustrative sketch of the mapping follows below.
This solves a number of problems:
- eliminates quadratic behaviour in SyncTab::GetAndLock (https://code.google.com/p/thread-sanitizer/issues/detail?id=26)
- eliminates contention in SyncTab
- eliminates contention in internal allocator during allocation of sync objects
- removes a bunch of ad-hoc code in the Java interface
- reduces Java shadow from 2x to 1/2x
- allows memorizing heap block meta info for Java and Go
- allows cleaning up sync object meta info for Go
  - which in turn enables the deadlock detector for Go
llvm-svn: 209810
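For orientation, here is a minimal, self-contained sketch of the direct-shadow idea referred to above: every kMetaShadowCell (8) user bytes map to one kMetaShadowSize (4) byte meta cell by pure address arithmetic, so metadata lookup needs no hashing and no lock. The arithmetic mirrors the Go-mode MemToMeta macro added in tsan_platform.h in the diff below; the type aliases, the base address constant, and the standalone main() are illustrative assumptions, not TSan code.

```cpp
#include <cstdio>

namespace sketch {

using uptr = unsigned long long;  // stand-in for TSan's uptr on a 64-bit target
using u32  = unsigned int;        // stand-in for TSan's u32

const uptr kMetaShadowCell = 8;              // user bytes covered by one meta cell
const uptr kMetaShadowSize = 4;              // each meta cell holds a u32 index
const uptr kMetaShadow = 0x300000000000ULL;  // assumed base of the meta region

// Same arithmetic as the Go-mode MemToMeta macro in tsan_platform.h below:
// drop the low bits of the cell, scale 8 user bytes down to 4 meta bytes,
// and offset into the fixed meta region.
inline u32 *MemToMeta(uptr addr) {
  return (u32 *)(((addr & ~(kMetaShadowCell - 1)) / kMetaShadowCell *
                  kMetaShadowSize) | kMetaShadow);
}

}  // namespace sketch

int main() {
  using sketch::MemToMeta;
  // Two addresses inside the same 8-byte cell share one meta slot; the next
  // cell gets the adjacent 4-byte slot. Lookup is pure address arithmetic,
  // so there is no hashmap and no lock on the lookup path.
  std::printf("%p %p %p\n", (void *)MemToMeta(0x1000), (void *)MemToMeta(0x1007),
              (void *)MemToMeta(0x1008));
  return 0;
}
```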
39 files changed, 965 insertions, 788 deletions
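The per-object metadata itself is carved out of slabs by the new DenseSlabAlloc (tsan_dense_alloc.h in the diff below), which hands out u32 indices instead of pointers and stores the freelist link inside the freed slots, so an index fits in a single 4-byte meta cell. A simplified, single-threaded sketch of that scheme (class name, kMaxSlabs, and the use of calloc() are illustrative, not the real allocator, which also has per-thread caches and a spin mutex):

```cpp
#include <cassert>
#include <cstdlib>
#include <cstring>

// Index-based slab allocator: Alloc() returns a u32-style index rather than a
// pointer, Map() converts an index back to the object, and a freed slot stores
// the next free index in its first bytes (so sizeof(T) must be >= sizeof(unsigned)).
// Index 0 is reserved as "invalid".
template <typename T, unsigned kSlabSize>
class IndexAlloc {
 public:
  IndexAlloc() : freelist_(0), fillpos_(0) {
    std::memset(slabs_, 0, sizeof(slabs_));
  }

  unsigned Alloc() {
    if (freelist_ == 0) Refill();
    unsigned idx = freelist_;
    freelist_ = *(unsigned *)Map(idx);  // pop: the slot holds the next free index
    return idx;
  }

  void Free(unsigned idx) {
    *(unsigned *)Map(idx) = freelist_;  // push: link the slot into the freelist
    freelist_ = idx;
  }

  T *Map(unsigned idx) {
    assert(idx != 0 && "index 0 is reserved as invalid");
    return &slabs_[idx / kSlabSize][idx % kSlabSize];
  }

 private:
  static const unsigned kMaxSlabs = 1024;
  T *slabs_[kMaxSlabs];
  unsigned freelist_;  // head of the freelist of indices, 0 == empty
  unsigned fillpos_;   // number of slabs allocated so far

  void Refill() {
    assert(fillpos_ < kMaxSlabs);
    T *batch = (T *)std::calloc(kSlabSize, sizeof(T));
    unsigned start = fillpos_ == 0 ? 1 : 0;  // skip index 0 in the first slab
    // Chain every new slot onto the freelist by writing the next index into it.
    for (unsigned i = start; i < kSlabSize; i++)
      *(unsigned *)(batch + i) = fillpos_ * kSlabSize + i + 1;
    *(unsigned *)(batch + kSlabSize - 1) = 0;  // terminate the freelist
    freelist_ = fillpos_ * kSlabSize + start;
    slabs_[fillpos_++] = batch;
  }
};
```

Because an index is half the size of a pointer, a single 4-byte meta shadow cell can reference either a heap block or the head of a sync-object list; the kFlagBlock/kFlagSync bits used by MetaMap in tsan_sync.cc below distinguish the two cases.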
diff --git a/compiler-rt/lib/tsan/CMakeLists.txt b/compiler-rt/lib/tsan/CMakeLists.txt index 3a71e9ac432..3126d5286ec 100644 --- a/compiler-rt/lib/tsan/CMakeLists.txt +++ b/compiler-rt/lib/tsan/CMakeLists.txt @@ -38,6 +38,7 @@ set(TSAN_SOURCES rtl/tsan_rtl_mutex.cc rtl/tsan_rtl_report.cc rtl/tsan_rtl_thread.cc + rtl/tsan_stack_trace.cc rtl/tsan_stat.cc rtl/tsan_suppressions.cc rtl/tsan_symbolize.cc @@ -54,6 +55,7 @@ endif() set(TSAN_HEADERS rtl/tsan_clock.h rtl/tsan_defs.h + rtl/tsan_dense_alloc.h rtl/tsan_fd.h rtl/tsan_flags.h rtl/tsan_ignoreset.h @@ -67,6 +69,7 @@ set(TSAN_HEADERS rtl/tsan_platform.h rtl/tsan_report.h rtl/tsan_rtl.h + rtl/tsan_stack_trace.h rtl/tsan_stat.h rtl/tsan_suppressions.h rtl/tsan_symbolize.h diff --git a/compiler-rt/lib/tsan/check_memcpy.sh b/compiler-rt/lib/tsan/check_memcpy.sh index fe3e49ee308..101df1166b7 100755 --- a/compiler-rt/lib/tsan/check_memcpy.sh +++ b/compiler-rt/lib/tsan/check_memcpy.sh @@ -17,7 +17,14 @@ EXE=$SRC.exe $CXX $SRC $CFLAGS -c -o $OBJ $CXX $OBJ $LDFLAGS -o $EXE -NCALL=$(objdump -d $EXE | egrep "callq .*__interceptor_mem(cpy|set)" | wc -l) +NCALL=$(objdump -d $EXE | egrep "callq .*<__interceptor_mem(cpy|set)>" | wc -l) +if [ "$NCALL" != "0" ]; then + echo FAIL: found $NCALL memcpy/memset calls + exit 1 +fi + +# tail calls +NCALL=$(objdump -d $EXE | egrep "jmpq .*<__interceptor_mem(cpy|set)>" | wc -l) if [ "$NCALL" != "0" ]; then echo FAIL: found $NCALL memcpy/memset calls exit 1 diff --git a/compiler-rt/lib/tsan/go/build.bat b/compiler-rt/lib/tsan/go/build.bat index bc567843ad4..a54bed93de6 100644 --- a/compiler-rt/lib/tsan/go/build.bat +++ b/compiler-rt/lib/tsan/go/build.bat @@ -1,4 +1,4 @@ -type tsan_go.cc ..\rtl\tsan_clock.cc ..\rtl\tsan_flags.cc ..\rtl\tsan_md5.cc ..\rtl\tsan_mutex.cc ..\rtl\tsan_report.cc ..\rtl\tsan_rtl.cc ..\rtl\tsan_rtl_mutex.cc ..\rtl\tsan_rtl_report.cc ..\rtl\tsan_rtl_thread.cc ..\rtl\tsan_stat.cc ..\rtl\tsan_suppressions.cc ..\rtl\tsan_sync.cc ..\..\sanitizer_common\sanitizer_allocator.cc ..\..\sanitizer_common\sanitizer_common.cc ..\..\sanitizer_common\sanitizer_flags.cc ..\..\sanitizer_common\sanitizer_stacktrace.cc ..\..\sanitizer_common\sanitizer_libc.cc ..\..\sanitizer_common\sanitizer_printf.cc ..\..\sanitizer_common\sanitizer_suppressions.cc ..\..\sanitizer_common\sanitizer_thread_registry.cc ..\rtl\tsan_platform_windows.cc ..\..\sanitizer_common\sanitizer_win.cc ..\..\sanitizer_common\sanitizer_deadlock_detector1.cc > gotsan.cc +type tsan_go.cc ..\rtl\tsan_clock.cc ..\rtl\tsan_flags.cc ..\rtl\tsan_md5.cc ..\rtl\tsan_mutex.cc ..\rtl\tsan_report.cc ..\rtl\tsan_rtl.cc ..\rtl\tsan_rtl_mutex.cc ..\rtl\tsan_rtl_report.cc ..\rtl\tsan_rtl_thread.cc ..\rtl\tsan_stat.cc ..\rtl\tsan_suppressions.cc ..\rtl\tsan_sync.cc ..\rtl\tsan_stack_trace.cc ..\..\sanitizer_common\sanitizer_allocator.cc ..\..\sanitizer_common\sanitizer_common.cc ..\..\sanitizer_common\sanitizer_flags.cc ..\..\sanitizer_common\sanitizer_stacktrace.cc ..\..\sanitizer_common\sanitizer_libc.cc ..\..\sanitizer_common\sanitizer_printf.cc ..\..\sanitizer_common\sanitizer_suppressions.cc ..\..\sanitizer_common\sanitizer_thread_registry.cc ..\rtl\tsan_platform_windows.cc ..\..\sanitizer_common\sanitizer_win.cc ..\..\sanitizer_common\sanitizer_deadlock_detector1.cc > gotsan.cc gcc -c -o race_windows_amd64.syso gotsan.cc -I..\rtl -I..\.. 
-I..\..\sanitizer_common -I..\..\..\include -m64 -Wall -fno-exceptions -fno-rtti -DTSAN_GO -DSANITIZER_GO -DTSAN_SHADOW_COUNT=4 -Wno-error=attributes -Wno-attributes -Wno-format -DTSAN_DEBUG=0 -O3 -fomit-frame-pointer diff --git a/compiler-rt/lib/tsan/go/buildgo.sh b/compiler-rt/lib/tsan/go/buildgo.sh index f9db35fc8cd..2c7f3f63903 100755 --- a/compiler-rt/lib/tsan/go/buildgo.sh +++ b/compiler-rt/lib/tsan/go/buildgo.sh @@ -12,6 +12,7 @@ SRCS=" ../rtl/tsan_rtl_mutex.cc ../rtl/tsan_rtl_report.cc ../rtl/tsan_rtl_thread.cc + ../rtl/tsan_stack_trace.cc ../rtl/tsan_stat.cc ../rtl/tsan_suppressions.cc ../rtl/tsan_sync.cc diff --git a/compiler-rt/lib/tsan/rtl/tsan_clock.cc b/compiler-rt/lib/tsan/rtl/tsan_clock.cc index d40f40f05a0..e140a3cb95c 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_clock.cc +++ b/compiler-rt/lib/tsan/rtl/tsan_clock.cc @@ -330,6 +330,11 @@ SyncClock::SyncClock() void SyncClock::Reset() { clk_.Reset(); + Zero(); +} + +void SyncClock::Zero() { + clk_.Resize(0); release_store_tid_ = kInvalidTid; release_store_reused_ = 0; for (uptr i = 0; i < kDirtyTids; i++) diff --git a/compiler-rt/lib/tsan/rtl/tsan_clock.h b/compiler-rt/lib/tsan/rtl/tsan_clock.h index 931fde80b07..f7ab69a3a81 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_clock.h +++ b/compiler-rt/lib/tsan/rtl/tsan_clock.h @@ -38,6 +38,7 @@ class SyncClock { } void Reset(); + void Zero(); void DebugDump(int(*printf)(const char *s, ...)); diff --git a/compiler-rt/lib/tsan/rtl/tsan_defs.h b/compiler-rt/lib/tsan/rtl/tsan_defs.h index 0ee19e92265..35f4e9432d1 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_defs.h +++ b/compiler-rt/lib/tsan/rtl/tsan_defs.h @@ -66,6 +66,13 @@ const uptr kShadowSize = 8; // Shadow memory is kShadowMultiplier times larger than user memory. const uptr kShadowMultiplier = kShadowSize * kShadowCnt / kShadowCell; +// That many user bytes are mapped onto a single meta shadow cell. +// Must be less or equal to minimal memory allocator alignment. +const uptr kMetaShadowCell = 8; + +// Size of a single meta shadow value (u32). +const uptr kMetaShadowSize = 4; + #if defined(TSAN_NO_HISTORY) && TSAN_NO_HISTORY const bool kCollectHistory = false; #else @@ -167,7 +174,15 @@ struct ReportStack; class ReportDesc; class RegionAlloc; class StackTrace; -struct MBlock; + +// Descriptor of user's memory block. +struct MBlock { + u64 siz; + u32 stk; + u16 tid; +}; + +COMPILER_CHECK(sizeof(MBlock) == 16); } // namespace __tsan diff --git a/compiler-rt/lib/tsan/rtl/tsan_dense_alloc.h b/compiler-rt/lib/tsan/rtl/tsan_dense_alloc.h new file mode 100644 index 00000000000..2c2e75e4438 --- /dev/null +++ b/compiler-rt/lib/tsan/rtl/tsan_dense_alloc.h @@ -0,0 +1,136 @@ +//===-- tsan_dense_alloc.h --------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +// A DenseSlabAlloc is a freelist-based allocator of fixed-size objects. +// DenseSlabAllocCache is a thread-local cache for DenseSlabAlloc. +// The only difference with traditional slab allocators is that DenseSlabAlloc +// allocates/free indices of objects and provide a functionality to map +// the index onto the real pointer. The index is u32, that is, 2 times smaller +// than uptr (hense the Dense prefix). 
+//===----------------------------------------------------------------------===// +#ifndef TSAN_DENSE_ALLOC_H +#define TSAN_DENSE_ALLOC_H + +#include "sanitizer_common/sanitizer_common.h" +#include "tsan_defs.h" +#include "tsan_mutex.h" + +namespace __tsan { + +class DenseSlabAllocCache { + static const uptr kSize = 128; + typedef u32 IndexT; + uptr pos; + IndexT cache[kSize]; + template<typename T, uptr kL1Size, uptr kL2Size> friend class DenseSlabAlloc; +}; + +template<typename T, uptr kL1Size, uptr kL2Size> +class DenseSlabAlloc { + public: + typedef DenseSlabAllocCache Cache; + typedef typename Cache::IndexT IndexT; + + DenseSlabAlloc() { + // Check that kL1Size and kL2Size are sane. + CHECK_EQ(kL1Size & (kL1Size - 1), 0); + CHECK_EQ(kL2Size & (kL2Size - 1), 0); + CHECK_GE(1ull << (sizeof(IndexT) * 8), kL1Size * kL2Size); + // Check that it makes sense to use the dense alloc. + CHECK_GE(sizeof(T), sizeof(IndexT)); + internal_memset(map_, 0, sizeof(map_)); + freelist_ = 0; + fillpos_ = 0; + } + + ~DenseSlabAlloc() { + for (uptr i = 0; i < kL1Size; i++) { + if (map_[i] != 0) + UnmapOrDie(map_[i], kL2Size * sizeof(T)); + } + } + + IndexT Alloc(Cache *c) { + if (c->pos == 0) + Refill(c); + return c->cache[--c->pos]; + } + + void Free(Cache *c, IndexT idx) { + if (c->pos == Cache::kSize) + Drain(c); + c->cache[c->pos++] = idx; + } + + T *Map(IndexT idx) { + DCHECK_NE(idx, 0); + DCHECK_LE(idx, kL1Size * kL2Size); + return &map_[idx / kL2Size][idx % kL2Size]; + } + + void FlushCache(Cache *c) { + SpinMutexLock lock(&mtx_); + while (c->pos) { + IndexT idx = c->cache[--c->pos]; + *(IndexT*)Map(idx) = freelist_; + freelist_ = idx; + } + } + + void InitCache(Cache *c) { + c->pos = 0; + internal_memset(c->cache, 0, sizeof(c->cache)); + } + + private: + T *map_[kL1Size]; + SpinMutex mtx_; + IndexT freelist_; + uptr fillpos_; + + void Refill(Cache *c) { + SpinMutexLock lock(&mtx_); + if (freelist_ == 0) { + if (fillpos_ == kL1Size) { + Printf("ThreadSanitizer: DenseSlabAllocator overflow. Dying.\n"); + Die(); + } + T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), "DenseSlabAllocator"); + // Reserve 0 as invalid index. + IndexT start = fillpos_ == 0 ? 
1 : 0; + for (IndexT i = start; i < kL2Size; i++) { + new(batch + i) T(); + *(IndexT*)(batch + i) = i + 1 + fillpos_ * kL2Size; + } + *(IndexT*)(batch + kL2Size - 1) = 0; + freelist_ = fillpos_ * kL2Size + start; + map_[fillpos_++] = batch; + } + for (uptr i = 0; i < Cache::kSize / 2 && freelist_ != 0; i++) { + IndexT idx = freelist_; + c->cache[c->pos++] = idx; + freelist_ = *(IndexT*)Map(idx); + } + } + + void Drain(Cache *c) { + SpinMutexLock lock(&mtx_); + for (uptr i = 0; i < Cache::kSize / 2; i++) { + IndexT idx = c->cache[--c->pos]; + *(IndexT*)Map(idx) = freelist_; + freelist_ = idx; + } + } +}; + +} // namespace __tsan + +#endif // TSAN_DENSE_ALLOC_H diff --git a/compiler-rt/lib/tsan/rtl/tsan_fd.cc b/compiler-rt/lib/tsan/rtl/tsan_fd.cc index 6c7fc174ae1..68242e03ce1 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_fd.cc +++ b/compiler-rt/lib/tsan/rtl/tsan_fd.cc @@ -47,8 +47,8 @@ static bool bogusfd(int fd) { return fd < 0 || fd >= kTableSize; } -static FdSync *allocsync() { - FdSync *s = (FdSync*)internal_alloc(MBlockFD, sizeof(FdSync)); +static FdSync *allocsync(ThreadState *thr, uptr pc) { + FdSync *s = (FdSync*)user_alloc(thr, pc, sizeof(FdSync)); atomic_store(&s->rc, 1, memory_order_relaxed); return s; } @@ -65,10 +65,7 @@ static void unref(ThreadState *thr, uptr pc, FdSync *s) { CHECK_NE(s, &fdctx.globsync); CHECK_NE(s, &fdctx.filesync); CHECK_NE(s, &fdctx.socksync); - SyncVar *v = ctx->synctab.GetAndRemove(thr, pc, (uptr)s); - if (v) - DestroyAndFree(v); - internal_free(s); + user_free(thr, pc, s); } } } @@ -219,7 +216,7 @@ void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd) { void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd) { DPrintf("#%d: FdCreatePipe(%d, %d)\n", thr->tid, rfd, wfd); - FdSync *s = allocsync(); + FdSync *s = allocsync(thr, pc); init(thr, pc, rfd, ref(s)); init(thr, pc, wfd, ref(s)); unref(thr, pc, s); @@ -229,7 +226,7 @@ void FdEventCreate(ThreadState *thr, uptr pc, int fd) { DPrintf("#%d: FdEventCreate(%d)\n", thr->tid, fd); if (bogusfd(fd)) return; - init(thr, pc, fd, allocsync()); + init(thr, pc, fd, allocsync(thr, pc)); } void FdSignalCreate(ThreadState *thr, uptr pc, int fd) { @@ -250,7 +247,7 @@ void FdPollCreate(ThreadState *thr, uptr pc, int fd) { DPrintf("#%d: FdPollCreate(%d)\n", thr->tid, fd); if (bogusfd(fd)) return; - init(thr, pc, fd, allocsync()); + init(thr, pc, fd, allocsync(thr, pc)); } void FdSocketCreate(ThreadState *thr, uptr pc, int fd) { diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors.cc b/compiler-rt/lib/tsan/rtl/tsan_interceptors.cc index 60e4b3aed63..8d294e017c2 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_interceptors.cc +++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors.cc @@ -191,6 +191,7 @@ ScopedInterceptor::~ScopedInterceptor() { if (!thr_->ignore_interceptors) { ProcessPendingSignals(thr_); FuncExit(thr_); + CheckNoLocks(thr_); } } @@ -1705,7 +1706,7 @@ static void CallUserSignalHandler(ThreadState *thr, bool sync, bool sigact, ScopedReport rep(ReportTypeErrnoInSignal); if (!IsFiredSuppression(ctx, rep, stack)) { rep.AddStack(&stack, true); - OutputReport(ctx, rep); + OutputReport(thr, rep); } } errno = saved_errno; diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cc b/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cc index 56323235051..a1725cb898e 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cc +++ b/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cc @@ -40,6 +40,7 @@ class ScopedAnnotation { ~ScopedAnnotation() { FuncExit(thr_); + CheckNoLocks(thr_); } private: ThreadState *const thr_; diff 
--git a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc index 2de0c4f11b3..7fbc9c67d13 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc +++ b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc @@ -291,7 +291,7 @@ static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>()); return NoTsanAtomicLoad(a, mo); } - SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, false); + SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, false); AcquireImpl(thr, pc, &s->clock); T v = NoTsanAtomicLoad(a, mo); s->mtx.ReadUnlock(); @@ -325,7 +325,7 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v, return; } __sync_synchronize(); - SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true); + SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true); thr->fast_state.IncrementEpoch(); // Can't increment epoch w/o writing to the trace as well. TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0); @@ -339,7 +339,7 @@ static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) { MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>()); SyncVar *s = 0; if (mo != mo_relaxed) { - s = ctx->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true); + s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true); thr->fast_state.IncrementEpoch(); // Can't increment epoch w/o writing to the trace as well. TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0); @@ -463,7 +463,7 @@ static bool AtomicCAS(ThreadState *thr, uptr pc, SyncVar *s = 0; bool write_lock = mo != mo_acquire && mo != mo_consume; if (mo != mo_relaxed) { - s = ctx->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock); + s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock); thr->fast_state.IncrementEpoch(); // Can't increment epoch w/o writing to the trace as well. 
TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0); diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_java.cc b/compiler-rt/lib/tsan/rtl/tsan_interface_java.cc index d0c003ea743..ee610188292 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_interface_java.cc +++ b/compiler-rt/lib/tsan/rtl/tsan_interface_java.cc @@ -22,54 +22,17 @@ using namespace __tsan; // NOLINT -namespace __tsan { - -const uptr kHeapShadow = 0x300000000000ull; -const uptr kHeapAlignment = 8; +const jptr kHeapAlignment = 8; -struct BlockDesc { - bool begin; - Mutex mtx; - SyncVar *head; - - BlockDesc() - : mtx(MutexTypeJavaMBlock, StatMtxJavaMBlock) - , head() { - CHECK_EQ(begin, false); - begin = true; - } - - ~BlockDesc() { - CHECK_EQ(begin, true); - begin = false; - ThreadState *thr = cur_thread(); - SyncVar *s = head; - while (s) { - SyncVar *s1 = s->next; - StatInc(thr, StatSyncDestroyed); - s->mtx.Lock(); - s->mtx.Unlock(); - thr->mset.Remove(s->GetId()); - DestroyAndFree(s); - s = s1; - } - } -}; +namespace __tsan { struct JavaContext { const uptr heap_begin; const uptr heap_size; - BlockDesc *heap_shadow; JavaContext(jptr heap_begin, jptr heap_size) : heap_begin(heap_begin) , heap_size(heap_size) { - uptr size = heap_size / kHeapAlignment * sizeof(BlockDesc); - heap_shadow = (BlockDesc*)MmapFixedNoReserve(kHeapShadow, size); - if ((uptr)heap_shadow != kHeapShadow) { - Printf("ThreadSanitizer: failed to mmap Java heap shadow\n"); - Die(); - } } }; @@ -93,63 +56,6 @@ class ScopedJavaFunc { static u64 jctx_buf[sizeof(JavaContext) / sizeof(u64) + 1]; static JavaContext *jctx; -static BlockDesc *getblock(uptr addr) { - uptr i = (addr - jctx->heap_begin) / kHeapAlignment; - return &jctx->heap_shadow[i]; -} - -static uptr USED getmem(BlockDesc *b) { - uptr i = b - jctx->heap_shadow; - uptr p = jctx->heap_begin + i * kHeapAlignment; - CHECK_GE(p, jctx->heap_begin); - CHECK_LT(p, jctx->heap_begin + jctx->heap_size); - return p; -} - -static BlockDesc *getblockbegin(uptr addr) { - for (BlockDesc *b = getblock(addr);; b--) { - CHECK_GE(b, jctx->heap_shadow); - if (b->begin) - return b; - } - return 0; -} - -SyncVar* GetJavaSync(ThreadState *thr, uptr pc, uptr addr, - bool write_lock, bool create) { - if (jctx == 0 || addr < jctx->heap_begin - || addr >= jctx->heap_begin + jctx->heap_size) - return 0; - BlockDesc *b = getblockbegin(addr); - DPrintf("#%d: GetJavaSync %p->%p\n", thr->tid, addr, b); - Lock l(&b->mtx); - SyncVar *s = b->head; - for (; s; s = s->next) { - if (s->addr == addr) { - DPrintf("#%d: found existing sync for %p\n", thr->tid, addr); - break; - } - } - if (s == 0 && create) { - DPrintf("#%d: creating new sync for %p\n", thr->tid, addr); - s = ctx->synctab.Create(thr, pc, addr); - s->next = b->head; - b->head = s; - } - if (s) { - if (write_lock) - s->mtx.Lock(); - else - s->mtx.ReadLock(); - } - return s; -} - -SyncVar* GetAndRemoveJavaSync(ThreadState *thr, uptr pc, uptr addr) { - // We do not destroy Java mutexes other than in __tsan_java_free(). 
- return 0; -} - } // namespace __tsan #define SCOPED_JAVA_FUNC(func) \ @@ -192,8 +98,7 @@ void __tsan_java_alloc(jptr ptr, jptr size) { CHECK_GE(ptr, jctx->heap_begin); CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size); - BlockDesc *b = getblock(ptr); - new(b) BlockDesc(); + OnUserAlloc(thr, pc, ptr, size, false); } void __tsan_java_free(jptr ptr, jptr size) { @@ -206,12 +111,7 @@ void __tsan_java_free(jptr ptr, jptr size) { CHECK_GE(ptr, jctx->heap_begin); CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size); - BlockDesc *beg = getblock(ptr); - BlockDesc *end = getblock(ptr + size); - for (BlockDesc *b = beg; b != end; b++) { - if (b->begin) - b->~BlockDesc(); - } + ctx->metamap.FreeRange(thr, pc, ptr, size); } void __tsan_java_move(jptr src, jptr dst, jptr size) { @@ -230,35 +130,15 @@ void __tsan_java_move(jptr src, jptr dst, jptr size) { // Assuming it's not running concurrently with threads that do // memory accesses and mutex operations (stop-the-world phase). - { // NOLINT - BlockDesc *s = getblock(src); - BlockDesc *d = getblock(dst); - BlockDesc *send = getblock(src + size); - for (; s != send; s++, d++) { - CHECK_EQ(d->begin, false); - if (s->begin) { - DPrintf("#%d: moving block %p->%p\n", thr->tid, getmem(s), getmem(d)); - new(d) BlockDesc; - d->head = s->head; - for (SyncVar *sync = d->head; sync; sync = sync->next) { - uptr newaddr = sync->addr - src + dst; - DPrintf("#%d: moving sync %p->%p\n", thr->tid, sync->addr, newaddr); - sync->addr = newaddr; - } - s->head = 0; - s->~BlockDesc(); - } - } - } - - { // NOLINT - u64 *s = (u64*)MemToShadow(src); - u64 *d = (u64*)MemToShadow(dst); - u64 *send = (u64*)MemToShadow(src + size); - for (; s != send; s++, d++) { - *d = *s; - *s = 0; - } + ctx->metamap.MoveMemory(src, dst, size); + + // Move shadow. 
+ u64 *s = (u64*)MemToShadow(src); + u64 *d = (u64*)MemToShadow(dst); + u64 *send = (u64*)MemToShadow(src + size); + for (; s != send; s++, d++) { + *d = *s; + *s = 0; } } diff --git a/compiler-rt/lib/tsan/rtl/tsan_mman.cc b/compiler-rt/lib/tsan/rtl/tsan_mman.cc index 4deeab5bfb3..8c037e155fe 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_mman.cc +++ b/compiler-rt/lib/tsan/rtl/tsan_mman.cc @@ -29,32 +29,6 @@ extern "C" void WEAK __tsan_free_hook(void *ptr) { namespace __tsan { -COMPILER_CHECK(sizeof(MBlock) == 16); - -void MBlock::Lock() { - atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this); - uptr v = atomic_load(a, memory_order_relaxed); - for (int iter = 0;; iter++) { - if (v & 1) { - if (iter < 10) - proc_yield(20); - else - internal_sched_yield(); - v = atomic_load(a, memory_order_relaxed); - continue; - } - if (atomic_compare_exchange_weak(a, &v, v | 1, memory_order_acquire)) - break; - } -} - -void MBlock::Unlock() { - atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this); - uptr v = atomic_load(a, memory_order_relaxed); - DCHECK(v & 1); - atomic_store(a, v & ~1, memory_order_relaxed); -} - struct MapUnmapCallback { void OnMap(uptr p, uptr size) const { } void OnUnmap(uptr p, uptr size) const { @@ -96,7 +70,7 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) { ScopedReport rep(ReportTypeSignalUnsafe); if (!IsFiredSuppression(ctx, rep, stack)) { rep.AddStack(&stack, true); - OutputReport(ctx, rep); + OutputReport(thr, rep); } } @@ -106,43 +80,36 @@ void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align) { void *p = allocator()->Allocate(&thr->alloc_cache, sz, align); if (p == 0) return 0; - MBlock *b = new(allocator()->GetMetaData(p)) MBlock; - b->Init(sz, thr->tid, CurrentStackId(thr, pc)); - if (ctx && ctx->initialized) { - if (thr->ignore_reads_and_writes == 0) - MemoryRangeImitateWrite(thr, pc, (uptr)p, sz); - else - MemoryResetRange(thr, pc, (uptr)p, sz); - } - DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p); + if (ctx && ctx->initialized) + OnUserAlloc(thr, pc, (uptr)p, sz, true); SignalUnsafeCall(thr, pc); return p; } void user_free(ThreadState *thr, uptr pc, void *p) { - CHECK_NE(p, (void*)0); - DPrintf("#%d: free(%p)\n", thr->tid, p); - MBlock *b = (MBlock*)allocator()->GetMetaData(p); - if (b->ListHead()) { - MBlock::ScopedLock l(b); - for (SyncVar *s = b->ListHead(); s;) { - SyncVar *res = s; - s = s->next; - StatInc(thr, StatSyncDestroyed); - res->mtx.Lock(); - res->mtx.Unlock(); - DestroyAndFree(res); - } - b->ListReset(); - } - if (ctx && ctx->initialized) { - if (thr->ignore_reads_and_writes == 0) - MemoryRangeFreed(thr, pc, (uptr)p, b->Size()); - } + if (ctx && ctx->initialized) + OnUserFree(thr, pc, (uptr)p, true); allocator()->Deallocate(&thr->alloc_cache, p); SignalUnsafeCall(thr, pc); } +void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) { + DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p); + ctx->metamap.AllocBlock(thr, pc, p, sz); + if (write && thr->ignore_reads_and_writes == 0) + MemoryRangeImitateWrite(thr, pc, (uptr)p, sz); + else + MemoryResetRange(thr, pc, (uptr)p, sz); +} + +void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) { + CHECK_NE(p, (void*)0); + uptr sz = ctx->metamap.FreeBlock(thr, pc, p); + DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz); + if (write && thr->ignore_reads_and_writes == 0) + MemoryRangeFreed(thr, pc, (uptr)p, sz); +} + void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) { void *p2 = 0; // FIXME: Handle "shrinking" more efficiently, @@ -152,9 
+119,8 @@ void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) { if (p2 == 0) return 0; if (p) { - MBlock *b = user_mblock(thr, p); - CHECK_NE(b, 0); - internal_memcpy(p2, p, min(b->Size(), sz)); + uptr oldsz = user_alloc_usable_size(thr, pc, p); + internal_memcpy(p2, p, min(oldsz, sz)); } } if (p) @@ -165,17 +131,8 @@ void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) { uptr user_alloc_usable_size(ThreadState *thr, uptr pc, void *p) { if (p == 0) return 0; - MBlock *b = (MBlock*)allocator()->GetMetaData(p); - return b ? b->Size() : 0; -} - -MBlock *user_mblock(ThreadState *thr, void *p) { - CHECK_NE(p, 0); - Allocator *a = allocator(); - void *b = a->GetBlockBegin(p); - if (b == 0) - return 0; - return (MBlock*)a->GetMetaData(b); + MBlock *b = ctx->metamap.GetBlock((uptr)p); + return b ? b->siz : 0; } void invoke_malloc_hook(void *ptr, uptr size) { @@ -247,16 +204,14 @@ bool __tsan_get_ownership(void *p) { uptr __tsan_get_allocated_size(void *p) { if (p == 0) return 0; - p = allocator()->GetBlockBegin(p); - if (p == 0) - return 0; - MBlock *b = (MBlock*)allocator()->GetMetaData(p); - return b->Size(); + MBlock *b = ctx->metamap.GetBlock((uptr)p); + return b->siz; } void __tsan_on_thread_idle() { ThreadState *thr = cur_thread(); allocator()->SwallowCache(&thr->alloc_cache); internal_allocator()->SwallowCache(&thr->internal_alloc_cache); + ctx->metamap.OnThreadIdle(thr); } } // extern "C" diff --git a/compiler-rt/lib/tsan/rtl/tsan_mman.h b/compiler-rt/lib/tsan/rtl/tsan_mman.h index 19d555437f3..67c6ee5eaa2 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_mman.h +++ b/compiler-rt/lib/tsan/rtl/tsan_mman.h @@ -32,9 +32,6 @@ void user_free(ThreadState *thr, uptr pc, void *p); void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz); void *user_alloc_aligned(ThreadState *thr, uptr pc, uptr sz, uptr align); uptr user_alloc_usable_size(ThreadState *thr, uptr pc, void *p); -// Given the pointer p into a valid allocated block, -// returns the descriptor of the block. -MBlock *user_mblock(ThreadState *thr, void *p); // Invoking malloc/free hooks that may be installed by the user. void invoke_malloc_hook(void *ptr, uptr size); @@ -62,7 +59,6 @@ enum MBlockType { MBlockSuppression, MBlockExpectRace, MBlockSignal, - MBlockFD, MBlockJmpBuf, // This must be the last. 
diff --git a/compiler-rt/lib/tsan/rtl/tsan_mutex.cc b/compiler-rt/lib/tsan/rtl/tsan_mutex.cc index 2c162082ad0..344c7f19499 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_mutex.cc +++ b/compiler-rt/lib/tsan/rtl/tsan_mutex.cc @@ -31,13 +31,13 @@ static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = { /*0 MutexTypeInvalid*/ {}, /*1 MutexTypeTrace*/ {MutexTypeLeaf}, /*2 MutexTypeThreads*/ {MutexTypeReport}, - /*3 MutexTypeReport*/ {MutexTypeSyncTab, MutexTypeSyncVar, + /*3 MutexTypeReport*/ {MutexTypeSyncVar, MutexTypeMBlock, MutexTypeJavaMBlock}, /*4 MutexTypeSyncVar*/ {MutexTypeDDetector}, - /*5 MutexTypeSyncTab*/ {MutexTypeSyncVar}, + /*5 MutexTypeSyncTab*/ {}, // unused /*6 MutexTypeSlab*/ {MutexTypeLeaf}, /*7 MutexTypeAnnotations*/ {}, - /*8 MutexTypeAtExit*/ {MutexTypeSyncTab}, + /*8 MutexTypeAtExit*/ {MutexTypeSyncVar}, /*9 MutexTypeMBlock*/ {MutexTypeSyncVar}, /*10 MutexTypeJavaMBlock*/ {MutexTypeSyncVar}, /*11 MutexTypeDDetector*/ {}, @@ -161,8 +161,20 @@ void InternalDeadlockDetector::Unlock(MutexType t) { CHECK(locked_[t]); locked_[t] = 0; } + +void InternalDeadlockDetector::CheckNoLocks() { + for (int i = 0; i != MutexTypeCount; i++) { + CHECK_EQ(locked_[i], 0); + } +} #endif +void CheckNoLocks(ThreadState *thr) { +#if TSAN_DEBUG && !TSAN_GO + thr->internal_deadlock_detector.CheckNoLocks(); +#endif +} + const uptr kUnlocked = 0; const uptr kWriteLock = 1; const uptr kReadLock = 2; diff --git a/compiler-rt/lib/tsan/rtl/tsan_mutex.h b/compiler-rt/lib/tsan/rtl/tsan_mutex.h index 12580fa9bd0..7bb1c48fcac 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_mutex.h +++ b/compiler-rt/lib/tsan/rtl/tsan_mutex.h @@ -71,6 +71,7 @@ class InternalDeadlockDetector { InternalDeadlockDetector(); void Lock(MutexType t); void Unlock(MutexType t); + void CheckNoLocks(); private: u64 seq_; u64 locked_[MutexTypeCount]; @@ -78,6 +79,10 @@ class InternalDeadlockDetector { void InitializeMutex(); +// Checks that the current thread does not hold any runtime locks +// (e.g. when returning from an interceptor). 
+void CheckNoLocks(ThreadState *thr); + } // namespace __tsan #endif // TSAN_MUTEX_H diff --git a/compiler-rt/lib/tsan/rtl/tsan_platform.h b/compiler-rt/lib/tsan/rtl/tsan_platform.h index 7abe5f0d706..3801d9323e6 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_platform.h +++ b/compiler-rt/lib/tsan/rtl/tsan_platform.h @@ -16,7 +16,9 @@ C++ linux memory layout: 0000 0000 0000 - 03c0 0000 0000: protected 03c0 0000 0000 - 1000 0000 0000: shadow -1000 0000 0000 - 6000 0000 0000: protected +1000 0000 0000 - 3000 0000 0000: protected +3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects) +4000 0000 0000 - 6000 0000 0000: protected 6000 0000 0000 - 6200 0000 0000: traces 6200 0000 0000 - 7d00 0000 0000: - 7d00 0000 0000 - 7e00 0000 0000: heap @@ -27,7 +29,9 @@ C++ COMPAT linux memory layout: 0400 0000 0000 - 1000 0000 0000: shadow 1000 0000 0000 - 2900 0000 0000: protected 2900 0000 0000 - 2c00 0000 0000: modules -2c00 0000 0000 - 6000 0000 0000: - +2c00 0000 0000 - 3000 0000 0000: - +3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects) +4000 0000 0000 - 6000 0000 0000: - 6000 0000 0000 - 6200 0000 0000: traces 6200 0000 0000 - 7d00 0000 0000: - 7d00 0000 0000 - 7e00 0000 0000: heap @@ -40,7 +44,9 @@ Go linux and darwin memory layout: 00c0 0000 0000 - 00e0 0000 0000: heap 00e0 0000 0000 - 1000 0000 0000: - 1000 0000 0000 - 1380 0000 0000: shadow -1460 0000 0000 - 6000 0000 0000: - +1460 0000 0000 - 2000 0000 0000: - +3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects) +4000 0000 0000 - 6000 0000 0000: - 6000 0000 0000 - 6200 0000 0000: traces 6200 0000 0000 - 7fff ffff ffff: - @@ -51,7 +57,8 @@ Go windows memory layout: 00e0 0000 0000 - 0100 0000 0000: - 0100 0000 0000 - 0560 0000 0000: shadow 0560 0000 0000 - 0760 0000 0000: traces -0760 0000 0000 - 07ff ffff ffff: - +0760 0000 0000 - 07d0 0000 0000: metainfo (memory blocks and sync objects) +07d0 0000 0000 - 07ff ffff ffff: - */ #ifndef TSAN_PLATFORM_H @@ -68,20 +75,28 @@ static const uptr kLinuxAppMemBeg = 0x000000000000ULL; static const uptr kLinuxAppMemEnd = 0x04dfffffffffULL; # if SANITIZER_WINDOWS static const uptr kLinuxShadowMsk = 0x010000000000ULL; -# else +static const uptr kMetaShadow = 0x076000000000ULL; +static const uptr kMetaSize = 0x007000000000ULL; +# else // if SANITIZER_WINDOWS static const uptr kLinuxShadowMsk = 0x200000000000ULL; -# endif +static const uptr kMetaShadow = 0x300000000000ULL; +static const uptr kMetaSize = 0x100000000000ULL; +# endif // if SANITIZER_WINDOWS +#else // defined(TSAN_GO) +static const uptr kMetaShadow = 0x300000000000ULL; +static const uptr kMetaSize = 0x100000000000ULL; // TSAN_COMPAT_SHADOW is intended for COMPAT virtual memory layout, // when memory addresses are of the 0x2axxxxxxxxxx form. // The option is enabled with 'setarch x86_64 -L'. -#elif defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW +# if defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW static const uptr kLinuxAppMemBeg = 0x290000000000ULL; static const uptr kLinuxAppMemEnd = 0x7fffffffffffULL; static const uptr kAppMemGapBeg = 0x2c0000000000ULL; static const uptr kAppMemGapEnd = 0x7d0000000000ULL; -#else +# else static const uptr kLinuxAppMemBeg = 0x7cf000000000ULL; static const uptr kLinuxAppMemEnd = 0x7fffffffffffULL; +# endif #endif static const uptr kLinuxAppMemMsk = 0x7c0000000000ULL; @@ -96,10 +111,16 @@ const uptr kTraceMemSize = 0x020000000000ULL; // This has to be a macro to allow constant initialization of constants below. 
#ifndef TSAN_GO #define MemToShadow(addr) \ - (((addr) & ~(kLinuxAppMemMsk | (kShadowCell - 1))) * kShadowCnt) + ((((uptr)addr) & ~(kLinuxAppMemMsk | (kShadowCell - 1))) * kShadowCnt) +#define MemToMeta(addr) \ + (u32*)(((((uptr)addr) & ~(kLinuxAppMemMsk | (kMetaShadowCell - 1))) \ + / kMetaShadowCell * kMetaShadowSize) | kMetaShadow) #else #define MemToShadow(addr) \ - ((((addr) & ~(kShadowCell - 1)) * kShadowCnt) | kLinuxShadowMsk) + (((((uptr)addr) & ~(kShadowCell - 1)) * kShadowCnt) | kLinuxShadowMsk) +#define MemToMeta(addr) \ + (u32*)(((((uptr)addr) & ~(kMetaShadowCell - 1)) \ + / kMetaShadowCell * kMetaShadowSize) | kMetaShadow) #endif static const uptr kLinuxShadowBeg = MemToShadow(kLinuxAppMemBeg); diff --git a/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cc b/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cc index 3c3a58b0d3d..7ad81d72622 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cc +++ b/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cc @@ -61,34 +61,49 @@ namespace __tsan { const uptr kPageSize = 4096; +enum { + MemTotal = 0, + MemShadow = 1, + MemMeta = 2, + MemFile = 3, + MemMmap = 4, + MemTrace = 5, + MemHeap = 6, + MemOther = 7, + MemCount = 8, +}; + void FillProfileCallback(uptr start, uptr rss, bool file, uptr *mem, uptr stats_size) { - CHECK_EQ(7, stats_size); - mem[6] += rss; // total + mem[MemTotal] += rss; start >>= 40; - if (start < 0x10) // shadow - mem[0] += rss; - else if (start >= 0x20 && start < 0x30) // compat modules - mem[file ? 1 : 2] += rss; - else if (start >= 0x7e) // modules - mem[file ? 1 : 2] += rss; - else if (start >= 0x60 && start < 0x62) // traces - mem[3] += rss; - else if (start >= 0x7d && start < 0x7e) // heap - mem[4] += rss; - else // other - mem[5] += rss; + if (start < 0x10) + mem[MemShadow] += rss; + else if (start >= 0x20 && start < 0x30) + mem[file ? MemFile : MemMmap] += rss; + else if (start >= 0x30 && start < 0x40) + mem[MemMeta] += rss; + else if (start >= 0x7e) + mem[file ? MemFile : MemMmap] += rss; + else if (start >= 0x60 && start < 0x62) + mem[MemTrace] += rss; + else if (start >= 0x7d && start < 0x7e) + mem[MemHeap] += rss; + else + mem[MemOther] += rss; } void WriteMemoryProfile(char *buf, uptr buf_size) { - uptr mem[7] = {}; + uptr mem[MemCount] = {}; __sanitizer::GetMemoryProfile(FillProfileCallback, mem, 7); char *buf_pos = buf; char *buf_end = buf + buf_size; buf_pos += internal_snprintf(buf_pos, buf_end - buf_pos, - "RSS %zd MB: shadow:%zd file:%zd mmap:%zd trace:%zd heap:%zd other:%zd\n", - mem[6] >> 20, mem[0] >> 20, mem[1] >> 20, mem[2] >> 20, - mem[3] >> 20, mem[4] >> 20, mem[5] >> 20); + "RSS %zd MB: shadow:%zd meta:%zd file:%zd mmap:%zd" + " trace:%zd heap:%zd other:%zd\n", + mem[MemTotal] >> 20, mem[MemShadow] >> 20, mem[MemMeta] >> 20, + mem[MemFile] >> 20, mem[MemMmap] >> 20, mem[MemTrace] >> 20, + mem[MemHeap] >> 20, mem[MemOther] >> 20); struct mallinfo mi = __libc_mallinfo(); buf_pos += internal_snprintf(buf_pos, buf_end - buf_pos, "mallinfo: arena=%d mmap=%d fordblks=%d keepcost=%d\n", @@ -123,9 +138,7 @@ static void ProtectRange(uptr beg, uptr end) { Die(); } } -#endif -#ifndef TSAN_GO // Mark shadow for .rodata sections with the special kShadowRodata marker. // Accesses to .rodata can't race, so this saves time, memory and trace space. static void MapRodata() { @@ -184,6 +197,7 @@ static void MapRodata() { } void InitializeShadowMemory() { + // Map memory shadow. 
uptr shadow = (uptr)MmapFixedNoReserve(kLinuxShadowBeg, kLinuxShadowEnd - kLinuxShadowBeg); if (shadow != kLinuxShadowBeg) { @@ -192,23 +206,48 @@ void InitializeShadowMemory() { "to link with -pie (%p, %p).\n", shadow, kLinuxShadowBeg); Die(); } + DPrintf("memory shadow: %zx-%zx (%zuGB)\n", + kLinuxShadowBeg, kLinuxShadowEnd, + (kLinuxShadowEnd - kLinuxShadowBeg) >> 30); + + // Map meta shadow. + if (MemToMeta(kLinuxAppMemBeg) < (u32*)kMetaShadow) { + Printf("ThreadSanitizer: bad meta shadow (%p -> %p < %p)\n", + kLinuxAppMemBeg, MemToMeta(kLinuxAppMemBeg), kMetaShadow); + Die(); + } + if (MemToMeta(kLinuxAppMemEnd) >= (u32*)(kMetaShadow + kMetaSize)) { + Printf("ThreadSanitizer: bad meta shadow (%p -> %p >= %p)\n", + kLinuxAppMemEnd, MemToMeta(kLinuxAppMemEnd), kMetaShadow + kMetaSize); + Die(); + } + uptr meta = (uptr)MmapFixedNoReserve(kMetaShadow, kMetaSize); + if (meta != kMetaShadow) { + Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n"); + Printf("FATAL: Make sure to compile with -fPIE and " + "to link with -pie (%p, %p).\n", meta, kMetaShadow); + Die(); + } + DPrintf("meta shadow: %zx-%zx (%zuGB)\n", + kMetaShadow, kMetaShadow + kMetaSize, kMetaSize >> 30); + + // Protect gaps. const uptr kClosedLowBeg = 0x200000; const uptr kClosedLowEnd = kLinuxShadowBeg - 1; const uptr kClosedMidBeg = kLinuxShadowEnd + 1; - const uptr kClosedMidEnd = min(kLinuxAppMemBeg, kTraceMemBegin); + const uptr kClosedMidEnd = min(min(kLinuxAppMemBeg, kTraceMemBegin), + kMetaShadow); + ProtectRange(kClosedLowBeg, kClosedLowEnd); ProtectRange(kClosedMidBeg, kClosedMidEnd); - DPrintf("kClosedLow %zx-%zx (%zuGB)\n", + VPrintf(2, "kClosedLow %zx-%zx (%zuGB)\n", kClosedLowBeg, kClosedLowEnd, (kClosedLowEnd - kClosedLowBeg) >> 30); - DPrintf("kLinuxShadow %zx-%zx (%zuGB)\n", - kLinuxShadowBeg, kLinuxShadowEnd, - (kLinuxShadowEnd - kLinuxShadowBeg) >> 30); - DPrintf("kClosedMid %zx-%zx (%zuGB)\n", + VPrintf(2, "kClosedMid %zx-%zx (%zuGB)\n", kClosedMidBeg, kClosedMidEnd, (kClosedMidEnd - kClosedMidBeg) >> 30); - DPrintf("kLinuxAppMem %zx-%zx (%zuGB)\n", + VPrintf(2, "app mem: %zx-%zx (%zuGB)\n", kLinuxAppMemBeg, kLinuxAppMemEnd, (kLinuxAppMemEnd - kLinuxAppMemBeg) >> 30); - DPrintf("stack %zx\n", (uptr)&shadow); + VPrintf(2, "stack: %zx\n", (uptr)&shadow); MapRodata(); } diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.cc b/compiler-rt/lib/tsan/rtl/tsan_rtl.cc index 39e78c07bec..b4c564f5da2 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_rtl.cc +++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.cc @@ -131,6 +131,7 @@ static void BackgroundThread(void *arg) { fd_t mprof_fd = kInvalidFd; if (flags()->profile_memory && flags()->profile_memory[0]) { + // FIXME(dvyukov): support stdout/stderr InternalScopedBuffer<char> filename(4096); internal_snprintf(filename.data(), filename.size(), "%s.%d", flags()->profile_memory, (int)internal_getpid()); @@ -144,6 +145,7 @@ static void BackgroundThread(void *arg) { } u64 last_flush = NanoTime(); + u64 last_rss_check = NanoTime(); uptr last_rss = 0; for (int i = 0; atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0; @@ -160,7 +162,9 @@ static void BackgroundThread(void *arg) { last_flush = NanoTime(); } } - if (flags()->memory_limit_mb > 0) { + // GetRSS can be expensive on huge programs, so don't do it every 100ms. 
+ if (flags()->memory_limit_mb > 0 && last_rss_check + 1000 * kMs2Ns < now) { + last_rss_check = now; uptr rss = GetRSS(); uptr limit = uptr(flags()->memory_limit_mb) << 20; if (flags()->verbosity > 0) { @@ -222,6 +226,22 @@ void MapShadow(uptr addr, uptr size) { // so we can get away with unaligned mapping. // CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier); + + // Meta shadow is 2:1, so tread carefully. + static uptr mapped_meta_end = 0; + uptr meta_begin = (uptr)MemToMeta(addr); + uptr meta_end = (uptr)MemToMeta(addr + size); + // windows wants 64K alignment + meta_begin = RoundDownTo(meta_begin, 64 << 10); + meta_end = RoundUpTo(meta_end, 64 << 10); + if (meta_end <= mapped_meta_end) + return; + if (meta_begin < mapped_meta_end) + meta_begin = mapped_meta_end; + MmapFixedNoReserve(meta_begin, meta_end - meta_begin); + mapped_meta_end = meta_end; + DPrintf("mapped meta shadow for (%p-%p) at (%p-%p)\n", + addr, addr+size, meta_begin, meta_end); } void MapThreadTrace(uptr addr, uptr size) { diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.h b/compiler-rt/lib/tsan/rtl/tsan_rtl.h index ed0e0b89028..9a44a040ec2 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_rtl.h +++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.h @@ -44,6 +44,7 @@ #include "tsan_platform.h" #include "tsan_mutexset.h" #include "tsan_ignoreset.h" +#include "tsan_stack_trace.h" #if SANITIZER_WORDSIZE != 64 # error "ThreadSanitizer is supported only on 64-bit platforms" @@ -51,77 +52,6 @@ namespace __tsan { -// Descriptor of user's memory block. -struct MBlock { - /* - u64 mtx : 1; // must be first - u64 lst : 44; - u64 stk : 31; // on word boundary - u64 tid : kTidBits; - u64 siz : 128 - 1 - 31 - 44 - kTidBits; // 39 - */ - u64 raw[2]; - - void Init(uptr siz, u32 tid, u32 stk) { - raw[0] = raw[1] = 0; - raw[1] |= (u64)siz << ((1 + 44 + 31 + kTidBits) % 64); - raw[1] |= (u64)tid << ((1 + 44 + 31) % 64); - raw[0] |= (u64)stk << (1 + 44); - raw[1] |= (u64)stk >> (64 - 44 - 1); - DCHECK_EQ(Size(), siz); - DCHECK_EQ(Tid(), tid); - DCHECK_EQ(StackId(), stk); - } - - u32 Tid() const { - return GetLsb(raw[1] >> ((1 + 44 + 31) % 64), kTidBits); - } - - uptr Size() const { - return raw[1] >> ((1 + 31 + 44 + kTidBits) % 64); - } - - u32 StackId() const { - return (raw[0] >> (1 + 44)) | GetLsb(raw[1] << (64 - 44 - 1), 31); - } - - SyncVar *ListHead() const { - return (SyncVar*)(GetLsb(raw[0] >> 1, 44) << 3); - } - - void ListPush(SyncVar *v) { - SyncVar *lst = ListHead(); - v->next = lst; - u64 x = (u64)v ^ (u64)lst; - x = (x >> 3) << 1; - raw[0] ^= x; - DCHECK_EQ(ListHead(), v); - } - - SyncVar *ListPop() { - SyncVar *lst = ListHead(); - SyncVar *nxt = lst->next; - lst->next = 0; - u64 x = (u64)lst ^ (u64)nxt; - x = (x >> 3) << 1; - raw[0] ^= x; - DCHECK_EQ(ListHead(), nxt); - return lst; - } - - void ListReset() { - SyncVar *lst = ListHead(); - u64 x = (u64)lst; - x = (x >> 3) << 1; - raw[0] ^= x; - DCHECK_EQ(ListHead(), 0); - } - - void Lock(); - void Unlock(); - typedef GenericScopedLock<MBlock> ScopedLock; -}; - #ifndef TSAN_GO #if defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW const uptr kAllocatorSpace = 0x7d0000000000ULL; @@ -131,7 +61,7 @@ const uptr kAllocatorSpace = 0x7d0000000000ULL; const uptr kAllocatorSize = 0x10000000000ULL; // 1T. 
struct MapUnmapCallback; -typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, sizeof(MBlock), +typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0, DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator; typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache; typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator; @@ -457,6 +387,9 @@ struct ThreadState { bool in_signal_handler; SignalContext *signal_ctx; + DenseSlabAllocCache block_cache; + DenseSlabAllocCache sync_cache; + #ifndef TSAN_GO u32 last_sleep_stack_id; ThreadClock last_sleep_clock; @@ -530,7 +463,7 @@ struct Context { bool initialized; bool after_multithreaded_fork; - SyncTab synctab; + MetaMap metamap; Mutex report_mtx; int nreported; @@ -628,7 +561,7 @@ void ForkParentAfter(ThreadState *thr, uptr pc); void ForkChildAfter(ThreadState *thr, uptr pc); void ReportRace(ThreadState *thr); -bool OutputReport(Context *ctx, const ScopedReport &srep); +bool OutputReport(ThreadState *thr, const ScopedReport &srep); bool IsFiredSuppression(Context *ctx, const ScopedReport &srep, const StackTrace &trace); @@ -657,9 +590,8 @@ void PrintCurrentStackSlow(); // uses libunwind void Initialize(ThreadState *thr); int Finalize(ThreadState *thr); -SyncVar* GetJavaSync(ThreadState *thr, uptr pc, uptr addr, - bool write_lock, bool create); -SyncVar* GetAndRemoveJavaSync(ThreadState *thr, uptr pc, uptr addr); +void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write); +void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write); void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic); diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cc b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cc index 66b789da756..bbee57350ee 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cc +++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cc @@ -59,7 +59,7 @@ static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ, trace.ObtainCurrent(thr, pc); rep.AddStack(&trace, true); rep.AddLocation(addr, 1); - OutputReport(ctx, rep); + OutputReport(thr, rep); } void MutexCreate(ThreadState *thr, uptr pc, uptr addr, @@ -72,10 +72,12 @@ void MutexCreate(ThreadState *thr, uptr pc, uptr addr, MemoryWrite(thr, pc, addr, kSizeLog1); thr->is_freeing = false; } - SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true); + SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true); s->is_rw = rw; s->is_recursive = recursive; s->is_linker_init = linker_init; + if (kCppMode && s->creation_stack_id == 0) + s->creation_stack_id = CurrentStackId(thr, pc); s->mtx.Unlock(); } @@ -88,37 +90,54 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) { if (IsGlobalVar(addr)) return; #endif - SyncVar *s = ctx->synctab.GetAndRemove(thr, pc, addr); - if (s == 0) - return; - if (flags()->detect_deadlocks) { - Callback cb(thr, pc); - ctx->dd->MutexDestroy(&cb, &s->dd); - } if (IsAppMem(addr)) { CHECK(!thr->is_freeing); thr->is_freeing = true; MemoryWrite(thr, pc, addr, kSizeLog1); thr->is_freeing = false; } + SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr); + if (s == 0) + return; + if (flags()->detect_deadlocks) { + Callback cb(thr, pc); + ctx->dd->MutexDestroy(&cb, &s->dd); + ctx->dd->MutexInit(&cb, &s->dd); + } + bool unlock_locked = false; if (flags()->report_destroy_locked && s->owner_tid != SyncVar::kInvalidTid && !s->is_broken) { s->is_broken = true; + unlock_locked = true; + } + u64 mid = s->GetId(); + u32 last_lock = s->last_lock; + if 
(!unlock_locked) + s->Reset(); // must not reset it before the report is printed + s->mtx.Unlock(); + if (unlock_locked) { ThreadRegistryLock l(ctx->thread_registry); ScopedReport rep(ReportTypeMutexDestroyLocked); - rep.AddMutex(s); + rep.AddMutex(mid); StackTrace trace; trace.ObtainCurrent(thr, pc); rep.AddStack(&trace); - FastState last(s->last_lock); + FastState last(last_lock); RestoreStack(last.tid(), last.epoch(), &trace, 0); rep.AddStack(&trace, true); - rep.AddLocation(s->addr, 1); - OutputReport(ctx, rep); + rep.AddLocation(addr, 1); + OutputReport(thr, rep); + } + if (unlock_locked) { + SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr); + if (s != 0) { + s->Reset(); + s->mtx.Unlock(); + } } - thr->mset.Remove(s->GetId()); - DestroyAndFree(s); + thr->mset.Remove(mid); + // s will be destroyed and freed in MetaMap::FreeBlock. } void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec, bool try_lock) { @@ -126,7 +145,7 @@ void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec, bool try_lock) { CHECK_GT(rec, 0); if (IsAppMem(addr)) MemoryReadAtomic(thr, pc, addr, kSizeLog1); - SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true); + SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true); thr->fast_state.IncrementEpoch(); TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId()); bool report_double_lock = false; @@ -170,7 +189,7 @@ int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) { DPrintf("#%d: MutexUnlock %zx all=%d\n", thr->tid, addr, all); if (IsAppMem(addr)) MemoryReadAtomic(thr, pc, addr, kSizeLog1); - SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true); + SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true); thr->fast_state.IncrementEpoch(); TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId()); int rec = 0; @@ -213,7 +232,7 @@ void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool trylock) { StatInc(thr, StatMutexReadLock); if (IsAppMem(addr)) MemoryReadAtomic(thr, pc, addr, kSizeLog1); - SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, false); + SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false); thr->fast_state.IncrementEpoch(); TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId()); bool report_bad_lock = false; @@ -248,7 +267,7 @@ void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) { StatInc(thr, StatMutexReadUnlock); if (IsAppMem(addr)) MemoryReadAtomic(thr, pc, addr, kSizeLog1); - SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true); + SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true); thr->fast_state.IncrementEpoch(); TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId()); bool report_bad_unlock = false; @@ -279,7 +298,7 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) { DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr); if (IsAppMem(addr)) MemoryReadAtomic(thr, pc, addr, kSizeLog1); - SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true); + SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true); bool write = true; bool report_bad_unlock = false; if (s->owner_tid == SyncVar::kInvalidTid) { @@ -324,7 +343,7 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) { void MutexRepair(ThreadState *thr, uptr pc, uptr addr) { DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr); - SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true); + SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true); s->owner_tid = 
SyncVar::kInvalidTid; s->recursion = 0; s->mtx.Unlock(); @@ -334,7 +353,7 @@ void Acquire(ThreadState *thr, uptr pc, uptr addr) { DPrintf("#%d: Acquire %zx\n", thr->tid, addr); if (thr->ignore_sync) return; - SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, false); + SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false); AcquireImpl(thr, pc, &s->clock); s->mtx.ReadUnlock(); } @@ -361,7 +380,7 @@ void Release(ThreadState *thr, uptr pc, uptr addr) { DPrintf("#%d: Release %zx\n", thr->tid, addr); if (thr->ignore_sync) return; - SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true); + SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true); thr->fast_state.IncrementEpoch(); // Can't increment epoch w/o writing to the trace as well. TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0); @@ -373,7 +392,7 @@ void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) { DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr); if (thr->ignore_sync) return; - SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true); + SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true); thr->fast_state.IncrementEpoch(); // Can't increment epoch w/o writing to the trace as well. TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0); @@ -465,7 +484,7 @@ void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) { rep.AddStack(&stacks[i], true); } } - OutputReport(ctx, rep); + OutputReport(thr, rep); } } // namespace __tsan diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cc b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cc index b3e502e8e84..b75c319e004 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cc +++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cc @@ -179,7 +179,8 @@ void ScopedReport::AddMemoryAccess(uptr addr, Shadow s, mop->write = s.IsWrite(); mop->atomic = s.IsAtomic(); mop->stack = SymbolizeStack(*stack); - mop->stack->suppressable = true; + if (mop->stack) + mop->stack->suppressable = true; for (uptr i = 0; i < mset->Size(); i++) { MutexSet::Desc d = mset->Get(i); u64 mid = this->AddMutex(d.id); @@ -279,7 +280,7 @@ u64 ScopedReport::AddMutex(u64 id) { u64 uid = 0; u64 mid = id; uptr addr = SyncVar::SplitId(id, &uid); - SyncVar *s = ctx->synctab.GetIfExistsAndLock(addr, false); + SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr); // Check that the mutex is still alive. // Another mutex can be created at the same address, // so check uid as well. @@ -290,7 +291,7 @@ u64 ScopedReport::AddMutex(u64 id) { AddDeadMutex(id); } if (s) - s->mtx.ReadUnlock(); + s->mtx.Unlock(); return mid; } @@ -330,21 +331,26 @@ void ScopedReport::AddLocation(uptr addr, uptr size) { return; } MBlock *b = 0; - if (allocator()->PointerIsMine((void*)addr) - && (b = user_mblock(0, (void*)addr))) { - ThreadContext *tctx = FindThreadByTidLocked(b->Tid()); + Allocator *a = allocator(); + if (a->PointerIsMine((void*)addr)) { + void *block_begin = a->GetBlockBegin((void*)addr); + if (block_begin) + b = ctx->metamap.GetBlock((uptr)block_begin); + } + if (b != 0) { + ThreadContext *tctx = FindThreadByTidLocked(b->tid); void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation)); ReportLocation *loc = new(mem) ReportLocation(); rep_->locs.PushBack(loc); loc->type = ReportLocationHeap; loc->addr = (uptr)allocator()->GetBlockBegin((void*)addr); - loc->size = b->Size(); - loc->tid = tctx ? tctx->tid : b->Tid(); + loc->size = b->siz; + loc->tid = tctx ? 
tctx->tid : b->tid; loc->name = 0; loc->file = 0; loc->line = 0; loc->stack = 0; - loc->stack = SymbolizeStackId(b->StackId()); + loc->stack = SymbolizeStackId(b->stk); if (tctx) AddThread(tctx); return; @@ -500,7 +506,7 @@ static void AddRacyStacks(ThreadState *thr, const StackTrace (&traces)[2], } } -bool OutputReport(Context *ctx, const ScopedReport &srep) { +bool OutputReport(ThreadState *thr, const ScopedReport &srep) { atomic_store(&ctx->last_symbolize_time_ns, NanoTime(), memory_order_relaxed); const ReportDesc *rep = srep.GetReport(); Suppression *supp = 0; @@ -517,8 +523,14 @@ bool OutputReport(Context *ctx, const ScopedReport &srep) { FiredSuppression s = {srep.GetReport()->typ, suppress_pc, supp}; ctx->fired_suppressions.push_back(s); } - if (OnReport(rep, suppress_pc != 0)) - return false; + { + bool old_is_freeing = thr->is_freeing; + thr->is_freeing = false; + bool suppressed = OnReport(rep, suppress_pc != 0); + thr->is_freeing = old_is_freeing; + if (suppressed) + return false; + } PrintReport(rep); ctx->nreported++; if (flags()->halt_on_error) @@ -616,6 +628,8 @@ static bool RaceBetweenAtomicAndFree(ThreadState *thr) { } void ReportRace(ThreadState *thr) { + CheckNoLocks(thr); + // Symbolizer makes lots of intercepted calls. If we try to process them, // at best it will cause deadlocks on internal mutexes. ScopedIgnoreInterceptors ignore; @@ -700,7 +714,7 @@ void ReportRace(ThreadState *thr) { } #endif - if (!OutputReport(ctx, rep)) + if (!OutputReport(thr, rep)) return; AddRacyStacks(thr, traces, addr_min, addr_max); diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cc b/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cc index 3b416c04550..6f7e0a913cb 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cc +++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cc @@ -207,7 +207,7 @@ void ThreadFinalize(ThreadState *thr) { ScopedReport rep(ReportTypeThreadLeak); rep.AddThread(leaks[i].tctx, true); rep.SetCount(leaks[i].count); - OutputReport(ctx, rep); + OutputReport(thr, rep); } #endif } diff --git a/compiler-rt/lib/tsan/rtl/tsan_stack_trace.cc b/compiler-rt/lib/tsan/rtl/tsan_stack_trace.cc new file mode 100644 index 00000000000..a8374f4285d --- /dev/null +++ b/compiler-rt/lib/tsan/rtl/tsan_stack_trace.cc @@ -0,0 +1,112 @@ +//===-- tsan_stack_trace.cc -----------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. 
+// +//===----------------------------------------------------------------------===// +//#include "sanitizer_common/sanitizer_placement_new.h" +#include "tsan_stack_trace.h" +#include "tsan_rtl.h" +#include "tsan_mman.h" + +namespace __tsan { + +StackTrace::StackTrace() + : n_() + , s_() + , c_() { +} + +StackTrace::StackTrace(uptr *buf, uptr cnt) + : n_() + , s_(buf) + , c_(cnt) { + CHECK_NE(buf, 0); + CHECK_NE(cnt, 0); +} + +StackTrace::~StackTrace() { + Reset(); +} + +void StackTrace::Reset() { + if (s_ && !c_) { + CHECK_NE(n_, 0); + internal_free(s_); + s_ = 0; + } + n_ = 0; +} + +void StackTrace::Init(const uptr *pcs, uptr cnt) { + Reset(); + if (cnt == 0) + return; + if (c_) { + CHECK_NE(s_, 0); + CHECK_LE(cnt, c_); + } else { + s_ = (uptr*)internal_alloc(MBlockStackTrace, cnt * sizeof(s_[0])); + } + n_ = cnt; + internal_memcpy(s_, pcs, cnt * sizeof(s_[0])); +} + +void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) { + Reset(); + n_ = thr->shadow_stack_pos - thr->shadow_stack; + if (n_ + !!toppc == 0) + return; + uptr start = 0; + if (c_) { + CHECK_NE(s_, 0); + if (n_ + !!toppc > c_) { + start = n_ - c_ + !!toppc; + n_ = c_ - !!toppc; + } + } else { + // Cap potentially huge stacks. + if (n_ + !!toppc > kTraceStackSize) { + start = n_ - kTraceStackSize + !!toppc; + n_ = kTraceStackSize - !!toppc; + } + s_ = (uptr*)internal_alloc(MBlockStackTrace, + (n_ + !!toppc) * sizeof(s_[0])); + } + for (uptr i = 0; i < n_; i++) + s_[i] = thr->shadow_stack[start + i]; + if (toppc) { + s_[n_] = toppc; + n_++; + } +} + +void StackTrace::CopyFrom(const StackTrace& other) { + Reset(); + Init(other.Begin(), other.Size()); +} + +bool StackTrace::IsEmpty() const { + return n_ == 0; +} + +uptr StackTrace::Size() const { + return n_; +} + +uptr StackTrace::Get(uptr i) const { + CHECK_LT(i, n_); + return s_[i]; +} + +const uptr *StackTrace::Begin() const { + return s_; +} + +} // namespace __tsan diff --git a/compiler-rt/lib/tsan/rtl/tsan_stack_trace.h b/compiler-rt/lib/tsan/rtl/tsan_stack_trace.h new file mode 100644 index 00000000000..fe82f6e6059 --- /dev/null +++ b/compiler-rt/lib/tsan/rtl/tsan_stack_trace.h @@ -0,0 +1,54 @@ +//===-- tsan_stack_trace.h --------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +//===----------------------------------------------------------------------===// +#ifndef TSAN_STACK_TRACE_H +#define TSAN_STACK_TRACE_H + +//#include "sanitizer_common/sanitizer_atomic.h" +//#include "sanitizer_common/sanitizer_common.h" +//#include "sanitizer_common/sanitizer_deadlock_detector_interface.h" +#include "tsan_defs.h" +//#include "tsan_clock.h" +//#include "tsan_mutex.h" +//#include "tsan_dense_alloc.h" + +namespace __tsan { + +class StackTrace { + public: + StackTrace(); + // Initialized the object in "static mode", + // in this mode it never calls malloc/free but uses the provided buffer. 
+ StackTrace(uptr *buf, uptr cnt); + ~StackTrace(); + void Reset(); + + void Init(const uptr *pcs, uptr cnt); + void ObtainCurrent(ThreadState *thr, uptr toppc); + bool IsEmpty() const; + uptr Size() const; + uptr Get(uptr i) const; + const uptr *Begin() const; + void CopyFrom(const StackTrace& other); + + private: + uptr n_; + uptr *s_; + const uptr c_; + + StackTrace(const StackTrace&); + void operator = (const StackTrace&); +}; + +} // namespace __tsan + +#endif // TSAN_STACK_TRACE_H diff --git a/compiler-rt/lib/tsan/rtl/tsan_sync.cc b/compiler-rt/lib/tsan/rtl/tsan_sync.cc index 5d71f9ff4aa..9e514045dec 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_sync.cc +++ b/compiler-rt/lib/tsan/rtl/tsan_sync.cc @@ -19,293 +19,192 @@ namespace __tsan { void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s); -SyncVar::SyncVar(uptr addr, u64 uid) - : mtx(MutexTypeSyncVar, StatMtxSyncVar) - , addr(addr) - , uid(uid) - , creation_stack_id() - , owner_tid(kInvalidTid) - , last_lock() - , recursion() - , is_rw() - , is_recursive() - , is_broken() - , is_linker_init() { +SyncVar::SyncVar() + : mtx(MutexTypeSyncVar, StatMtxSyncVar) { + Reset(); } -SyncTab::Part::Part() - : mtx(MutexTypeSyncTab, StatMtxSyncTab) - , val() { -} +void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) { + this->addr = addr; + this->uid = uid; -SyncTab::SyncTab() { + creation_stack_id = 0; + if (kCppMode) // Go does not use them + creation_stack_id = CurrentStackId(thr, pc); + if (flags()->detect_deadlocks) + DDMutexInit(thr, pc, this); } -SyncTab::~SyncTab() { - for (int i = 0; i < kPartCount; i++) { - while (tab_[i].val) { - SyncVar *tmp = tab_[i].val; - tab_[i].val = tmp->next; - DestroyAndFree(tmp); - } - } -} +void SyncVar::Reset() { + addr = 0; + uid = 0; + creation_stack_id = 0; + owner_tid = kInvalidTid; + last_lock = 0; + recursion = 0; + is_rw = 0; + is_recursive = 0; + is_broken = 0; + is_linker_init = 0; + next = 0; -SyncVar* SyncTab::GetOrCreateAndLock(ThreadState *thr, uptr pc, - uptr addr, bool write_lock) { - return GetAndLock(thr, pc, addr, write_lock, true); + clock.Zero(); + read_clock.Reset(); } -SyncVar* SyncTab::GetIfExistsAndLock(uptr addr, bool write_lock) { - return GetAndLock(0, 0, addr, write_lock, false); +MetaMap::MetaMap() { + atomic_store(&uid_gen_, 0, memory_order_relaxed); } -SyncVar* SyncTab::Create(ThreadState *thr, uptr pc, uptr addr) { - StatInc(thr, StatSyncCreated); - void *mem = internal_alloc(MBlockSync, sizeof(SyncVar)); - const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed); - SyncVar *res = new(mem) SyncVar(addr, uid); - res->creation_stack_id = 0; - if (!kGoMode) // Go does not use them - res->creation_stack_id = CurrentStackId(thr, pc); - if (flags()->detect_deadlocks) - DDMutexInit(thr, pc, res); - return res; +void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) { + u32 idx = block_alloc_.Alloc(&thr->block_cache); + MBlock *b = block_alloc_.Map(idx); + b->siz = sz; + b->tid = thr->tid; + b->stk = CurrentStackId(thr, pc); + u32 *meta = MemToMeta(p); + DCHECK_EQ(*meta, 0); + *meta = idx | kFlagBlock; } -SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc, - uptr addr, bool write_lock, bool create) { -#ifndef TSAN_GO - { // NOLINT - SyncVar *res = GetJavaSync(thr, pc, addr, write_lock, create); - if (res) - return res; - } - - // Here we ask only PrimaryAllocator, because - // SecondaryAllocator::PointerIsMine() is slow and we have fallback on - // the hashmap anyway. 
- if (PrimaryAllocator::PointerIsMine((void*)addr)) { - MBlock *b = user_mblock(thr, (void*)addr); - CHECK_NE(b, 0); - MBlock::ScopedLock l(b); - SyncVar *res = 0; - for (res = b->ListHead(); res; res = res->next) { - if (res->addr == addr) - break; - } - if (res == 0) { - if (!create) - return 0; - res = Create(thr, pc, addr); - b->ListPush(res); - } - if (write_lock) - res->mtx.Lock(); - else - res->mtx.ReadLock(); - return res; - } -#endif - - Part *p = &tab_[PartIdx(addr)]; - { - ReadLock l(&p->mtx); - for (SyncVar *res = p->val; res; res = res->next) { - if (res->addr == addr) { - if (write_lock) - res->mtx.Lock(); - else - res->mtx.ReadLock(); - return res; - } - } - } - if (!create) +uptr MetaMap::FreeBlock(ThreadState *thr, uptr pc, uptr p) { + MBlock* b = GetBlock(p); + if (b == 0) return 0; - { - Lock l(&p->mtx); - SyncVar *res = p->val; - for (; res; res = res->next) { - if (res->addr == addr) + uptr sz = RoundUpTo(b->siz, kMetaShadowCell); + FreeRange(thr, pc, p, sz); + return sz; +} + +void MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) { + u32 *meta = MemToMeta(p); + u32 *end = MemToMeta(p + sz); + if (end == meta) + end++; + for (; meta < end; meta++) { + u32 idx = *meta; + *meta = 0; + for (;;) { + if (idx == 0) break; - } - if (res == 0) { - res = Create(thr, pc, addr); - res->next = p->val; - p->val = res; - } - if (write_lock) - res->mtx.Lock(); - else - res->mtx.ReadLock(); - return res; - } -} - -SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) { -#ifndef TSAN_GO - { // NOLINT - SyncVar *res = GetAndRemoveJavaSync(thr, pc, addr); - if (res) - return res; - } - if (PrimaryAllocator::PointerIsMine((void*)addr)) { - MBlock *b = user_mblock(thr, (void*)addr); - CHECK_NE(b, 0); - SyncVar *res = 0; - { - MBlock::ScopedLock l(b); - res = b->ListHead(); - if (res) { - if (res->addr == addr) { - if (res->is_linker_init) - return 0; - b->ListPop(); - } else { - SyncVar **prev = &res->next; - res = *prev; - while (res) { - if (res->addr == addr) { - if (res->is_linker_init) - return 0; - *prev = res->next; - break; - } - prev = &res->next; - res = *prev; - } - } - if (res) { - StatInc(thr, StatSyncDestroyed); - res->mtx.Lock(); - res->mtx.Unlock(); - } - } - } - return res; - } -#endif - - Part *p = &tab_[PartIdx(addr)]; - SyncVar *res = 0; - { - Lock l(&p->mtx); - SyncVar **prev = &p->val; - res = *prev; - while (res) { - if (res->addr == addr) { - if (res->is_linker_init) - return 0; - *prev = res->next; + if (idx & kFlagBlock) { + block_alloc_.Free(&thr->block_cache, idx & ~kFlagMask); break; + } else if (idx & kFlagSync) { + DCHECK(idx & kFlagSync); + SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask); + u32 next = s->next; + s->Reset(); + sync_alloc_.Free(&thr->sync_cache, idx & ~kFlagMask); + idx = next; + } else { + CHECK(0); } - prev = &res->next; - res = *prev; } } - if (res) { - StatInc(thr, StatSyncDestroyed); - res->mtx.Lock(); - res->mtx.Unlock(); - } - return res; -} - -int SyncTab::PartIdx(uptr addr) { - return (addr >> 3) % kPartCount; -} - -StackTrace::StackTrace() - : n_() - , s_() - , c_() { } -StackTrace::StackTrace(uptr *buf, uptr cnt) - : n_() - , s_(buf) - , c_(cnt) { - CHECK_NE(buf, 0); - CHECK_NE(cnt, 0); -} - -StackTrace::~StackTrace() { - Reset(); +MBlock* MetaMap::GetBlock(uptr p) { + u32 *meta = MemToMeta(p); + u32 idx = *meta; + for (;;) { + if (idx == 0) + return 0; + if (idx & kFlagBlock) + return block_alloc_.Map(idx & ~kFlagMask); + DCHECK(idx & kFlagSync); + SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask); + idx 
= s->next; + } } -void StackTrace::Reset() { - if (s_ && !c_) { - CHECK_NE(n_, 0); - internal_free(s_); - s_ = 0; - } - n_ = 0; +SyncVar* MetaMap::GetOrCreateAndLock(ThreadState *thr, uptr pc, + uptr addr, bool write_lock) { + return GetAndLock(thr, pc, addr, write_lock, true); } -void StackTrace::Init(const uptr *pcs, uptr cnt) { - Reset(); - if (cnt == 0) - return; - if (c_) { - CHECK_NE(s_, 0); - CHECK_LE(cnt, c_); - } else { - s_ = (uptr*)internal_alloc(MBlockStackTrace, cnt * sizeof(s_[0])); - } - n_ = cnt; - internal_memcpy(s_, pcs, cnt * sizeof(s_[0])); +SyncVar* MetaMap::GetIfExistsAndLock(uptr addr) { + return GetAndLock(0, 0, addr, true, false); } -void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) { - Reset(); - n_ = thr->shadow_stack_pos - thr->shadow_stack; - if (n_ + !!toppc == 0) - return; - uptr start = 0; - if (c_) { - CHECK_NE(s_, 0); - if (n_ + !!toppc > c_) { - start = n_ - c_ + !!toppc; - n_ = c_ - !!toppc; +SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc, + uptr addr, bool write_lock, bool create) { + u32 *meta = MemToMeta(addr); + u32 idx0 = *meta; + u32 myidx = 0; + SyncVar *mys = 0; + for (;;) { + u32 idx = *meta; + for (;;) { + if (idx == 0) + break; + if (idx & kFlagBlock) + break; + DCHECK(idx & kFlagSync); + SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask); + if (s->addr == addr) { + if (myidx != 0) { + mys->Reset(); + sync_alloc_.Free(&thr->sync_cache, myidx); + } + if (write_lock) + s->mtx.Lock(); + else + s->mtx.ReadLock(); + return s; + } + idx = s->next; } - } else { - // Cap potentially huge stacks. - if (n_ + !!toppc > kTraceStackSize) { - start = n_ - kTraceStackSize + !!toppc; - n_ = kTraceStackSize - !!toppc; + if (!create) + return 0; + if (*meta != idx0) + continue; + + if (myidx == 0) { + const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed); + myidx = sync_alloc_.Alloc(&thr->sync_cache); + mys = sync_alloc_.Map(myidx); + mys->Init(thr, pc, addr, uid); + } + mys->next = idx0; + if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0, + myidx | kFlagSync, memory_order_release)) { + if (write_lock) + mys->mtx.Lock(); + else + mys->mtx.ReadLock(); + return mys; } - s_ = (uptr*)internal_alloc(MBlockStackTrace, - (n_ + !!toppc) * sizeof(s_[0])); - } - for (uptr i = 0; i < n_; i++) - s_[i] = thr->shadow_stack[start + i]; - if (toppc) { - s_[n_] = toppc; - n_++; } } -void StackTrace::CopyFrom(const StackTrace& other) { - Reset(); - Init(other.Begin(), other.Size()); -} - -bool StackTrace::IsEmpty() const { - return n_ == 0; -} - -uptr StackTrace::Size() const { - return n_; -} - -uptr StackTrace::Get(uptr i) const { - CHECK_LT(i, n_); - return s_[i]; +void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) { + // Here we assume that src and dst do not overlap, + // and there are no concurrent accesses to the regions (e.g. stop-the-world). + uptr diff = dst - src; + u32 *src_meta = MemToMeta(src); + u32 *dst_meta = MemToMeta(dst); + u32 *src_meta_end = MemToMeta(src + sz); + for (; src_meta != src_meta_end; src_meta++, dst_meta++) { + CHECK_EQ(*dst_meta, 0); + u32 idx = *src_meta; + *src_meta = 0; + *dst_meta = idx; + // Patch the addresses in sync objects. 
+ while (idx != 0) { + if (idx & kFlagBlock) + break; + CHECK(idx & kFlagSync); + SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask); + s->addr += diff; + idx = s->next; + } + } } -const uptr *StackTrace::Begin() const { - return s_; +void MetaMap::OnThreadIdle(ThreadState *thr) { + block_alloc_.FlushCache(&thr->block_cache); + sync_alloc_.FlushCache(&thr->sync_cache); } } // namespace __tsan diff --git a/compiler-rt/lib/tsan/rtl/tsan_sync.h b/compiler-rt/lib/tsan/rtl/tsan_sync.h index ed0ac595420..7c8682fc16b 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_sync.h +++ b/compiler-rt/lib/tsan/rtl/tsan_sync.h @@ -16,46 +16,21 @@ #include "sanitizer_common/sanitizer_atomic.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_deadlock_detector_interface.h" -#include "tsan_clock.h" #include "tsan_defs.h" +#include "tsan_clock.h" #include "tsan_mutex.h" +#include "tsan_dense_alloc.h" namespace __tsan { -class StackTrace { - public: - StackTrace(); - // Initialized the object in "static mode", - // in this mode it never calls malloc/free but uses the provided buffer. - StackTrace(uptr *buf, uptr cnt); - ~StackTrace(); - void Reset(); - - void Init(const uptr *pcs, uptr cnt); - void ObtainCurrent(ThreadState *thr, uptr toppc); - bool IsEmpty() const; - uptr Size() const; - uptr Get(uptr i) const; - const uptr *Begin() const; - void CopyFrom(const StackTrace& other); - - private: - uptr n_; - uptr *s_; - const uptr c_; - - StackTrace(const StackTrace&); - void operator = (const StackTrace&); -}; - struct SyncVar { - explicit SyncVar(uptr addr, u64 uid); + SyncVar(); static const int kInvalidTid = -1; + uptr addr; // overwritten by DenseSlabAlloc freelist Mutex mtx; - uptr addr; - const u64 uid; // Globally unique id. + u64 uid; // Globally unique id. u32 creation_stack_id; int owner_tid; // Set only by exclusive owners. u64 last_lock; @@ -64,13 +39,16 @@ struct SyncVar { bool is_recursive; bool is_broken; bool is_linker_init; - SyncVar *next; // In SyncTab hashtable. + u32 next; // in MetaMap DDMutex dd; SyncClock read_clock; // Used for rw mutexes only. // The clock is placed last, so that it is situated on a different cache line // with the mtx. This reduces contention for hot sync objects. SyncClock clock; + void Init(ThreadState *thr, uptr pc, uptr addr, u64 uid); + void Reset(); + u64 GetId() const { // 47 lsb is addr, then 14 bits is low part of uid, then 3 zero bits. return GetLsb((u64)addr | (uid << 47), 61); @@ -85,40 +63,39 @@ struct SyncVar { } }; -class SyncTab { +/* MetaMap allows to map arbitrary user pointers onto various descriptors. + Currently it maps pointers to heap block descriptors and sync var descs. + It uses 1/2 direct shadow, see tsan_platform.h. +*/ +class MetaMap { public: - SyncTab(); - ~SyncTab(); + MetaMap(); + + void AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz); + uptr FreeBlock(ThreadState *thr, uptr pc, uptr p); + void FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz); + MBlock* GetBlock(uptr p); SyncVar* GetOrCreateAndLock(ThreadState *thr, uptr pc, uptr addr, bool write_lock); - SyncVar* GetIfExistsAndLock(uptr addr, bool write_lock); + SyncVar* GetIfExistsAndLock(uptr addr); - // If the SyncVar does not exist, returns 0. 
- SyncVar* GetAndRemove(ThreadState *thr, uptr pc, uptr addr); + void MoveMemory(uptr src, uptr dst, uptr sz); - SyncVar* Create(ThreadState *thr, uptr pc, uptr addr); + void OnThreadIdle(ThreadState *thr); private: - struct Part { - Mutex mtx; - SyncVar *val; - char pad[kCacheLineSize - sizeof(Mutex) - sizeof(SyncVar*)]; // NOLINT - Part(); - }; - - // FIXME: Implement something more sane. - static const int kPartCount = 1009; - Part tab_[kPartCount]; + static const u32 kFlagMask = 3 << 30; + static const u32 kFlagBlock = 1 << 30; + static const u32 kFlagSync = 2 << 30; + typedef DenseSlabAlloc<MBlock, 1<<16, 1<<12> BlockAlloc; + typedef DenseSlabAlloc<SyncVar, 1<<16, 1<<10> SyncAlloc; + BlockAlloc block_alloc_; + SyncAlloc sync_alloc_; atomic_uint64_t uid_gen_; - int PartIdx(uptr addr); - - SyncVar* GetAndLock(ThreadState *thr, uptr pc, - uptr addr, bool write_lock, bool create); - - SyncTab(const SyncTab&); // Not implemented. - void operator = (const SyncTab&); // Not implemented. + SyncVar* GetAndLock(ThreadState *thr, uptr pc, uptr addr, bool write_lock, + bool create); }; } // namespace __tsan diff --git a/compiler-rt/lib/tsan/rtl/tsan_trace.h b/compiler-rt/lib/tsan/rtl/tsan_trace.h index 5ed0356e2bf..686160cfd69 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_trace.h +++ b/compiler-rt/lib/tsan/rtl/tsan_trace.h @@ -15,7 +15,7 @@ #include "tsan_defs.h" #include "tsan_mutex.h" -#include "tsan_sync.h" +#include "tsan_stack_trace.h" #include "tsan_mutexset.h" namespace __tsan { diff --git a/compiler-rt/lib/tsan/rtl/tsan_vector.h b/compiler-rt/lib/tsan/rtl/tsan_vector.h index ae84522a7dc..a7fb3fa58d5 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_vector.h +++ b/compiler-rt/lib/tsan/rtl/tsan_vector.h @@ -78,6 +78,10 @@ class Vector { } void Resize(uptr size) { + if (size == 0) { + end_ = begin_; + return; + } uptr old_size = Size(); EnsureSize(size); if (old_size < size) { @@ -100,7 +104,7 @@ class Vector { return; } uptr cap0 = last_ - begin_; - uptr cap = 2 * cap0; + uptr cap = cap0 * 5 / 4; // 25% growth if (cap == 0) cap = 16; if (cap < size) diff --git a/compiler-rt/lib/tsan/tests/unit/tsan_dense_alloc_test.cc b/compiler-rt/lib/tsan/tests/unit/tsan_dense_alloc_test.cc new file mode 100644 index 00000000000..fc9e4cbb7df --- /dev/null +++ b/compiler-rt/lib/tsan/tests/unit/tsan_dense_alloc_test.cc @@ -0,0 +1,55 @@ +//===-- tsan_dense_alloc_test.cc ------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. 
+// +//===----------------------------------------------------------------------===// +#include "tsan_dense_alloc.h" +#include "tsan_rtl.h" +#include "tsan_mman.h" +#include "gtest/gtest.h" + +#include <stdlib.h> +#include <stdint.h> +#include <map> + +namespace __tsan { + +TEST(DenseSlabAlloc, Basic) { + typedef DenseSlabAlloc<int, 128, 128> Alloc; + typedef Alloc::Cache Cache; + typedef Alloc::IndexT IndexT; + const int N = 1000; + + Alloc alloc; + Cache cache; + alloc.InitCache(&cache); + + IndexT blocks[N]; + for (int ntry = 0; ntry < 3; ntry++) { + for (int i = 0; i < N; i++) { + IndexT idx = alloc.Alloc(&cache); + blocks[i] = idx; + EXPECT_NE(idx, 0); + int *v = alloc.Map(idx); + *v = i; + } + + for (int i = 0; i < N; i++) { + IndexT idx = blocks[i]; + int *v = alloc.Map(idx); + EXPECT_EQ(*v, i); + alloc.Free(&cache, idx); + } + + alloc.FlushCache(&cache); + } +} + +} // namespace __tsan diff --git a/compiler-rt/lib/tsan/tests/unit/tsan_mman_test.cc b/compiler-rt/lib/tsan/tests/unit/tsan_mman_test.cc index 5e39bea8cfc..d57ecbf6413 100644 --- a/compiler-rt/lib/tsan/tests/unit/tsan_mman_test.cc +++ b/compiler-rt/lib/tsan/tests/unit/tsan_mman_test.cc @@ -51,20 +51,8 @@ TEST(Mman, User) { char *p2 = (char*)user_alloc(thr, pc, 20); EXPECT_NE(p2, (char*)0); EXPECT_NE(p2, p); - MBlock *b = user_mblock(thr, p); - EXPECT_NE(b, (MBlock*)0); - EXPECT_EQ(b->Size(), (uptr)10); - MBlock *b2 = user_mblock(thr, p2); - EXPECT_NE(b2, (MBlock*)0); - EXPECT_EQ(b2->Size(), (uptr)20); - for (int i = 0; i < 10; i++) { - p[i] = 42; - EXPECT_EQ(b, user_mblock(thr, p + i)); - } - for (int i = 0; i < 20; i++) { - ((char*)p2)[i] = 42; - EXPECT_EQ(b2, user_mblock(thr, p2 + i)); - } + EXPECT_EQ(user_alloc_usable_size(thr, pc, p), (uptr)10); + EXPECT_EQ(user_alloc_usable_size(thr, pc, p2), (uptr)20); user_free(thr, pc, p); user_free(thr, pc, p2); } diff --git a/compiler-rt/lib/tsan/tests/unit/tsan_sync_test.cc b/compiler-rt/lib/tsan/tests/unit/tsan_sync_test.cc index 1cfcf99eeac..664ce7f0dbc 100644 --- a/compiler-rt/lib/tsan/tests/unit/tsan_sync_test.cc +++ b/compiler-rt/lib/tsan/tests/unit/tsan_sync_test.cc @@ -12,53 +12,100 @@ //===----------------------------------------------------------------------===// #include "tsan_sync.h" #include "tsan_rtl.h" -#include "tsan_mman.h" #include "gtest/gtest.h" -#include <stdlib.h> -#include <stdint.h> -#include <map> - namespace __tsan { -TEST(Sync, Table) { - const uintptr_t kIters = 512*1024; - const uintptr_t kRange = 10000; +TEST(MetaMap, Basic) { + ThreadState *thr = cur_thread(); + MetaMap *m = &ctx->metamap; + u64 block[1] = {}; // fake malloc block + m->AllocBlock(thr, 0, (uptr)&block[0], 1 * sizeof(u64)); + MBlock *mb = m->GetBlock((uptr)&block[0]); + EXPECT_NE(mb, (MBlock*)0); + EXPECT_EQ(mb->siz, 1 * sizeof(u64)); + EXPECT_EQ(mb->tid, thr->tid); + uptr sz = m->FreeBlock(thr, 0, (uptr)&block[0]); + EXPECT_EQ(sz, 1 * sizeof(u64)); + mb = m->GetBlock((uptr)&block[0]); + EXPECT_EQ(mb, (MBlock*)0); +} + +TEST(MetaMap, FreeRange) { + ThreadState *thr = cur_thread(); + MetaMap *m = &ctx->metamap; + u64 block[4] = {}; // fake malloc block + m->AllocBlock(thr, 0, (uptr)&block[0], 1 * sizeof(u64)); + m->AllocBlock(thr, 0, (uptr)&block[1], 3 * sizeof(u64)); + MBlock *mb1 = m->GetBlock((uptr)&block[0]); + EXPECT_EQ(mb1->siz, 1 * sizeof(u64)); + MBlock *mb2 = m->GetBlock((uptr)&block[1]); + EXPECT_EQ(mb2->siz, 3 * sizeof(u64)); + m->FreeRange(thr, 0, (uptr)&block[0], 4 * sizeof(u64)); + mb1 = m->GetBlock((uptr)&block[0]); + EXPECT_EQ(mb1, (MBlock*)0); + mb2 = 
m->GetBlock((uptr)&block[1]); + EXPECT_EQ(mb2, (MBlock*)0); +} +TEST(MetaMap, Sync) { ThreadState *thr = cur_thread(); - uptr pc = 0; + MetaMap *m = &ctx->metamap; + u64 block[4] = {}; // fake malloc block + m->AllocBlock(thr, 0, (uptr)&block[0], 4 * sizeof(u64)); + SyncVar *s1 = m->GetIfExistsAndLock((uptr)&block[0]); + EXPECT_EQ(s1, (SyncVar*)0); + s1 = m->GetOrCreateAndLock(thr, 0, (uptr)&block[0], true); + EXPECT_NE(s1, (SyncVar*)0); + EXPECT_EQ(s1->addr, (uptr)&block[0]); + s1->mtx.Unlock(); + SyncVar *s2 = m->GetOrCreateAndLock(thr, 0, (uptr)&block[1], false); + EXPECT_NE(s2, (SyncVar*)0); + EXPECT_EQ(s2->addr, (uptr)&block[1]); + s2->mtx.ReadUnlock(); + m->FreeBlock(thr, 0, (uptr)&block[0]); + s1 = m->GetIfExistsAndLock((uptr)&block[0]); + EXPECT_EQ(s1, (SyncVar*)0); + s2 = m->GetIfExistsAndLock((uptr)&block[1]); + EXPECT_EQ(s2, (SyncVar*)0); + m->OnThreadIdle(thr); +} - SyncTab tab; - SyncVar *golden[kRange] = {}; - unsigned seed = 0; - for (uintptr_t i = 0; i < kIters; i++) { - uintptr_t addr = rand_r(&seed) % (kRange - 1) + 1; - if (rand_r(&seed) % 2) { - // Get or add. - SyncVar *v = tab.GetOrCreateAndLock(thr, pc, addr, true); - EXPECT_TRUE(golden[addr] == 0 || golden[addr] == v); - EXPECT_EQ(v->addr, addr); - golden[addr] = v; - v->mtx.Unlock(); - } else { - // Remove. - SyncVar *v = tab.GetAndRemove(thr, pc, addr); - EXPECT_EQ(golden[addr], v); - if (v) { - EXPECT_EQ(v->addr, addr); - golden[addr] = 0; - DestroyAndFree(v); - } - } - } - for (uintptr_t addr = 0; addr < kRange; addr++) { - if (golden[addr] == 0) - continue; - SyncVar *v = tab.GetAndRemove(thr, pc, addr); - EXPECT_EQ(v, golden[addr]); - EXPECT_EQ(v->addr, addr); - DestroyAndFree(v); - } +TEST(MetaMap, MoveMemory) { + ThreadState *thr = cur_thread(); + MetaMap *m = &ctx->metamap; + u64 block1[4] = {}; // fake malloc block + u64 block2[4] = {}; // fake malloc block + m->AllocBlock(thr, 0, (uptr)&block1[0], 3 * sizeof(u64)); + m->AllocBlock(thr, 0, (uptr)&block1[3], 1 * sizeof(u64)); + SyncVar *s1 = m->GetOrCreateAndLock(thr, 0, (uptr)&block1[0], true); + s1->mtx.Unlock(); + SyncVar *s2 = m->GetOrCreateAndLock(thr, 0, (uptr)&block1[1], true); + s2->mtx.Unlock(); + m->MoveMemory((uptr)&block1[0], (uptr)&block2[0], 4 * sizeof(u64)); + MBlock *mb1 = m->GetBlock((uptr)&block1[0]); + EXPECT_EQ(mb1, (MBlock*)0); + MBlock *mb2 = m->GetBlock((uptr)&block1[3]); + EXPECT_EQ(mb2, (MBlock*)0); + mb1 = m->GetBlock((uptr)&block2[0]); + EXPECT_NE(mb1, (MBlock*)0); + EXPECT_EQ(mb1->siz, 3 * sizeof(u64)); + mb2 = m->GetBlock((uptr)&block2[3]); + EXPECT_NE(mb2, (MBlock*)0); + EXPECT_EQ(mb2->siz, 1 * sizeof(u64)); + s1 = m->GetIfExistsAndLock((uptr)&block1[0]); + EXPECT_EQ(s1, (SyncVar*)0); + s2 = m->GetIfExistsAndLock((uptr)&block1[1]); + EXPECT_EQ(s2, (SyncVar*)0); + s1 = m->GetIfExistsAndLock((uptr)&block2[0]); + EXPECT_NE(s1, (SyncVar*)0); + EXPECT_EQ(s1->addr, (uptr)&block2[0]); + s1->mtx.Unlock(); + s2 = m->GetIfExistsAndLock((uptr)&block2[1]); + EXPECT_NE(s2, (SyncVar*)0); + EXPECT_EQ(s2->addr, (uptr)&block2[1]); + s2->mtx.Unlock(); + m->FreeRange(thr, 0, (uptr)&block2[0], 4 * sizeof(u64)); } } // namespace __tsan diff --git a/compiler-rt/test/tsan/java_alloc.cc b/compiler-rt/test/tsan/java_alloc.cc index 0c9c4eb186f..4a606f7940d 100644 --- a/compiler-rt/test/tsan/java_alloc.cc +++ b/compiler-rt/test/tsan/java_alloc.cc @@ -19,14 +19,20 @@ void *Thread(void *p) { } int main() { - jptr jheap = (jptr)malloc(kHeapSize); + jptr jheap = (jptr)malloc(kHeapSize + 8) + 8; __tsan_java_init(jheap, kHeapSize); pthread_t th; 
pthread_create(&th, 0, Thread, (void*)(jheap + kHeapSize / 4)); stress(jheap); pthread_join(th, 0); - printf("OK\n"); - return __tsan_java_fini(); + if (__tsan_java_fini() != 0) { + printf("FAILED\n"); + return 1; + } + printf("DONE\n"); + return 0; } // CHECK-NOT: WARNING: ThreadSanitizer: data race +// CHECK-NOT: FAILED +// CHECK: DONE diff --git a/compiler-rt/test/tsan/java_lock_rec_race.cc b/compiler-rt/test/tsan/java_lock_rec_race.cc index 41aa1ca6fc9..c802b9e7780 100644 --- a/compiler-rt/test/tsan/java_lock_rec_race.cc +++ b/compiler-rt/test/tsan/java_lock_rec_race.cc @@ -25,7 +25,7 @@ void *Thread(void *p) { int main() { int const kHeapSize = 1024 * 1024; - void *jheap = malloc(kHeapSize); + void *jheap = (char*)malloc(kHeapSize + 8) + 8; __tsan_java_init((jptr)jheap, kHeapSize); const int kBlockSize = 16; __tsan_java_alloc((jptr)jheap, kBlockSize); diff --git a/compiler-rt/test/tsan/java_race.cc b/compiler-rt/test/tsan/java_race.cc index 70ad8c438fb..27b5c7fb6e5 100644 --- a/compiler-rt/test/tsan/java_race.cc +++ b/compiler-rt/test/tsan/java_race.cc @@ -8,7 +8,7 @@ void *Thread(void *p) { int main() { int const kHeapSize = 1024 * 1024; - void *jheap = malloc(kHeapSize); + void *jheap = (char*)malloc(kHeapSize + 8) + 8; __tsan_java_init((jptr)jheap, kHeapSize); const int kBlockSize = 16; __tsan_java_alloc((jptr)jheap, kBlockSize); diff --git a/compiler-rt/test/tsan/java_race_move.cc b/compiler-rt/test/tsan/java_race_move.cc index 76da91b93f9..b7446665a39 100644 --- a/compiler-rt/test/tsan/java_race_move.cc +++ b/compiler-rt/test/tsan/java_race_move.cc @@ -12,7 +12,7 @@ void *Thread(void *p) { int main() { int const kHeapSize = 1024 * 1024; - void *jheap = malloc(kHeapSize); + void *jheap = (char*)malloc(kHeapSize + 8) + 8; __tsan_java_init((jptr)jheap, kHeapSize); const int kBlockSize = 64; int const kMove = 1024; diff --git a/compiler-rt/test/tsan/mutexset7.cc b/compiler-rt/test/tsan/mutexset7.cc index a8a907a8896..b80eddb083c 100644 --- a/compiler-rt/test/tsan/mutexset7.cc +++ b/compiler-rt/test/tsan/mutexset7.cc @@ -13,12 +13,13 @@ void *Thread1(void *x) { } void *Thread2(void *x) { - pthread_mutex_t mtx; - pthread_mutex_init(&mtx, 0); - pthread_mutex_lock(&mtx); + pthread_mutex_t *mtx = new pthread_mutex_t; + pthread_mutex_init(mtx, 0); + pthread_mutex_lock(mtx); Global--; - pthread_mutex_unlock(&mtx); - pthread_mutex_destroy(&mtx); + pthread_mutex_unlock(mtx); + pthread_mutex_destroy(mtx); + delete mtx; return NULL; } diff --git a/compiler-rt/test/tsan/oob_race.cc b/compiler-rt/test/tsan/oob_race.cc deleted file mode 100644 index 16c59c678a5..00000000000 --- a/compiler-rt/test/tsan/oob_race.cc +++ /dev/null @@ -1,26 +0,0 @@ -// RUN: %clangxx_tsan -O1 %s -o %t && not %run %t 2>&1 | FileCheck %s -#include <pthread.h> -#include <stdio.h> -#include <unistd.h> - -const long kOffset = 64*1024; - -void *Thread(void *p) { - sleep(1); - ((char*)p)[-kOffset] = 43; - return 0; -} - -int main() { - char *volatile p0 = new char[16]; - delete[] p0; - char *p = new char[32]; - pthread_t th; - pthread_create(&th, 0, Thread, p); - p[-kOffset] = 42; - pthread_join(th, 0); -} - -// Used to crash with CHECK failed. -// CHECK: WARNING: ThreadSanitizer: data race - |
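
Appendix-style note (not part of the commit): the core idea of the new MetaMap, visible in the tsan_sync.h and tsan_sync.cc hunks above, is that each meta shadow cell is a single u32 whose top two bits (kFlagMask) say what the remaining 30 bits index into: kFlagBlock marks a heap block descriptor, kFlagSync marks the head of a chain of sync objects whose SyncVar::next fields carry the same tagged encoding. What follows is a minimal standalone sketch of that tagging scheme, not the actual TSan code: the flag constants, MBlock::siz and SyncVar::next mirror the patch, but the plain arrays, the single global meta cell, and the AttachBlock/AttachSync/FindBlock helpers are simplified stand-ins for DenseSlabAlloc, MemToMeta and the real MetaMap methods.

// Standalone sketch of the index-tagging scheme used by MetaMap's meta
// shadow cells (mirrors kFlagBlock/kFlagSync from tsan_sync.h, but with
// plain arrays instead of DenseSlabAlloc). Illustration only.
#include <cassert>
#include <cstdint>
#include <cstdio>

namespace sketch {

const uint32_t kFlagMask  = 3u << 30;  // top two bits hold the kind tag
const uint32_t kFlagBlock = 1u << 30;  // cell points at a heap block descriptor
const uint32_t kFlagSync  = 2u << 30;  // cell points at a chain of sync objects

struct MBlock { uint64_t siz; };
struct SyncVar { uintptr_t addr; uint32_t next; };  // next: tagged index or 0

MBlock  blocks[16];   // stand-ins for DenseSlabAlloc-backed storage
SyncVar syncs[16];    // index 0 is reserved so that 0 means "empty"

// A single meta cell, as it would live in the direct meta shadow.
uint32_t meta = 0;

void AttachBlock(uint32_t block_idx, uint64_t sz) {
  blocks[block_idx].siz = sz;
  assert(meta == 0);               // a cell holds at most one block
  meta = block_idx | kFlagBlock;
}

// Prepend a sync object to the chain rooted at the meta cell.
void AttachSync(uint32_t sync_idx, uintptr_t addr) {
  syncs[sync_idx].addr = addr;
  syncs[sync_idx].next = meta;     // old head (block or sync), still tagged
  meta = sync_idx | kFlagSync;
}

// Walk the chain the way MetaMap::GetBlock does: skip sync entries,
// stop at the block entry (or at 0 if nothing is attached).
MBlock *FindBlock() {
  uint32_t idx = meta;
  while (idx != 0) {
    if (idx & kFlagBlock)
      return &blocks[idx & ~kFlagMask];
    assert(idx & kFlagSync);
    idx = syncs[idx & ~kFlagMask].next;
  }
  return nullptr;
}

}  // namespace sketch

int main() {
  using namespace sketch;
  AttachBlock(/*block_idx=*/1, /*sz=*/64);
  AttachSync(/*sync_idx=*/1, /*addr=*/0x1000);
  AttachSync(/*sync_idx=*/2, /*addr=*/0x1008);
  MBlock *b = FindBlock();
  assert(b && b->siz == 64);
  std::printf("block size found through two sync entries: %llu\n",
              (unsigned long long)b->siz);
  return 0;
}

In the real patch, MetaMap::GetAndLock publishes a freshly initialized SyncVar by compare-and-swapping its tagged index into the meta cell (the atomic_compare_exchange_strong in the tsan_sync.cc hunk), so concurrent lookups never take a table-wide lock; the sketch above deliberately leaves that concurrency handling out.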