Diffstat (limited to 'compiler-rt')
-rw-r--r--   compiler-rt/lib/tsan/rtl/tsan_interceptors.cc |  1
-rw-r--r--   compiler-rt/lib/tsan/rtl/tsan_sync.cc         | 60
-rw-r--r--   compiler-rt/lib/tsan/rtl/tsan_sync.h          |  3
-rw-r--r--   compiler-rt/test/tsan/java_heap_init.cc       | 28
4 files changed, 90 insertions(+), 2 deletions(-)
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors.cc b/compiler-rt/lib/tsan/rtl/tsan_interceptors.cc
index a67ed285dab..a33410bb76f 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interceptors.cc
+++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors.cc
@@ -797,6 +797,7 @@ TSAN_INTERCEPTOR(void*, mmap64, void *addr, long_t sz, int prot,
 TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
   SCOPED_TSAN_INTERCEPTOR(munmap, addr, sz);
   DontNeedShadowFor((uptr)addr, sz);
+  ctx->metamap.ResetRange(thr, pc, (uptr)addr, (uptr)sz);
   int res = REAL(munmap)(addr, sz);
   return res;
 }
diff --git a/compiler-rt/lib/tsan/rtl/tsan_sync.cc b/compiler-rt/lib/tsan/rtl/tsan_sync.cc
index 1041073bed5..3d2463353fb 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_sync.cc
+++ b/compiler-rt/lib/tsan/rtl/tsan_sync.cc
@@ -80,7 +80,8 @@ uptr MetaMap::FreeBlock(ThreadState *thr, uptr pc, uptr p) {
   return sz;
 }
 
-void MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
+bool MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
+  bool has_something = false;
   u32 *meta = MemToMeta(p);
   u32 *end = MemToMeta(p + sz);
   if (end == meta)
@@ -91,6 +92,7 @@ void MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
     for (;;) {
       if (idx == 0)
         break;
+      has_something = true;
       if (idx & kFlagBlock) {
         block_alloc_.Free(&thr->block_cache, idx & ~kFlagMask);
         break;
@@ -106,6 +108,62 @@ void MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
       }
     }
   }
+  return has_something;
+}
+
+// ResetRange removes all meta objects from the range.
+// It is called for large mmap-ed regions. The function is best-effort wrt
+// freeing of meta objects, because we don't want to page in the whole range,
+// which can be huge. The function probes pages one-by-one until it finds a page
+// without meta objects; at that point it stops freeing meta objects. Because
+// thread stacks grow top-down, we do the same starting from the end as well.
+void MetaMap::ResetRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
+  const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
+  const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
+  if (sz <= 4 * kPageSize) {
+    // If the range is small, just do the normal free procedure.
+    FreeRange(thr, pc, p, sz);
+    return;
+  }
+  // First, round both ends of the range to page size.
+  uptr diff = RoundUp(p, kPageSize) - p;
+  if (diff != 0) {
+    FreeRange(thr, pc, p, diff);
+    p += diff;
+    sz -= diff;
+  }
+  diff = p + sz - RoundDown(p + sz, kPageSize);
+  if (diff != 0) {
+    FreeRange(thr, pc, p + sz - diff, diff);
+    sz -= diff;
+  }
+  // Now we must have a non-empty page-aligned range.
+  CHECK_GT(sz, 0);
+  CHECK_EQ(p, RoundUp(p, kPageSize));
+  CHECK_EQ(sz, RoundUp(sz, kPageSize));
+  const uptr p0 = p;
+  const uptr sz0 = sz;
+  // Probe start of the range.
+  while (sz > 0) {
+    bool has_something = FreeRange(thr, pc, p, kPageSize);
+    p += kPageSize;
+    sz -= kPageSize;
+    if (!has_something)
+      break;
+  }
+  // Probe end of the range.
+  while (sz > 0) {
+    bool has_something = FreeRange(thr, pc, p + sz - kPageSize, kPageSize);
+    sz -= kPageSize;
+    if (!has_something)
+      break;
+  }
+  // Finally, page out the whole range (including the parts that we've just
+  // freed). Note: we can't simply madvise, because we need to leave a zeroed
+  // range (otherwise __tsan_java_move can crash if it encounters left-over
+  // meta objects in the java heap).
+  UnmapOrDie((void*)p0, sz0);
+  MmapFixedNoReserve(p0, sz0);
 }
 
 MBlock* MetaMap::GetBlock(uptr p) {
diff --git a/compiler-rt/lib/tsan/rtl/tsan_sync.h b/compiler-rt/lib/tsan/rtl/tsan_sync.h
index 574810d8a54..2d12cdff8b2 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_sync.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_sync.h
@@ -73,7 +73,8 @@ class MetaMap {
 
   void AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz);
   uptr FreeBlock(ThreadState *thr, uptr pc, uptr p);
-  void FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz);
+  bool FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz);
+  void ResetRange(ThreadState *thr, uptr pc, uptr p, uptr sz);
   MBlock* GetBlock(uptr p);
 
   SyncVar* GetOrCreateAndLock(ThreadState *thr, uptr pc,
diff --git a/compiler-rt/test/tsan/java_heap_init.cc b/compiler-rt/test/tsan/java_heap_init.cc
new file mode 100644
index 00000000000..bb7357c25b4
--- /dev/null
+++ b/compiler-rt/test/tsan/java_heap_init.cc
@@ -0,0 +1,28 @@
+// RUN: %clangxx_tsan -O1 %s -o %t && %run %t 2>&1 | FileCheck %s
+#include "java.h"
+#include <errno.h>
+#include <sys/mman.h>
+
+int main() {
+  // Test that munmap interceptor resets meta shadow for the memory range.
+  // Previously __tsan_java_move failed because it encountered non-zero meta
+  // shadow for the destination.
+  int const kHeapSize = 1024 * 1024;
+  jptr jheap = (jptr)mmap(0, kHeapSize, PROT_READ | PROT_WRITE,
+                          MAP_ANON | MAP_PRIVATE, -1, 0);
+  if (jheap == (jptr)MAP_FAILED)
+    return printf("mmap failed with %d\n", errno);
+  __atomic_store_n((int*)jheap, 1, __ATOMIC_RELEASE);
+  munmap((void*)jheap, kHeapSize);
+  jheap = (jptr)mmap((void*)jheap, kHeapSize, PROT_READ | PROT_WRITE,
+                     MAP_ANON | MAP_PRIVATE, -1, 0);
+  if (jheap == (jptr)MAP_FAILED)
+    return printf("second mmap failed with %d\n", errno);
+  __tsan_java_init(jheap, kHeapSize);
+  __tsan_java_move(jheap + 16, jheap, 16);
+  printf("DONE\n");
+  return __tsan_java_fini();
+}
+
+// CHECK-NOT: WARNING: ThreadSanitizer: data race
+// CHECK: DONE
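
The core of the change is the two-sided page probing in ResetRange. The sketch
below restates that strategy outside of TSan, under stated assumptions: PageMap
and free_page() are hypothetical stand-ins for the meta shadow and for a
FreeRange call covering one page-sized chunk; this is an illustration, not the
TSan implementation.

// Standalone sketch of ResetRange's probing strategy (hypothetical types).
#include <cstdio>
#include <vector>

struct PageMap {
  std::vector<bool> has_meta;  // one flag per page: does it hold meta objects?

  // Stand-in for FreeRange on a single page: frees the page's meta objects
  // and reports whether there was anything to free.
  bool free_page(size_t i) {
    bool had = has_meta[i];
    has_meta[i] = false;
    return had;
  }
};

// Probe pages of [lo, hi) from both ends. Allocations cluster at the start of
// a mapping and thread stacks grow top-down from its end, so each scan stops
// at the first page that had no meta objects.
void reset_range(PageMap &m, size_t lo, size_t hi) {
  while (lo < hi && m.free_page(lo++)) {}
  while (lo < hi && m.free_page(--hi)) {}
  // The real code then unmaps and re-mmaps the whole range, which also wipes
  // any meta objects the probing missed in the middle.
}

int main() {
  // Meta objects at both ends, plus a stray one on page 5.
  PageMap m{{true, true, true, false, false, true, false, false, true, true}};
  reset_range(m, 0, m.has_meta.size());
  for (bool b : m.has_meta) printf("%d", (int)b);
  printf("\n");  // prints 0000010000: page 5 is missed, hence the final remap
  return 0;
}

This also shows why the function is only best-effort: a meta object stranded in
the middle of a huge range is never probed, and only the final
UnmapOrDie/MmapFixedNoReserve pair guarantees the meta shadow reads as zero
afterwards.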
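The head/tail trimming that precedes the probing can likewise be checked in
isolation. A small sketch with made-up addresses, where round_up/round_down are
stand-ins for sanitizer_common's RoundUp/RoundDown and kPage plays the role of
GetPageSizeCached() * kMetaRatio:

// Sketch of the alignment arithmetic in ResetRange (hypothetical values).
#include <cassert>
#include <cstdint>
#include <cstdio>

using uptr = uint64_t;  // 64-bit for the example addresses below

static uptr round_up(uptr x, uptr a) { return (x + a - 1) & ~(a - 1); }
static uptr round_down(uptr x, uptr a) { return x & ~(a - 1); }

int main() {
  const uptr kPage = 4096 * 2;   // page size * kMetaRatio
  uptr p = 0x7f0000001100;       // unaligned start of the unmapped range
  uptr sz = 10 * kPage + 300;    // unaligned size

  // Free (here: just trim) the unaligned head...
  uptr diff = round_up(p, kPage) - p;
  p += diff;
  sz -= diff;
  // ...and the unaligned tail.
  diff = p + sz - round_down(p + sz, kPage);
  sz -= diff;

  // Mirrors the CHECKs in ResetRange: a non-empty, page-aligned remainder.
  assert(sz > 0);
  assert(p == round_up(p, kPage));
  assert(sz == round_up(sz, kPage));
  printf("aligned range: %#llx + %#llx\n",
         (unsigned long long)p, (unsigned long long)sz);
  return 0;
}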