| author | Kostya Serebryany <kcc@google.com> | 2012-12-27 07:37:24 +0000 |
|---|---|---|
| committer | Kostya Serebryany <kcc@google.com> | 2012-12-27 07:37:24 +0000 |
| commit | 6f604b50072a990c7a2436262d1bce1fb744414e (patch) | |
| tree | 5684348557118104f3255427876a57dbc84a64f2 | |
| parent | 757f3fc394cb57c721ce12373fb34ed5ff19e74d (diff) | |
| download | bcm5719-llvm-6f604b50072a990c7a2436262d1bce1fb744414e.tar.gz bcm5719-llvm-6f604b50072a990c7a2436262d1bce1fb744414e.zip | |
[asan/tsan] when unmapping a chunk of user memory, apply madvise(MADV_DONTNEED) to the corresponding chunk of shadow memory. Also update sanitizer_allocator64_testlib.cc
llvm-svn: 171144
6 files changed, 57 insertions, 12 deletions
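The change hinges on one kernel primitive: on Linux, `madvise(MADV_DONTNEED)` lets the kernel reclaim the physical pages behind a private anonymous mapping while the virtual range stays mapped, and subsequent reads return zeros. That is exactly the state shadow memory should be in once the user memory it describes has been unmapped. A standalone sketch of that behavior (illustrative, not part of the patch):

```cpp
#include <sys/mman.h>
#include <assert.h>
#include <string.h>

int main() {
  const size_t kSize = 1 << 20;  // Pretend this is a chunk of shadow memory.
  void *m = mmap(nullptr, kSize, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(m != MAP_FAILED);
  char *shadow = (char *)m;
  memset(shadow, 0xAB, kSize);            // Dirty the pages: commits real RAM.
  madvise(shadow, kSize, MADV_DONTNEED);  // Let the kernel reclaim the RAM.
  assert(shadow[0] == 0);                 // Range stays mapped, rereads as zeros.
  munmap(shadow, kSize);
  return 0;
}
```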
```diff
diff --git a/compiler-rt/lib/asan/asan_allocator2.cc b/compiler-rt/lib/asan/asan_allocator2.cc
index 425ce858591..0590479bd44 100644
--- a/compiler-rt/lib/asan/asan_allocator2.cc
+++ b/compiler-rt/lib/asan/asan_allocator2.cc
@@ -37,10 +37,17 @@ struct AsanMapUnmapCallback {
     AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
     thread_stats.mmaps++;
     thread_stats.mmaped += size;
-    // thread_stats.mmaped_by_size[size_class] += n_chunks;
   }
   void OnUnmap(uptr p, uptr size) const {
     PoisonShadow(p, size, 0);
+    // We are about to unmap a chunk of user memory.
+    // Mark the corresponding shadow memory as not needed.
+    // Since asan's mapping is compacting, the shadow chunk may be
+    // not page-aligned, so we only flush the page-aligned portion.
+    uptr page_size = GetPageSizeCached();
+    uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
+    uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
+    FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
     // Statistics.
     AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
     thread_stats.munmaps++;
```
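Because asan's shadow mapping is compacting (several user bytes map to one shadow byte), the shadow of an arbitrary user chunk rarely lines up with page boundaries, and flushing a partial page would also discard live shadow belonging to neighboring memory. Hence the `RoundUpTo`/`RoundDownTo` pair above shrinks the range inward. A sketch of the same computation, with illustrative scale and offset constants rather than asan's actual configuration:

```cpp
#include <cstdint>

// Illustrative constants only -- not asan's real configuration.
const uint64_t kShadowScale  = 3;             // 8 user bytes -> 1 shadow byte.
const uint64_t kShadowOffset = 0x7fff8000ULL;

uint64_t MemToShadow(uint64_t a) { return (a >> kShadowScale) + kShadowOffset; }
uint64_t RoundUpTo(uint64_t x, uint64_t a)   { return (x + a - 1) & ~(a - 1); }
uint64_t RoundDownTo(uint64_t x, uint64_t a) { return x & ~(a - 1); }

// Shrink the shadow of [p, p+size) inward to whole pages: a partial page at
// either end may also hold live shadow for neighboring user memory, so it
// must not be handed to madvise(MADV_DONTNEED).
void FlushableShadowRange(uint64_t p, uint64_t size, uint64_t page_size,
                          uint64_t *beg, uint64_t *end) {
  *beg = RoundUpTo(MemToShadow(p), page_size);
  *end = RoundDownTo(MemToShadow(p + size), page_size);
  if (*end < *beg) *end = *beg;  // Chunk too small to cover a full shadow page.
}
```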
```diff
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common.h b/compiler-rt/lib/sanitizer_common/sanitizer_common.h
index 74962b44b85..1d002398c78 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common.h
@@ -50,6 +50,7 @@ void *Mprotect(uptr fixed_addr, uptr size);
 void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type);
 // Used to check if we can map shadow memory to a fixed location.
 bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
+void FlushUnneededShadowMemory(uptr addr, uptr size);
 
 // Internal allocator
 void *InternalAlloc(uptr size);
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_posix.cc b/compiler-rt/lib/sanitizer_common/sanitizer_posix.cc
index 0cc514d3a56..32657838600 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_posix.cc
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_posix.cc
@@ -115,6 +115,10 @@ void *Mprotect(uptr fixed_addr, uptr size) {
                       -1, 0);
 }
 
+void FlushUnneededShadowMemory(uptr addr, uptr size) {
+  madvise((void*)addr, size, MADV_DONTNEED);
+}
+
 void *MapFileToMemory(const char *file_name, uptr *buff_size) {
   fd_t fd = internal_open(file_name, false);
   CHECK_NE(fd, kInvalidFd);
diff --git a/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator64_testlib.cc b/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator64_testlib.cc
index 3f56cc4463c..3e9c541bceb 100644
--- a/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator64_testlib.cc
+++ b/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator64_testlib.cc
@@ -10,7 +10,14 @@
 // The primary purpose of this file is an end-to-end integration test
 // for CombinedAllocator.
 //===----------------------------------------------------------------------===//
+/* Usage:
+clang++ -fno-exceptions -g -fPIC -I. -I../include -Isanitizer \
+ sanitizer_common/tests/sanitizer_allocator64_testlib.cc \
+ sanitizer_common/sanitizer_*.cc -shared -o testmalloc.so
+LD_PRELOAD=`pwd`/testmalloc.so /your/app
+*/
 #include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_common.h"
 #include <stddef.h>
 #include <stdio.h>
 #include <unistd.h>
@@ -20,10 +27,9 @@ namespace {
 static const uptr kAllocatorSpace = 0x600000000000ULL;
 static const uptr kAllocatorSize = 0x10000000000;  // 1T.
-typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 16,
-  DefaultSizeClassMap> PrimaryAllocator;
-typedef SizeClassAllocatorLocalCache<PrimaryAllocator::kNumClasses,
-  PrimaryAllocator> AllocatorCache;
+typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0,
+  CompactSizeClassMap> PrimaryAllocator;
+typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
 typedef LargeMmapAllocator<> SecondaryAllocator;
 typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
   SecondaryAllocator> Allocator;
@@ -34,7 +40,7 @@ static Allocator allocator;
 static int inited = 0;
 
 __attribute__((constructor))
-void Init() {
+static void Init() {
   if (inited) return;
   inited = true;  // this must happen before any threads are created.
   allocator.Init();
@@ -51,37 +57,54 @@ void *malloc(size_t size) {
 }
 
 void free(void *p) {
-  assert(inited);
+  if (!inited) return;
+  // assert(inited);
   allocator.Deallocate(&cache, p);
 }
 
 void *calloc(size_t nmemb, size_t size) {
+  Init();
   assert(inited);
   return allocator.Allocate(&cache, nmemb * size, 8, /*cleared=*/true);
 }
 
 void *realloc(void *p, size_t new_size) {
+  Init();
   assert(inited);
   return allocator.Reallocate(&cache, p, new_size, 8);
 }
 
-void *memalign() { assert(0); }
+void *memalign(size_t boundary, size_t size) {
+  Init();
+  return allocator.Allocate(&cache, size, boundary);
+}
+
+void *__libc_memalign(size_t boundary, size_t size) {
+  Init();
+  return allocator.Allocate(&cache, size, boundary);
+}
 
 int posix_memalign(void **memptr, size_t alignment, size_t size) {
+  Init();
   *memptr = allocator.Allocate(&cache, size, alignment);
   CHECK_EQ(((uptr)*memptr & (alignment - 1)), 0);
   return 0;
 }
 
 void *valloc(size_t size) {
+  Init();
   assert(inited);
   return allocator.Allocate(&cache, size, GetPageSizeCached());
 }
 
 void *pvalloc(size_t size) {
+  Init();
   assert(inited);
   if (size == 0) size = GetPageSizeCached();
   return allocator.Allocate(&cache, size, GetPageSizeCached());
 }
+
+void malloc_usable_size() { }
+void mallinfo() { }
+void mallopt() { }
 }
 #endif
diff --git a/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cc b/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cc
index 162bcc49d66..6cc42497512 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cc
+++ b/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cc
@@ -71,9 +71,7 @@ uptr GetShadowMemoryConsumption() {
 }
 
 void FlushShadowMemory() {
-  madvise((void*)kLinuxShadowBeg,
-          kLinuxShadowEnd - kLinuxShadowBeg,
-          MADV_DONTNEED);
+  FlushUnneededShadowMemory(kLinuxShadowBeg, kLinuxShadowEnd - kLinuxShadowBeg);
 }
 
 #ifndef TSAN_GO
```
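The testlib edits above all follow one interposition pattern: every entry point calls `Init()` itself, and `free()` tolerates pointers it never handed out, because under `LD_PRELOAD` the dynamic loader can reach `calloc()` or `free()` before the library's constructor runs. A minimal sketch of that pattern; the bump-pointer arena is a toy stand-in for the sanitizer allocator, not part of the commit:

```cpp
#include <stddef.h>

static int inited = 0;
static char arena[1 << 20];   // Toy backing store; stands in for the real allocator.
static size_t arena_pos = 0;

static void Init() {
  if (inited) return;
  inited = 1;  // Must happen before any threads are created.
  // One-time allocator setup would go here.
}

// Every interposed entry point self-initializes: under LD_PRELOAD the
// dynamic loader may call malloc() before our constructor has run.
extern "C" void *malloc(size_t size) {
  Init();
  size = (size + 15) & ~(size_t)15;  // Keep 16-byte alignment.
  if (arena_pos + size > sizeof(arena)) return nullptr;
  void *p = arena + arena_pos;
  arena_pos += size;
  return p;
}

// free() may receive pointers allocated before we took over, so it bails
// out instead of asserting (mirroring the `if (!inited) return;` above).
extern "C" void free(void *p) {
  if (!inited) return;
  (void)p;  // A real implementation would return p to the allocator here.
}
```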
```diff
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.h b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
index 2a720cb0d11..6b0ab0d385e 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
@@ -65,10 +65,22 @@ const uptr kAllocatorSpace = 0x7d0000000000ULL;
 #endif
 const uptr kAllocatorSize = 0x10000000000ULL;  // 1T.
 
+struct TsanMapUnmapCallback {
+  void OnMap(uptr p, uptr size) const { }
+  void OnUnmap(uptr p, uptr size) const {
+    // We are about to unmap a chunk of user memory.
+    // Mark the corresponding shadow memory as not needed.
+    uptr shadow_beg = MemToShadow(p);
+    uptr shadow_end = MemToShadow(p + size);
+    CHECK(IsAligned(shadow_end|shadow_beg, GetPageSizeCached()));
+    FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
+  }
+};
+
 typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, sizeof(MBlock),
     DefaultSizeClassMap> PrimaryAllocator;
 typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
-typedef LargeMmapAllocator<> SecondaryAllocator;
+typedef LargeMmapAllocator<TsanMapUnmapCallback> SecondaryAllocator;
 typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
     SecondaryAllocator> Allocator;
 Allocator *allocator();
```
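Note that tsan's callback only CHECKs alignment where asan's rounds: unlike asan's compacting mapping, tsan's shadow of a page-aligned user range is itself page-aligned, so no shadow page is shared with a neighbor. Plugging `TsanMapUnmapCallback` into the template parameter works because `LargeMmapAllocator` invokes the callback's `OnMap()` after mapping a chunk and `OnUnmap()` before releasing one. A simplified sketch of that protocol; `TinyMmapAllocator` and the logging callback are illustrative toys, not the real classes:

```cpp
#include <sys/mman.h>
#include <cstdio>
#include <cstddef>

// Toy callback: the real TsanMapUnmapCallback flushes shadow in OnUnmap().
struct LoggingMapUnmapCallback {
  void OnMap(void *p, size_t size) const {
    printf("mapped    %zu bytes at %p\n", size, p);
  }
  void OnUnmap(void *p, size_t size) const {
    printf("unmapping %zu bytes at %p\n", size, p);
  }
};

// Toy secondary allocator, parameterized the same way as LargeMmapAllocator:
// the callback hooks bracket the underlying mmap/munmap calls.
template <class MapUnmapCallback>
struct TinyMmapAllocator {
  void *Allocate(size_t size) {
    void *p = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) return nullptr;
    MapUnmapCallback().OnMap(p, size);
    return p;
  }
  void Deallocate(void *p, size_t size) {
    MapUnmapCallback().OnUnmap(p, size);  // Hook fires before the unmap.
    munmap(p, size);
  }
};

int main() {
  TinyMmapAllocator<LoggingMapUnmapCallback> a;
  void *p = a.Allocate(1 << 20);
  a.Deallocate(p, 1 << 20);
  return 0;
}
```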

