author     Kostya Serebryany <kcc@google.com>  2012-12-05 10:09:15 +0000
committer  Kostya Serebryany <kcc@google.com>  2012-12-05 10:09:15 +0000
commit     571232b8cf60847ac7d7d93cbc5af7aecadcb2b9 (patch)
tree       9bb4251a482b1d07ce16447d1c44046c7e3d762f
parent     c01cca2d4c4aa85a166fd6704b6571ccbd3edc34 (diff)
download   bcm5719-llvm-571232b8cf60847ac7d7d93cbc5af7aecadcb2b9.tar.gz
           bcm5719-llvm-571232b8cf60847ac7d7d93cbc5af7aecadcb2b9.zip
[tsan] get rid of *allocator64* files, moving everything to *allocator* files. This will help with the 32-bit allocator implementation and testing
llvm-svn: 169368
8 files changed, 522 insertions, 547 deletions
diff --git a/compiler-rt/lib/asan/asan_interceptors.cc b/compiler-rt/lib/asan/asan_interceptors.cc
index efd4bc53a2b..e4706828a09 100644
--- a/compiler-rt/lib/asan/asan_interceptors.cc
+++ b/compiler-rt/lib/asan/asan_interceptors.cc
@@ -179,6 +179,8 @@ INTERCEPTOR(void, siglongjmp, void *env, int val) {
 
 #if ASAN_INTERCEPT___CXA_THROW
 INTERCEPTOR(void, __cxa_throw, void *a, void *b, void *c) {
+  Printf("__asan's __cxa_throw %p; REAL(__cxa_throw) %p PLAIN %p\n",
+         __interceptor___cxa_throw, REAL(__cxa_throw), __cxa_throw);
   CHECK(REAL(__cxa_throw));
   __asan_handle_no_return();
   REAL(__cxa_throw)(a, b, c);
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
index 58d28604a72..b2aff5dfe07 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
@@ -99,6 +99,208 @@ struct AllocatorListNode {
 
 typedef IntrusiveList<AllocatorListNode> AllocatorFreeList;
 
+// SizeClassAllocator64 -- allocator for 64-bit address space.
+//
+// Space: a portion of address space of kSpaceSize bytes starting at
+// a fixed address (kSpaceBeg). Both constants are powers of two and
+// kSpaceBeg is kSpaceSize-aligned.
+//
+// Region: a part of Space dedicated to a single size class.
+// There are kNumClasses Regions of equal size.
+//
+// UserChunk: a piece of memory returned to user.
+// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
+//
+// A Region looks like this:
+// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
+template <const uptr kSpaceBeg, const uptr kSpaceSize,
+          const uptr kMetadataSize, class SizeClassMap>
+class SizeClassAllocator64 {
+ public:
+  void Init() {
+    CHECK_EQ(AllocBeg(), reinterpret_cast<uptr>(MmapFixedNoReserve(
+             AllocBeg(), AllocSize())));
+  }
+
+  bool CanAllocate(uptr size, uptr alignment) {
+    return size <= SizeClassMap::kMaxSize &&
+      alignment <= SizeClassMap::kMaxSize;
+  }
+
+  void *Allocate(uptr size, uptr alignment) {
+    CHECK(CanAllocate(size, alignment));
+    return AllocateBySizeClass(SizeClassMap::ClassID(size));
+  }
+
+  void Deallocate(void *p) {
+    CHECK(PointerIsMine(p));
+    DeallocateBySizeClass(p, GetSizeClass(p));
+  }
+
+  // Allocate several chunks of the given class_id.
+  void BulkAllocate(uptr class_id, AllocatorFreeList *free_list) {
+    CHECK_LT(class_id, kNumClasses);
+    RegionInfo *region = GetRegionInfo(class_id);
+    SpinMutexLock l(&region->mutex);
+    if (region->free_list.empty()) {
+      PopulateFreeList(class_id, region);
+    }
+    CHECK(!region->free_list.empty());
+    uptr count = SizeClassMap::MaxCached(class_id);
+    if (region->free_list.size() <= count) {
+      free_list->append_front(&region->free_list);
+    } else {
+      for (uptr i = 0; i < count; i++) {
+        AllocatorListNode *node = region->free_list.front();
+        region->free_list.pop_front();
+        free_list->push_front(node);
+      }
+    }
+    CHECK(!free_list->empty());
+  }
+
+  // Swallow the entire free_list for the given class_id.
+  void BulkDeallocate(uptr class_id, AllocatorFreeList *free_list) {
+    CHECK_LT(class_id, kNumClasses);
+    RegionInfo *region = GetRegionInfo(class_id);
+    SpinMutexLock l(&region->mutex);
+    region->free_list.append_front(free_list);
+  }
+
+  static bool PointerIsMine(void *p) {
+    return reinterpret_cast<uptr>(p) / kSpaceSize == kSpaceBeg / kSpaceSize;
+  }
+
+  static uptr GetSizeClass(void *p) {
+    return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClasses;
+  }
+
+  static void *GetBlockBegin(void *p) {
+    uptr class_id = GetSizeClass(p);
+    uptr size = SizeClassMap::Size(class_id);
+    uptr chunk_idx = GetChunkIdx((uptr)p, size);
+    uptr reg_beg = (uptr)p & ~(kRegionSize - 1);
+    uptr begin = reg_beg + chunk_idx * size;
+    return (void*)begin;
+  }
+
+  static uptr GetActuallyAllocatedSize(void *p) {
+    CHECK(PointerIsMine(p));
+    return SizeClassMap::Size(GetSizeClass(p));
+  }
+
+  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
+
+  void *GetMetaData(void *p) {
+    uptr class_id = GetSizeClass(p);
+    uptr size = SizeClassMap::Size(class_id);
+    uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
+    return reinterpret_cast<void*>(kSpaceBeg + (kRegionSize * (class_id + 1)) -
+                                   (1 + chunk_idx) * kMetadataSize);
+  }
+
+  uptr TotalMemoryUsed() {
+    uptr res = 0;
+    for (uptr i = 0; i < kNumClasses; i++)
+      res += GetRegionInfo(i)->allocated_user;
+    return res;
+  }
+
+  // Test-only.
+  void TestOnlyUnmap() {
+    UnmapOrDie(reinterpret_cast<void*>(AllocBeg()), AllocSize());
+  }
+
+  static uptr AllocBeg()  { return kSpaceBeg; }
+  static uptr AllocSize() { return kSpaceSize + AdditionalSize(); }
+
+  typedef SizeClassMap SizeClassMapT;
+  static const uptr kNumClasses = SizeClassMap::kNumClasses;  // 2^k <= 256
+
+ private:
+  static const uptr kRegionSize = kSpaceSize / kNumClasses;
+  COMPILER_CHECK(kSpaceBeg % kSpaceSize == 0);
+  COMPILER_CHECK(kNumClasses <= SizeClassMap::kNumClasses);
+  // kRegionSize must be >= 2^32.
+  COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
+  // Populate the free list with at most this number of bytes at once
+  // or with one element if its size is greater.
+  static const uptr kPopulateSize = 1 << 18;
+
+  struct RegionInfo {
+    SpinMutex mutex;
+    AllocatorFreeList free_list;
+    uptr allocated_user;  // Bytes allocated for user memory.
+    uptr allocated_meta;  // Bytes allocated for metadata.
+    char padding[kCacheLineSize - 3 * sizeof(uptr) - sizeof(AllocatorFreeList)];
+  };
+  COMPILER_CHECK(sizeof(RegionInfo) == kCacheLineSize);
+
+  static uptr AdditionalSize() {
+    uptr PageSize = GetPageSizeCached();
+    uptr res = Max(sizeof(RegionInfo) * kNumClasses, PageSize);
+    CHECK_EQ(res % PageSize, 0);
+    return res;
+  }
+
+  RegionInfo *GetRegionInfo(uptr class_id) {
+    CHECK_LT(class_id, kNumClasses);
+    RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg + kSpaceSize);
+    return &regions[class_id];
+  }
+
+  static uptr GetChunkIdx(uptr chunk, uptr size) {
+    u32 offset = chunk % kRegionSize;
+    // Here we divide by a non-constant. This is costly.
+    // We require that kRegionSize is at least 2^32 so that offset is 32-bit.
+    // We save 2x by using 32-bit div, but may need to use a 256-way switch.
+    return offset / (u32)size;
+  }
+
+  void PopulateFreeList(uptr class_id, RegionInfo *region) {
+    uptr size = SizeClassMap::Size(class_id);
+    uptr beg_idx = region->allocated_user;
+    uptr end_idx = beg_idx + kPopulateSize;
+    region->free_list.clear();
+    uptr region_beg = kSpaceBeg + kRegionSize * class_id;
+    uptr idx = beg_idx;
+    uptr i = 0;
+    do {  // do-while loop because we need to put at least one item.
+      uptr p = region_beg + idx;
+      region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
+      idx += size;
+      i++;
+    } while (idx < end_idx);
+    region->allocated_user += idx - beg_idx;
+    region->allocated_meta += i * kMetadataSize;
+    if (region->allocated_user + region->allocated_meta > kRegionSize) {
+      Printf("Out of memory. Dying.\n");
+      Printf("The process has exhausted %zuMB for size class %zu.\n",
+             kRegionSize / 1024 / 1024, size);
+      Die();
+    }
+  }
+
+  void *AllocateBySizeClass(uptr class_id) {
+    CHECK_LT(class_id, kNumClasses);
+    RegionInfo *region = GetRegionInfo(class_id);
+    SpinMutexLock l(&region->mutex);
+    if (region->free_list.empty()) {
+      PopulateFreeList(class_id, region);
+    }
+    CHECK(!region->free_list.empty());
+    AllocatorListNode *node = region->free_list.front();
+    region->free_list.pop_front();
+    return reinterpret_cast<void*>(node);
+  }
+
+  void DeallocateBySizeClass(void *p, uptr class_id) {
+    RegionInfo *region = GetRegionInfo(class_id);
+    SpinMutexLock l(&region->mutex);
+    region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
+  }
+};
+
 // Objects of this type should be used as local caches for SizeClassAllocator64.
 // Since the typical use of this class is to have one object per thread in TLS,
 // is has to be POD.
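The comment block above is the heart of the design: both the size class and the metadata address of a chunk are pure functions of the pointer value, computed with no memory access. The following standalone sketch (standard C++; the constants are hypothetical stand-ins for the template parameters, chosen to match the unit tests' instantiation further down) replays the arithmetic of GetSizeClass and GetMetaData:

    // Minimal sketch of the address arithmetic, assuming fixed values for
    // kSpaceBeg/kSpaceSize/kNumClasses; not the real allocator, which takes
    // these as template parameters and gets kNumClasses from its SizeClassMap.
    #include <cstdint>
    #include <cstdio>

    static const uint64_t kSpaceBeg     = 0x700000000000ULL;  // 7 * 2^44
    static const uint64_t kSpaceSize    = 0x010000000000ULL;  // 2^40 (1T)
    static const uint64_t kMetadataSize = 16;
    static const uint64_t kNumClasses   = 256;
    static const uint64_t kRegionSize   = kSpaceSize / kNumClasses;  // 2^32

    // Which region (and therefore which size class) does p fall into?
    uint64_t GetSizeClass(uint64_t p) {
      return (p / kRegionSize) % kNumClasses;
    }

    // MetaChunks grow downward from the region end, so chunk i's metadata
    // sits at region_end - (i + 1) * kMetadataSize (cf. GetMetaData above).
    uint64_t GetMetaData(uint64_t p, uint64_t chunk_size) {
      uint64_t class_id  = GetSizeClass(p);
      uint64_t chunk_idx = (p % kRegionSize) / chunk_size;
      return kSpaceBeg + kRegionSize * (class_id + 1) -
             (1 + chunk_idx) * kMetadataSize;
    }

    int main() {
      // Chunk #1 (the second chunk) of a hypothetical 64-byte class in region 3.
      uint64_t p = kSpaceBeg + 3 * kRegionSize + 64;
      std::printf("class=%llu meta=0x%llx\n",
                  (unsigned long long)GetSizeClass(p),
                  (unsigned long long)GetMetaData(p, 64));
      return 0;
    }

With these numbers kRegionSize comes out to exactly 2^32, which is the lower bound the COMPILER_CHECK in the class enforces so that GetChunkIdx can get away with a 32-bit division.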
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator64.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator64.h
deleted file mode 100644
index f2e13af2008..00000000000
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator64.h
+++ /dev/null
@@ -1,229 +0,0 @@
-//===-- sanitizer_allocator64.h ---------------------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-// Specialized allocator which works only in 64-bit address space.
-// It is used by ThreadSanitizer, MemorySanitizer and possibly other tools.
-// The main feature of this allocator is that the header is located far away
-// from the user memory region, so that the tool does not use extra shadow
-// for the header.
-// Another important feature is that the size class of a pointer is computed
-// without any memory accesses by simply looking at the address.
-//
-//===----------------------------------------------------------------------===//
-#ifndef SANITIZER_ALLOCATOR64_H
-#define SANITIZER_ALLOCATOR64_H
-
-#include "sanitizer_allocator.h"
-
-namespace __sanitizer {
-
-// SizeClassAllocator64 -- allocator for 64-bit address space.
-//
-// Space: a portion of address space of kSpaceSize bytes starting at
-// a fixed address (kSpaceBeg). Both constants are powers of two and
-// kSpaceBeg is kSpaceSize-aligned.
-//
-// Region: a part of Space dedicated to a single size class.
-// There are kNumClasses Regions of equal size.
-//
-// UserChunk: a piece of memory returned to user.
-// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
-//
-// A Region looks like this:
-// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
-template <const uptr kSpaceBeg, const uptr kSpaceSize,
-          const uptr kMetadataSize, class SizeClassMap>
-class SizeClassAllocator64 {
- public:
-  void Init() {
-    CHECK_EQ(AllocBeg(), reinterpret_cast<uptr>(MmapFixedNoReserve(
-             AllocBeg(), AllocSize())));
-  }
-
-  bool CanAllocate(uptr size, uptr alignment) {
-    return size <= SizeClassMap::kMaxSize &&
-      alignment <= SizeClassMap::kMaxSize;
-  }
-
-  void *Allocate(uptr size, uptr alignment) {
-    CHECK(CanAllocate(size, alignment));
-    return AllocateBySizeClass(SizeClassMap::ClassID(size));
-  }
-
-  void Deallocate(void *p) {
-    CHECK(PointerIsMine(p));
-    DeallocateBySizeClass(p, GetSizeClass(p));
-  }
-
-  // Allocate several chunks of the given class_id.
-  void BulkAllocate(uptr class_id, AllocatorFreeList *free_list) {
-    CHECK_LT(class_id, kNumClasses);
-    RegionInfo *region = GetRegionInfo(class_id);
-    SpinMutexLock l(&region->mutex);
-    if (region->free_list.empty()) {
-      PopulateFreeList(class_id, region);
-    }
-    CHECK(!region->free_list.empty());
-    uptr count = SizeClassMap::MaxCached(class_id);
-    if (region->free_list.size() <= count) {
-      free_list->append_front(&region->free_list);
-    } else {
-      for (uptr i = 0; i < count; i++) {
-        AllocatorListNode *node = region->free_list.front();
-        region->free_list.pop_front();
-        free_list->push_front(node);
-      }
-    }
-    CHECK(!free_list->empty());
-  }
-
-  // Swallow the entire free_list for the given class_id.
-  void BulkDeallocate(uptr class_id, AllocatorFreeList *free_list) {
-    CHECK_LT(class_id, kNumClasses);
-    RegionInfo *region = GetRegionInfo(class_id);
-    SpinMutexLock l(&region->mutex);
-    region->free_list.append_front(free_list);
-  }
-
-  static bool PointerIsMine(void *p) {
-    return reinterpret_cast<uptr>(p) / kSpaceSize == kSpaceBeg / kSpaceSize;
-  }
-
-  static uptr GetSizeClass(void *p) {
-    return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClasses;
-  }
-
-  static void *GetBlockBegin(void *p) {
-    uptr class_id = GetSizeClass(p);
-    uptr size = SizeClassMap::Size(class_id);
-    uptr chunk_idx = GetChunkIdx((uptr)p, size);
-    uptr reg_beg = (uptr)p & ~(kRegionSize - 1);
-    uptr begin = reg_beg + chunk_idx * size;
-    return (void*)begin;
-  }
-
-  static uptr GetActuallyAllocatedSize(void *p) {
-    CHECK(PointerIsMine(p));
-    return SizeClassMap::Size(GetSizeClass(p));
-  }
-
-  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
-
-  void *GetMetaData(void *p) {
-    uptr class_id = GetSizeClass(p);
-    uptr size = SizeClassMap::Size(class_id);
-    uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
-    return reinterpret_cast<void*>(kSpaceBeg + (kRegionSize * (class_id + 1)) -
-                                   (1 + chunk_idx) * kMetadataSize);
-  }
-
-  uptr TotalMemoryUsed() {
-    uptr res = 0;
-    for (uptr i = 0; i < kNumClasses; i++)
-      res += GetRegionInfo(i)->allocated_user;
-    return res;
-  }
-
-  // Test-only.
-  void TestOnlyUnmap() {
-    UnmapOrDie(reinterpret_cast<void*>(AllocBeg()), AllocSize());
-  }
-
-  static uptr AllocBeg()  { return kSpaceBeg; }
-  static uptr AllocSize() { return kSpaceSize + AdditionalSize(); }
-
-  typedef SizeClassMap SizeClassMapT;
-  static const uptr kNumClasses = SizeClassMap::kNumClasses;  // 2^k <= 256
-
- private:
-  static const uptr kRegionSize = kSpaceSize / kNumClasses;
-  COMPILER_CHECK(kSpaceBeg % kSpaceSize == 0);
-  COMPILER_CHECK(kNumClasses <= SizeClassMap::kNumClasses);
-  // kRegionSize must be >= 2^32.
-  COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
-  // Populate the free list with at most this number of bytes at once
-  // or with one element if its size is greater.
-  static const uptr kPopulateSize = 1 << 18;
-
-  struct RegionInfo {
-    SpinMutex mutex;
-    AllocatorFreeList free_list;
-    uptr allocated_user;  // Bytes allocated for user memory.
-    uptr allocated_meta;  // Bytes allocated for metadata.
-    char padding[kCacheLineSize - 3 * sizeof(uptr) - sizeof(AllocatorFreeList)];
-  };
-  COMPILER_CHECK(sizeof(RegionInfo) == kCacheLineSize);
-
-  static uptr AdditionalSize() {
-    uptr PageSize = GetPageSizeCached();
-    uptr res = Max(sizeof(RegionInfo) * kNumClasses, PageSize);
-    CHECK_EQ(res % PageSize, 0);
-    return res;
-  }
-
-  RegionInfo *GetRegionInfo(uptr class_id) {
-    CHECK_LT(class_id, kNumClasses);
-    RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg + kSpaceSize);
-    return &regions[class_id];
-  }
-
-  static uptr GetChunkIdx(uptr chunk, uptr size) {
-    u32 offset = chunk % kRegionSize;
-    // Here we divide by a non-constant. This is costly.
-    // We require that kRegionSize is at least 2^32 so that offset is 32-bit.
-    // We save 2x by using 32-bit div, but may need to use a 256-way switch.
-    return offset / (u32)size;
-  }
-
-  void PopulateFreeList(uptr class_id, RegionInfo *region) {
-    uptr size = SizeClassMap::Size(class_id);
-    uptr beg_idx = region->allocated_user;
-    uptr end_idx = beg_idx + kPopulateSize;
-    region->free_list.clear();
-    uptr region_beg = kSpaceBeg + kRegionSize * class_id;
-    uptr idx = beg_idx;
-    uptr i = 0;
-    do {  // do-while loop because we need to put at least one item.
-      uptr p = region_beg + idx;
-      region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
-      idx += size;
-      i++;
-    } while (idx < end_idx);
-    region->allocated_user += idx - beg_idx;
-    region->allocated_meta += i * kMetadataSize;
-    if (region->allocated_user + region->allocated_meta > kRegionSize) {
-      Printf("Out of memory. Dying.\n");
-      Printf("The process has exhausted %zuMB for size class %zu.\n",
-             kRegionSize / 1024 / 1024, size);
-      Die();
-    }
-  }
-
-  void *AllocateBySizeClass(uptr class_id) {
-    CHECK_LT(class_id, kNumClasses);
-    RegionInfo *region = GetRegionInfo(class_id);
-    SpinMutexLock l(&region->mutex);
-    if (region->free_list.empty()) {
-      PopulateFreeList(class_id, region);
-    }
-    CHECK(!region->free_list.empty());
-    AllocatorListNode *node = region->free_list.front();
-    region->free_list.pop_front();
-    return reinterpret_cast<void*>(node);
-  }
-
-  void DeallocateBySizeClass(void *p, uptr class_id) {
-    RegionInfo *region = GetRegionInfo(class_id);
-    SpinMutexLock l(&region->mutex);
-    region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
-  }
-};
-
-}  // namespace __sanitizer
-
-#endif  // SANITIZER_ALLOCATOR64_H
diff --git a/compiler-rt/lib/sanitizer_common/tests/CMakeLists.txt b/compiler-rt/lib/sanitizer_common/tests/CMakeLists.txt
index 1c781a1bd19..3baa08bc20d 100644
--- a/compiler-rt/lib/sanitizer_common/tests/CMakeLists.txt
+++ b/compiler-rt/lib/sanitizer_common/tests/CMakeLists.txt
@@ -8,9 +8,6 @@ set(SANITIZER_UNITTESTS
   sanitizer_stackdepot_test.cc
   sanitizer_test_main.cc
   )
-if(CMAKE_SIZEOF_VOID_P EQUAL 8 AND NOT LLVM_BUILD_32_BITS)
-  list(APPEND SANITIZER_UNITTESTS sanitizer_allocator64_test.cc)
-endif()
 
 include_directories(..)
 include_directories(../..)
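A note on kPopulateSize in the code being moved: the do-while in PopulateFreeList refills a region's free list with at most kPopulateSize bytes worth of chunks per call, but never fewer than one chunk, so size classes larger than 256 KiB still make progress. A minimal standalone sketch of just that counting logic (ChunksPerPopulate is an illustrative helper, not part of the allocator):

    #include <cstdint>
    #include <cstdio>

    static const uint64_t kPopulateSize = 1 << 18;  // 256 KiB, as above

    // Mirrors the do-while in PopulateFreeList: carve chunks until the batch
    // budget is reached, but always carve at least one.
    uint64_t ChunksPerPopulate(uint64_t size) {
      uint64_t idx = 0, n = 0;
      do {
        idx += size;
        n++;
      } while (idx < kPopulateSize);
      return n;
    }

    int main() {
      std::printf("%llu %llu %llu\n",
                  (unsigned long long)ChunksPerPopulate(16),        // 16384
                  (unsigned long long)ChunksPerPopulate(1 << 14),   // 16
                  (unsigned long long)ChunksPerPopulate(1 << 20));  // 1
      return 0;
    }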
diff --git a/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc b/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc
deleted file mode 100644
index eccf70b6207..00000000000
--- a/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc
+++ /dev/null
@@ -1,310 +0,0 @@
-//===-- sanitizer_allocator64_test.cc -------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-// Tests for sanitizer_allocator64.h.
-//===----------------------------------------------------------------------===//
-#include "sanitizer_common/sanitizer_allocator64.h"
-#include "gtest/gtest.h"
-
-#include <algorithm>
-#include <vector>
-
-static const uptr kAllocatorSpace = 0x700000000000ULL;
-static const uptr kAllocatorSize = 0x010000000000ULL;  // 1T.
-
-typedef SizeClassAllocator64<
-  kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap> Allocator64;
-
-typedef SizeClassAllocator64<
-  kAllocatorSpace, kAllocatorSize, 16, CompactSizeClassMap> Allocator64Compact;
-
-template <class SizeClassMap>
-void TestSizeClassMap() {
-  typedef SizeClassMap SCMap;
-#if 0
-  for (uptr i = 0; i < SCMap::kNumClasses; i++) {
-    printf("c%ld => %ld (%lx) cached=%ld(%ld)\n",
-        i, SCMap::Size(i), SCMap::Size(i), SCMap::MaxCached(i) * SCMap::Size(i),
-        SCMap::MaxCached(i));
-  }
-#endif
-  for (uptr c = 0; c < SCMap::kNumClasses; c++) {
-    uptr s = SCMap::Size(c);
-    CHECK_EQ(SCMap::ClassID(s), c);
-    if (c != SCMap::kNumClasses - 1)
-      CHECK_EQ(SCMap::ClassID(s + 1), c + 1);
-    CHECK_EQ(SCMap::ClassID(s - 1), c);
-    if (c)
-      CHECK_GT(SCMap::Size(c), SCMap::Size(c-1));
-  }
-  CHECK_EQ(SCMap::ClassID(SCMap::kMaxSize + 1), 0);
-
-  for (uptr s = 1; s <= SCMap::kMaxSize; s++) {
-    uptr c = SCMap::ClassID(s);
-    CHECK_LT(c, SCMap::kNumClasses);
-    CHECK_GE(SCMap::Size(c), s);
-    if (c > 0)
-      CHECK_LT(SCMap::Size(c-1), s);
-  }
-}
-
-TEST(SanitizerCommon, DefaultSizeClassMap) {
-  TestSizeClassMap<DefaultSizeClassMap>();
-}
-
-TEST(SanitizerCommon, CompactSizeClassMap) {
-  TestSizeClassMap<CompactSizeClassMap>();
-}
-
-template <class Allocator>
-void TestSizeClassAllocator() {
-  Allocator a;
-  a.Init();
-
-  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
-    50000, 60000, 100000, 300000, 500000, 1000000, 2000000};
-
-  std::vector<void *> allocated;
-
-  uptr last_total_allocated = 0;
-  for (int i = 0; i < 5; i++) {
-    // Allocate a bunch of chunks.
-    for (uptr s = 0; s < sizeof(sizes) /sizeof(sizes[0]); s++) {
-      uptr size = sizes[s];
-      if (!a.CanAllocate(size, 1)) continue;
-      // printf("s = %ld\n", size);
-      uptr n_iter = std::max((uptr)2, 1000000 / size);
-      for (uptr i = 0; i < n_iter; i++) {
-        void *x = a.Allocate(size, 1);
-        allocated.push_back(x);
-        CHECK(a.PointerIsMine(x));
-        CHECK_GE(a.GetActuallyAllocatedSize(x), size);
-        uptr class_id = a.GetSizeClass(x);
-        CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size));
-        uptr *metadata = reinterpret_cast<uptr*>(a.GetMetaData(x));
-        metadata[0] = reinterpret_cast<uptr>(x) + 1;
-        metadata[1] = 0xABCD;
-      }
-    }
-    // Deallocate all.
-    for (uptr i = 0; i < allocated.size(); i++) {
-      void *x = allocated[i];
-      uptr *metadata = reinterpret_cast<uptr*>(a.GetMetaData(x));
-      CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);
-      CHECK_EQ(metadata[1], 0xABCD);
-      a.Deallocate(x);
-    }
-    allocated.clear();
-    uptr total_allocated = a.TotalMemoryUsed();
-    if (last_total_allocated == 0)
-      last_total_allocated = total_allocated;
-    CHECK_EQ(last_total_allocated, total_allocated);
-  }
-
-  a.TestOnlyUnmap();
-}
-
-TEST(SanitizerCommon, SizeClassAllocator64) {
-  TestSizeClassAllocator<Allocator64>();
-}
-
-TEST(SanitizerCommon, SizeClassAllocator64Compact) {
-  TestSizeClassAllocator<Allocator64Compact>();
-}
-
-template <class Allocator>
-void SizeClassAllocator64MetadataStress() {
-  Allocator a;
-  a.Init();
-  static volatile void *sink;
-
-  const uptr kNumAllocs = 10000;
-  void *allocated[kNumAllocs];
-  for (uptr i = 0; i < kNumAllocs; i++) {
-    uptr size = (i % 4096) + 1;
-    void *x = a.Allocate(size, 1);
-    allocated[i] = x;
-  }
-  // Get Metadata kNumAllocs^2 times.
-  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
-    sink = a.GetMetaData(allocated[i % kNumAllocs]);
-  }
-  for (uptr i = 0; i < kNumAllocs; i++) {
-    a.Deallocate(allocated[i]);
-  }
-
-  a.TestOnlyUnmap();
-  (void)sink;
-}
-
-TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
-  SizeClassAllocator64MetadataStress<Allocator64>();
-}
-
-TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
-  SizeClassAllocator64MetadataStress<Allocator64Compact>();
-}
-
-template<class Allocator>
-void FailInAssertionOnOOM() {
-  Allocator a;
-  a.Init();
-  const uptr size = 1 << 20;
-  for (int i = 0; i < 1000000; i++) {
-    a.Allocate(size, 1);
-  }
-
-  a.TestOnlyUnmap();
-}
-
-TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
-  EXPECT_DEATH(FailInAssertionOnOOM<Allocator64>(), "Out of memory");
-}
-
-TEST(SanitizerCommon, LargeMmapAllocator) {
-  LargeMmapAllocator a;
-  a.Init();
-
-  static const int kNumAllocs = 100;
-  void *allocated[kNumAllocs];
-  static const uptr size = 1000;
-  // Allocate some.
-  for (int i = 0; i < kNumAllocs; i++) {
-    allocated[i] = a.Allocate(size, 1);
-  }
-  // Deallocate all.
-  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
-  for (int i = 0; i < kNumAllocs; i++) {
-    void *p = allocated[i];
-    CHECK(a.PointerIsMine(p));
-    a.Deallocate(p);
-  }
-  // Check that non left.
-  CHECK_EQ(a.TotalMemoryUsed(), 0);
-
-  // Allocate some more, also add metadata.
-  for (int i = 0; i < kNumAllocs; i++) {
-    void *x = a.Allocate(size, 1);
-    CHECK_GE(a.GetActuallyAllocatedSize(x), size);
-    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
-    *meta = i;
-    allocated[i] = x;
-  }
-  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
-  // Deallocate all in reverse order.
-  for (int i = 0; i < kNumAllocs; i++) {
-    int idx = kNumAllocs - i - 1;
-    void *p = allocated[idx];
-    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
-    CHECK_EQ(*meta, idx);
-    CHECK(a.PointerIsMine(p));
-    a.Deallocate(p);
-  }
-  CHECK_EQ(a.TotalMemoryUsed(), 0);
-
-  for (uptr alignment = 8; alignment <= (1<<28); alignment *= 2) {
-    for (int i = 0; i < kNumAllocs; i++) {
-      uptr size = ((i % 10) + 1) * 4096;
-      allocated[i] = a.Allocate(size, alignment);
-      CHECK_EQ(0, (uptr)allocated[i] % alignment);
-      char *p = (char*)allocated[i];
-      p[0] = p[size - 1] = 0;
-    }
-    for (int i = 0; i < kNumAllocs; i++) {
-      a.Deallocate(allocated[i]);
-    }
-  }
-}
-
-template
-<class PrimaryAllocator, class SecondaryAllocator, class AllocatorCache>
-void TestCombinedAllocator() {
-  CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator> a;
-  a.Init();
-
-  AllocatorCache cache;
-  cache.Init();
-
-  EXPECT_EQ(a.Allocate(&cache, -1, 1), (void*)0);
-  EXPECT_EQ(a.Allocate(&cache, -1, 1024), (void*)0);
-  EXPECT_EQ(a.Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
-  EXPECT_EQ(a.Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
-  EXPECT_EQ(a.Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);
-
-  const uptr kNumAllocs = 100000;
-  const uptr kNumIter = 10;
-  for (uptr iter = 0; iter < kNumIter; iter++) {
-    std::vector<void*> allocated;
-    for (uptr i = 0; i < kNumAllocs; i++) {
-      uptr size = (i % (1 << 14)) + 1;
-      if ((i % 1024) == 0)
-        size = 1 << (10 + (i % 14));
-      void *x = a.Allocate(&cache, size, 1);
-      uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
-      CHECK_EQ(*meta, 0);
-      *meta = size;
-      allocated.push_back(x);
-    }
-
-    random_shuffle(allocated.begin(), allocated.end());
-
-    for (uptr i = 0; i < kNumAllocs; i++) {
-      void *x = allocated[i];
-      uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
-      CHECK_NE(*meta, 0);
-      CHECK(a.PointerIsMine(x));
-      *meta = 0;
-      a.Deallocate(&cache, x);
-    }
-    allocated.clear();
-    a.SwallowCache(&cache);
-  }
-  a.TestOnlyUnmap();
-}
-
-TEST(SanitizerCommon, CombinedAllocator) {
-  TestCombinedAllocator<Allocator64,
-      LargeMmapAllocator,
-      SizeClassAllocatorLocalCache<Allocator64> > ();
-}
-
-template <class AllocatorCache>
-void TestSizeClassAllocatorLocalCache() {
-  static THREADLOCAL AllocatorCache static_allocator_cache;
-  static_allocator_cache.Init();
-  AllocatorCache cache;
-  typename AllocatorCache::Allocator a;
-
-  a.Init();
-  cache.Init();
-
-  const uptr kNumAllocs = 10000;
-  const int kNumIter = 100;
-  uptr saved_total = 0;
-  for (int i = 0; i < kNumIter; i++) {
-    void *allocated[kNumAllocs];
-    for (uptr i = 0; i < kNumAllocs; i++) {
-      allocated[i] = cache.Allocate(&a, 0);
-    }
-    for (uptr i = 0; i < kNumAllocs; i++) {
-      cache.Deallocate(&a, 0, allocated[i]);
-    }
-    cache.Drain(&a);
-    uptr total_allocated = a.TotalMemoryUsed();
-    if (saved_total)
-      CHECK_EQ(saved_total, total_allocated);
-    saved_total = total_allocated;
-  }
-
-  a.TestOnlyUnmap();
-}
-
-TEST(SanitizerCommon, SizeClassAllocator64LocalCache) {
-  TestSizeClassAllocatorLocalCache<
-      SizeClassAllocatorLocalCache<Allocator64> >();
-}
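TestSizeClassMap above is moved verbatim into sanitizer_allocator_test.cc below; it pins down the contract every SizeClassMap must satisfy. To make that contract concrete, here is a self-contained toy map (a power-of-two stand-in, deliberately not the real DefaultSizeClassMap, whose class spacing is denser) that passes the same invariants:

    #include <cassert>
    #include <cstdint>

    // Toy size-class map: classes are 16 B, 32 B, ..., 1 MiB. Hypothetical and
    // for illustration only; it obeys the invariants TestSizeClassMap checks.
    struct PowerOfTwoSizeClassMap {
      static const uint64_t kNumClasses = 17;
      static const uint64_t kMaxSize = 1ULL << 20;
      static uint64_t Size(uint64_t class_id) { return 16ULL << class_id; }
      static uint64_t ClassID(uint64_t size) {  // smallest class that fits
        uint64_t c = 0;
        while (Size(c) < size) c++;
        return c;
      }
    };

    int main() {
      typedef PowerOfTwoSizeClassMap SCMap;
      // Round trip: the class of a class's size is that class.
      for (uint64_t c = 0; c < SCMap::kNumClasses; c++)
        assert(SCMap::ClassID(SCMap::Size(c)) == c);
      // Every request fits its class, and no smaller class would do.
      for (uint64_t s = 1; s <= SCMap::kMaxSize; s++) {
        uint64_t c = SCMap::ClassID(s);
        assert(SCMap::Size(c) >= s);
        if (c > 0) assert(SCMap::Size(c - 1) < s);
      }
      return 0;
    }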
diff --git a/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator64_testlib.cc b/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator64_testlib.cc
index d95c217b677..b41f808b2b9 100644
--- a/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator64_testlib.cc
+++ b/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator64_testlib.cc
@@ -10,7 +10,7 @@
 // The primary purpose of this file is an end-to-end integration test
 // for CombinedAllocator.
 //===----------------------------------------------------------------------===//
-#include "sanitizer_common/sanitizer_allocator64.h"
+#include "sanitizer_common/sanitizer_allocator.h"
 #include <stddef.h>
 #include <stdio.h>
 #include <unistd.h>
diff --git a/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cc b/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
index 80139f0401c..b49e39893ab 100644
--- a/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
+++ b/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
@@ -8,13 +8,328 @@
 //===----------------------------------------------------------------------===//
 //
 // This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+// Tests for sanitizer_allocator.h.
 //
 //===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_allocator.h"
 #include "sanitizer_common/sanitizer_common.h"
+
 #include "gtest/gtest.h"
+
 #include <stdlib.h>
+#include <algorithm>
+#include <vector>
+
+#if SANITIZER_WORDSIZE == 64
+static const uptr kAllocatorSpace = 0x700000000000ULL;
+static const uptr kAllocatorSize = 0x010000000000ULL;  // 1T.
+
+typedef SizeClassAllocator64<
+  kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap> Allocator64;
+
+typedef SizeClassAllocator64<
+  kAllocatorSpace, kAllocatorSize, 16, CompactSizeClassMap> Allocator64Compact;
+#endif
+
+template <class SizeClassMap>
+void TestSizeClassMap() {
+  typedef SizeClassMap SCMap;
+#if 0
+  for (uptr i = 0; i < SCMap::kNumClasses; i++) {
+    printf("c%ld => %ld (%lx) cached=%ld(%ld)\n",
+        i, SCMap::Size(i), SCMap::Size(i), SCMap::MaxCached(i) * SCMap::Size(i),
+        SCMap::MaxCached(i));
+  }
+#endif
+  for (uptr c = 0; c < SCMap::kNumClasses; c++) {
+    uptr s = SCMap::Size(c);
+    CHECK_EQ(SCMap::ClassID(s), c);
+    if (c != SCMap::kNumClasses - 1)
+      CHECK_EQ(SCMap::ClassID(s + 1), c + 1);
+    CHECK_EQ(SCMap::ClassID(s - 1), c);
+    if (c)
+      CHECK_GT(SCMap::Size(c), SCMap::Size(c-1));
+  }
+  CHECK_EQ(SCMap::ClassID(SCMap::kMaxSize + 1), 0);
+
+  for (uptr s = 1; s <= SCMap::kMaxSize; s++) {
+    uptr c = SCMap::ClassID(s);
+    CHECK_LT(c, SCMap::kNumClasses);
+    CHECK_GE(SCMap::Size(c), s);
+    if (c > 0)
+      CHECK_LT(SCMap::Size(c-1), s);
+  }
+}
+
+TEST(SanitizerCommon, DefaultSizeClassMap) {
+  TestSizeClassMap<DefaultSizeClassMap>();
+}
+
+TEST(SanitizerCommon, CompactSizeClassMap) {
+  TestSizeClassMap<CompactSizeClassMap>();
+}
+
+template <class Allocator>
+void TestSizeClassAllocator() {
+  Allocator a;
+  a.Init();
+
+  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
+    50000, 60000, 100000, 300000, 500000, 1000000, 2000000};
+
+  std::vector<void *> allocated;
+
+  uptr last_total_allocated = 0;
+  for (int i = 0; i < 5; i++) {
+    // Allocate a bunch of chunks.
+    for (uptr s = 0; s < sizeof(sizes) /sizeof(sizes[0]); s++) {
+      uptr size = sizes[s];
+      if (!a.CanAllocate(size, 1)) continue;
+      // printf("s = %ld\n", size);
+      uptr n_iter = std::max((uptr)2, 1000000 / size);
+      for (uptr i = 0; i < n_iter; i++) {
+        void *x = a.Allocate(size, 1);
+        allocated.push_back(x);
+        CHECK(a.PointerIsMine(x));
+        CHECK_GE(a.GetActuallyAllocatedSize(x), size);
+        uptr class_id = a.GetSizeClass(x);
+        CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size));
+        uptr *metadata = reinterpret_cast<uptr*>(a.GetMetaData(x));
+        metadata[0] = reinterpret_cast<uptr>(x) + 1;
+        metadata[1] = 0xABCD;
+      }
+    }
+    // Deallocate all.
+    for (uptr i = 0; i < allocated.size(); i++) {
+      void *x = allocated[i];
+      uptr *metadata = reinterpret_cast<uptr*>(a.GetMetaData(x));
+      CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);
+      CHECK_EQ(metadata[1], 0xABCD);
+      a.Deallocate(x);
+    }
+    allocated.clear();
+    uptr total_allocated = a.TotalMemoryUsed();
+    if (last_total_allocated == 0)
+      last_total_allocated = total_allocated;
+    CHECK_EQ(last_total_allocated, total_allocated);
+  }
+
+  a.TestOnlyUnmap();
+}
+
+#if SANITIZER_WORDSIZE == 64
+TEST(SanitizerCommon, SizeClassAllocator64) {
+  TestSizeClassAllocator<Allocator64>();
+}
+
+TEST(SanitizerCommon, SizeClassAllocator64Compact) {
+  TestSizeClassAllocator<Allocator64Compact>();
+}
+#endif
+
+template <class Allocator>
+void SizeClassAllocator64MetadataStress() {
+  Allocator a;
+  a.Init();
+  static volatile void *sink;
+
+  const uptr kNumAllocs = 10000;
+  void *allocated[kNumAllocs];
+  for (uptr i = 0; i < kNumAllocs; i++) {
+    uptr size = (i % 4096) + 1;
+    void *x = a.Allocate(size, 1);
+    allocated[i] = x;
+  }
+  // Get Metadata kNumAllocs^2 times.
+  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
+    sink = a.GetMetaData(allocated[i % kNumAllocs]);
+  }
+  for (uptr i = 0; i < kNumAllocs; i++) {
+    a.Deallocate(allocated[i]);
+  }
+
+  a.TestOnlyUnmap();
+  (void)sink;
+}
+
+#if SANITIZER_WORDSIZE == 64
+TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
+  SizeClassAllocator64MetadataStress<Allocator64>();
+}
-
-namespace __sanitizer {
+TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
+  SizeClassAllocator64MetadataStress<Allocator64Compact>();
+}
+#endif
+
+template<class Allocator>
+void FailInAssertionOnOOM() {
+  Allocator a;
+  a.Init();
+  const uptr size = 1 << 20;
+  for (int i = 0; i < 1000000; i++) {
+    a.Allocate(size, 1);
+  }
+
+  a.TestOnlyUnmap();
+}
+
+#if SANITIZER_WORDSIZE == 64
+TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
+  EXPECT_DEATH(FailInAssertionOnOOM<Allocator64>(), "Out of memory");
+}
+#endif
+
+TEST(SanitizerCommon, LargeMmapAllocator) {
+  fprintf(stderr, "xxxx %ld\n", 0L);
+  LargeMmapAllocator a;
+  a.Init();
+
+  static const int kNumAllocs = 100;
+  void *allocated[kNumAllocs];
+  static const uptr size = 1000;
+  // Allocate some.
+  for (int i = 0; i < kNumAllocs; i++) {
+    fprintf(stderr, "zzz0 %ld\n", size);
+    allocated[i] = a.Allocate(size, 1);
+  }
+  // Deallocate all.
+  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
+  for (int i = 0; i < kNumAllocs; i++) {
+    void *p = allocated[i];
+    CHECK(a.PointerIsMine(p));
+    a.Deallocate(p);
+  }
+  // Check that non left.
+  CHECK_EQ(a.TotalMemoryUsed(), 0);
+
+  // Allocate some more, also add metadata.
+  for (int i = 0; i < kNumAllocs; i++) {
+    fprintf(stderr, "zzz1 %ld\n", size);
+    void *x = a.Allocate(size, 1);
+    CHECK_GE(a.GetActuallyAllocatedSize(x), size);
+    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
+    *meta = i;
+    allocated[i] = x;
+  }
+  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
+  // Deallocate all in reverse order.
+  for (int i = 0; i < kNumAllocs; i++) {
+    int idx = kNumAllocs - i - 1;
+    void *p = allocated[idx];
+    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
+    CHECK_EQ(*meta, idx);
+    CHECK(a.PointerIsMine(p));
+    a.Deallocate(p);
+  }
+  CHECK_EQ(a.TotalMemoryUsed(), 0);
+  uptr max_alignment = SANITIZER_WORDSIZE == 64 ? (1 << 28) : (1 << 24);
+  for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
+    for (int i = 0; i < kNumAllocs; i++) {
+      uptr size = ((i % 10) + 1) * 4096;
+      fprintf(stderr, "zzz1 %ld %ld\n", size, alignment);
+      allocated[i] = a.Allocate(size, alignment);
+      CHECK_EQ(0, (uptr)allocated[i] % alignment);
+      char *p = (char*)allocated[i];
+      p[0] = p[size - 1] = 0;
+    }
+    for (int i = 0; i < kNumAllocs; i++) {
+      a.Deallocate(allocated[i]);
+    }
+  }
+}
+
+template
+<class PrimaryAllocator, class SecondaryAllocator, class AllocatorCache>
+void TestCombinedAllocator() {
+  CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator> a;
+  a.Init();
+
+  AllocatorCache cache;
+  cache.Init();
+
+  EXPECT_EQ(a.Allocate(&cache, -1, 1), (void*)0);
+  EXPECT_EQ(a.Allocate(&cache, -1, 1024), (void*)0);
+  EXPECT_EQ(a.Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
+  EXPECT_EQ(a.Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
+  EXPECT_EQ(a.Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);
+
+  const uptr kNumAllocs = 100000;
+  const uptr kNumIter = 10;
+  for (uptr iter = 0; iter < kNumIter; iter++) {
+    std::vector<void*> allocated;
+    for (uptr i = 0; i < kNumAllocs; i++) {
+      uptr size = (i % (1 << 14)) + 1;
+      if ((i % 1024) == 0)
+        size = 1 << (10 + (i % 14));
+      void *x = a.Allocate(&cache, size, 1);
+      uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
+      CHECK_EQ(*meta, 0);
+      *meta = size;
+      allocated.push_back(x);
+    }
+
+    random_shuffle(allocated.begin(), allocated.end());
+
+    for (uptr i = 0; i < kNumAllocs; i++) {
+      void *x = allocated[i];
+      uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
+      CHECK_NE(*meta, 0);
+      CHECK(a.PointerIsMine(x));
+      *meta = 0;
+      a.Deallocate(&cache, x);
+    }
+    allocated.clear();
+    a.SwallowCache(&cache);
+  }
+  a.TestOnlyUnmap();
+}
+
+#if SANITIZER_WORDSIZE == 64
+TEST(SanitizerCommon, CombinedAllocator) {
+  TestCombinedAllocator<Allocator64,
+      LargeMmapAllocator,
+      SizeClassAllocatorLocalCache<Allocator64> > ();
+}
+#endif
+
+template <class AllocatorCache>
+void TestSizeClassAllocatorLocalCache() {
+  static THREADLOCAL AllocatorCache static_allocator_cache;
+  static_allocator_cache.Init();
+  AllocatorCache cache;
+  typename AllocatorCache::Allocator a;
+
+  a.Init();
+  cache.Init();
+
+  const uptr kNumAllocs = 10000;
+  const int kNumIter = 100;
+  uptr saved_total = 0;
+  for (int i = 0; i < kNumIter; i++) {
+    void *allocated[kNumAllocs];
+    for (uptr i = 0; i < kNumAllocs; i++) {
+      allocated[i] = cache.Allocate(&a, 0);
+    }
+    for (uptr i = 0; i < kNumAllocs; i++) {
+      cache.Deallocate(&a, 0, allocated[i]);
+    }
+    cache.Drain(&a);
+    uptr total_allocated = a.TotalMemoryUsed();
+    if (saved_total)
+      CHECK_EQ(saved_total, total_allocated);
+    saved_total = total_allocated;
+  }
+
+  a.TestOnlyUnmap();
+}
+
+#if SANITIZER_WORDSIZE == 64
+TEST(SanitizerCommon, SizeClassAllocator64LocalCache) {
+  TestSizeClassAllocatorLocalCache<
+      SizeClassAllocatorLocalCache<Allocator64> >();
+}
+#endif
 
 TEST(Allocator, Basic) {
   char *p = (char*)InternalAlloc(10);
@@ -54,5 +369,3 @@ TEST(Allocator, ScopedBuffer) {
     EXPECT_EQ('c', char_buf[i]);
   }
 }
-
-}  // namespace __sanitizer
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.h b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
index c1b28fcc4a1..7754b744dac 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
@@ -27,7 +27,7 @@
 #define TSAN_RTL_H
 
 #include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_allocator64.h"
+#include "sanitizer_common/sanitizer_allocator.h"
 #include "tsan_clock.h"
 #include "tsan_defs.h"
 #include "tsan_flags.h"