diff options
author | Dean Michael Berris <dberris@google.com> | 2018-11-20 03:56:04 +0000 |
---|---|---|
committer | Dean Michael Berris <dberris@google.com> | 2018-11-20 03:56:04 +0000 |
commit | 388af45f18298cb6684dfb61ebeca2070468c8d7 (patch) | |
tree | 5ccd892abf09dc5226671bc8f06812179c10d640 /compiler-rt/lib/xray | |
parent | 25e44e7c33a639b3c5266901ba109fefa39ca8c4 (diff) | |
download | bcm5719-llvm-388af45f18298cb6684dfb61ebeca2070468c8d7.tar.gz bcm5719-llvm-388af45f18298cb6684dfb61ebeca2070468c8d7.zip |
[XRay] Add a test for allocator exhaustion
Use a more representative test of allocating small chunks for
oddly-sized (small) objects from an allocator that has a page's worth of
memory.
llvm-svn: 347286
Diffstat (limited to 'compiler-rt/lib/xray')
-rw-r--r-- | compiler-rt/lib/xray/tests/unit/allocator_test.cc | 20 |
-rw-r--r-- | compiler-rt/lib/xray/xray_allocator.h | 15 |
2 files changed, 27 insertions(+), 8 deletions(-)
diff --git a/compiler-rt/lib/xray/tests/unit/allocator_test.cc b/compiler-rt/lib/xray/tests/unit/allocator_test.cc index be404160e41..0177798b069 100644 --- a/compiler-rt/lib/xray/tests/unit/allocator_test.cc +++ b/compiler-rt/lib/xray/tests/unit/allocator_test.cc @@ -33,10 +33,28 @@ TEST(AllocatorTest, Allocate) { TEST(AllocatorTest, OverAllocate) { Allocator<sizeof(TestData)> A(sizeof(TestData)); auto B1 = A.Allocate(); - (void)B1; + ASSERT_NE(B1.Data, nullptr); auto B2 = A.Allocate(); ASSERT_EQ(B2.Data, nullptr); } +struct OddSizedData { + s64 A; + s32 B; +}; + +TEST(AllocatorTest, AllocateBoundaries) { + Allocator<sizeof(OddSizedData)> A(GetPageSizeCached()); + + // Keep allocating until we hit a nullptr block. + unsigned C = 0; + auto Expected = + GetPageSizeCached() / RoundUpTo(sizeof(OddSizedData), kCacheLineSize); + for (auto B = A.Allocate(); B.Data != nullptr; B = A.Allocate(), ++C) + ; + + ASSERT_EQ(C, Expected); +} + } // namespace } // namespace __xray diff --git a/compiler-rt/lib/xray/xray_allocator.h b/compiler-rt/lib/xray/xray_allocator.h index f77bccbd9c7..af63d9d3730 100644 --- a/compiler-rt/lib/xray/xray_allocator.h +++ b/compiler-rt/lib/xray/xray_allocator.h @@ -53,7 +53,8 @@ template <class T> void deallocate(T *B) XRAY_NEVER_INSTRUMENT { internal_munmap(B, RoundedSize); } -template <class T = uint8_t> T *allocateBuffer(size_t S) XRAY_NEVER_INSTRUMENT { +template <class T = unsigned char> +T *allocateBuffer(size_t S) XRAY_NEVER_INSTRUMENT { uptr RoundedSize = RoundUpTo(S * sizeof(T), GetPageSizeCached()); uptr B = internal_mmap(NULL, RoundedSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); @@ -111,8 +112,8 @@ template <size_t N> struct Allocator { private: const size_t MaxMemory{0}; - uint8_t *BackingStore = nullptr; - uint8_t *AlignedNextBlock = nullptr; + unsigned char *BackingStore = nullptr; + unsigned char *AlignedNextBlock = nullptr; size_t AllocatedBlocks = 0; SpinMutex Mutex{}; @@ -141,7 +142,7 @@ private: return 
nullptr; } - AlignedNextBlock = reinterpret_cast<uint8_t *>(AlignedNextBlockNum); + AlignedNextBlock = reinterpret_cast<unsigned char *>(AlignedNextBlockNum); // Assert that AlignedNextBlock is cache-line aligned. DCHECK_EQ(reinterpret_cast<uintptr_t>(AlignedNextBlock) % kCacheLineSize, @@ -154,15 +155,15 @@ private: // Align the pointer we'd like to return to an appropriate alignment, then // advance the pointer from where to start allocations. void *Result = AlignedNextBlock; - AlignedNextBlock = reinterpret_cast<uint8_t *>( - reinterpret_cast<uint8_t *>(AlignedNextBlock) + N); + AlignedNextBlock = reinterpret_cast<unsigned char *>( + reinterpret_cast<unsigned char *>(AlignedNextBlock) + N); ++AllocatedBlocks; return Result; } public: explicit Allocator(size_t M) XRAY_NEVER_INSTRUMENT - : MaxMemory(nearest_boundary(M, kCacheLineSize)) {} + : MaxMemory(RoundUpTo(M, kCacheLineSize)) {} Block Allocate() XRAY_NEVER_INSTRUMENT { return {Alloc()}; } |