author     Evgeniy Stepanov <eugeni.stepanov@gmail.com>	2016-09-15 22:34:53 +0000
committer  Evgeniy Stepanov <eugeni.stepanov@gmail.com>	2016-09-15 22:34:53 +0000
commit     8652d6ac051e3247e8576b1035492d63937aba20 (patch)
tree       94449d57b83122fbddb427ea7d2b04b0383ec9e6 /compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
parent     49e2967f2709b6333ae9373cf304af97a3ea785f (diff)
[asan] Reenable 64-bit allocator on android/aarch64.
This is a re-commit of r281371, with updated tests.
llvm-svn: 281674
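
Context for the change: AArch64 Android commonly runs userspace with a 39-bit virtual address space, far smaller than the 47-bit layout the test previously assumed, so the 64-bit primary allocator can only be exercised there with a small region at a fixed low address and a very compact size-class map. Below is a minimal standalone sketch (illustrative only, not code from this commit) of the constraint the new Android/AArch64 constants in the diff must satisfy:

```cpp
// Sketch: the primary allocator region chosen for Android/AArch64 in the
// diff below must fit entirely inside the 39-bit userspace VA.
#include <cassert>
#include <cstdint>

int main() {
  const uint64_t kAddressSpaceSize = 1ULL << 39;      // 512 GB of usable VA
  const uint64_t kAllocatorSpace = 0x3000000000ULL;   // region base (192 GB)
  const uint64_t kAllocatorSize = 0x2000000000ULL;    // region size (128 GB)
  // Base + size = 0x5000000000 (320 GB), comfortably below 1ULL << 39.
  assert(kAllocatorSpace + kAllocatorSize <= kAddressSpaceSize);
  return 0;
}
```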
Diffstat (limited to 'compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cc')
-rw-r--r--  compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cc  51 ++++++++++++++++++++++++++++++++++++---------------
1 file changed, 36 insertions(+), 15 deletions(-)
diff --git a/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cc b/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
index 02620a7e551..30ff321fa50 100644
--- a/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
+++ b/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
@@ -38,17 +38,24 @@ using namespace __sanitizer;
 static const uptr kAllocatorSpace = ~(uptr)0;
 static const uptr kAllocatorSize = 0x8000000000ULL;  // 500G
 static const u64 kAddressSpaceSize = 1ULL << 47;
+typedef DefaultSizeClassMap SizeClassMap;
+#elif SANITIZER_ANDROID && defined(__aarch64__)
+static const uptr kAllocatorSpace = 0x3000000000ULL;
+static const uptr kAllocatorSize = 0x2000000000ULL;
+static const u64 kAddressSpaceSize = 1ULL << 39;
+typedef VeryCompactSizeClassMap SizeClassMap;
 #else
 static const uptr kAllocatorSpace = 0x700000000000ULL;
 static const uptr kAllocatorSize = 0x010000000000ULL;  // 1T.
 static const u64 kAddressSpaceSize = 1ULL << 47;
+typedef DefaultSizeClassMap SizeClassMap;
 #endif
 
 struct AP64 {  // Allocator Params. Short name for shorter demangled names..
   static const uptr kSpaceBeg = kAllocatorSpace;
   static const uptr kSpaceSize = kAllocatorSize;
   static const uptr kMetadataSize = 16;
-  typedef DefaultSizeClassMap SizeClassMap;
+  typedef ::SizeClassMap SizeClassMap;
   typedef NoOpMapUnmapCallback MapUnmapCallback;
   static const uptr kFlags = 0;
 };
@@ -57,7 +64,7 @@ struct AP64Dyn {
   static const uptr kSpaceBeg = ~(uptr)0;
   static const uptr kSpaceSize = kAllocatorSize;
   static const uptr kMetadataSize = 16;
-  typedef DefaultSizeClassMap SizeClassMap;
+  typedef ::SizeClassMap SizeClassMap;
   typedef NoOpMapUnmapCallback MapUnmapCallback;
   static const uptr kFlags = 0;
 };
@@ -212,9 +219,12 @@ TEST(SanitizerCommon, SizeClassAllocator64Dynamic) {
   TestSizeClassAllocator<Allocator64Dynamic>();
 }
 
+#if !SANITIZER_ANDROID
 TEST(SanitizerCommon, SizeClassAllocator64Compact) {
   TestSizeClassAllocator<Allocator64Compact>();
 }
+#endif
+
 TEST(SanitizerCommon, SizeClassAllocator64VeryCompact) {
   TestSizeClassAllocator<Allocator64VeryCompact>();
 }
@@ -237,7 +247,7 @@ void SizeClassAllocatorMetadataStress() {
   void *allocated[kNumAllocs];
   void *meta[kNumAllocs];
   for (uptr i = 0; i < kNumAllocs; i++) {
-    void *x = cache.Allocate(a, 1 + i % 50);
+    void *x = cache.Allocate(a, 1 + i % (Allocator::kNumClasses - 1));
     allocated[i] = x;
     meta[i] = a->GetMetaData(x);
   }
@@ -248,7 +258,7 @@ void SizeClassAllocatorMetadataStress() {
     EXPECT_EQ(m, meta[idx]);
   }
   for (uptr i = 0; i < kNumAllocs; i++) {
-    cache.Deallocate(a, 1 + i % 50, allocated[i]);
+    cache.Deallocate(a, 1 + i % (Allocator::kNumClasses - 1), allocated[i]);
   }
 
   a->TestOnlyUnmap();
@@ -267,10 +277,13 @@ TEST(SanitizerCommon, SizeClassAllocator64DynamicMetadataStress) {
   SizeClassAllocatorMetadataStress<Allocator64Dynamic>();
 }
+#if !SANITIZER_ANDROID
 TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
   SizeClassAllocatorMetadataStress<Allocator64Compact>();
 }
 #endif
+
+#endif
 #endif  // SANITIZER_CAN_USE_ALLOCATOR64
 TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
   SizeClassAllocatorMetadataStress<Allocator32Compact>();
 }
@@ -304,14 +317,18 @@ void SizeClassAllocatorGetBlockBeginStress(u64 TotalSize) {
 // to run them all at the same time. FIXME: Make them not flaky and reenable.
 #if !SANITIZER_WINDOWS
 TEST(SanitizerCommon, SizeClassAllocator64GetBlockBegin) {
-  SizeClassAllocatorGetBlockBeginStress<Allocator64>(1ULL << 33);
+  SizeClassAllocatorGetBlockBeginStress<Allocator64>(
+      1ULL << (SANITIZER_ANDROID ? 31 : 33));
 }
 TEST(SanitizerCommon, SizeClassAllocator64DynamicGetBlockBegin) {
-  SizeClassAllocatorGetBlockBeginStress<Allocator64Dynamic>(1ULL << 33);
+  SizeClassAllocatorGetBlockBeginStress<Allocator64Dynamic>(
+      1ULL << (SANITIZER_ANDROID ? 31 : 33));
 }
+#if !SANITIZER_ANDROID
 TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {
   SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>(1ULL << 33);
 }
+#endif
 TEST(SanitizerCommon, SizeClassAllocator64VeryCompactGetBlockBegin) {
   // Does not have > 4Gb for each class.
   SizeClassAllocatorGetBlockBeginStress<Allocator64VeryCompact>(1ULL << 31);
@@ -336,12 +353,12 @@ int TestMapUnmapCallback::unmap_count;
 
 #if !SANITIZER_WINDOWS
 struct AP64WithCallback {
-    static const uptr kSpaceBeg = kAllocatorSpace;
-    static const uptr kSpaceSize = kAllocatorSize;
-    static const uptr kMetadataSize = 16;
-    typedef DefaultSizeClassMap SizeClassMap;
-    typedef TestMapUnmapCallback MapUnmapCallback;
-    static const uptr kFlags = 0;
+  static const uptr kSpaceBeg = kAllocatorSpace;
+  static const uptr kSpaceSize = kAllocatorSize;
+  static const uptr kMetadataSize = 16;
+  typedef ::SizeClassMap SizeClassMap;
+  typedef TestMapUnmapCallback MapUnmapCallback;
+  static const uptr kFlags = 0;
 };
 
 TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
@@ -358,7 +375,7 @@ TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
   stats.Init();
   const size_t kNumChunks = 128;
   uint32_t chunks[kNumChunks];
-  a->GetFromAllocator(&stats, 32, chunks, kNumChunks);
+  a->GetFromAllocator(&stats, 30, chunks, kNumChunks);
   // State + alloc + metadata + freearray.
   EXPECT_EQ(TestMapUnmapCallback::map_count, 4);
   a->TestOnlyUnmap();
@@ -430,7 +447,7 @@ void FailInAssertionOnOOM() {
 
 // Don't test OOM conditions on Win64 because it causes other tests on the same
 // machine to OOM.
-#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64
+#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !SANITIZER_ANDROID
 TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
   EXPECT_DEATH(FailInAssertionOnOOM<Allocator64>(), "Out of memory");
 }
@@ -583,11 +600,13 @@ TEST(SanitizerCommon, CombinedAllocator64Dynamic) {
       SizeClassAllocatorLocalCache<Allocator64Dynamic> > ();
 }
 
+#if !SANITIZER_ANDROID
 TEST(SanitizerCommon, CombinedAllocator64Compact) {
   TestCombinedAllocator<Allocator64Compact,
       LargeMmapAllocator<>,
       SizeClassAllocatorLocalCache<Allocator64Compact> > ();
 }
+#endif
 
 TEST(SanitizerCommon, CombinedAllocator64VeryCompact) {
   TestCombinedAllocator<Allocator64VeryCompact,
@@ -650,10 +669,12 @@ TEST(SanitizerCommon, SizeClassAllocator64DynamicLocalCache) {
       SizeClassAllocatorLocalCache<Allocator64Dynamic> >();
 }
 
+#if !SANITIZER_ANDROID
 TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
   TestSizeClassAllocatorLocalCache<
       SizeClassAllocatorLocalCache<Allocator64Compact> >();
 }
+#endif
 TEST(SanitizerCommon, SizeClassAllocator64VeryCompactLocalCache) {
   TestSizeClassAllocatorLocalCache<
       SizeClassAllocatorLocalCache<Allocator64VeryCompact> >();
@@ -908,7 +929,7 @@ TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
 
 // Don't test OOM conditions on Win64 because it causes other tests on the same
 // machine to OOM.
-#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64
+#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !SANITIZER_ANDROID
 typedef SizeClassMap<3, 4, 8, 63, 128, 16> SpecialSizeClassMap;
 struct AP64_SpecialSizeClassMap {
   static const uptr kSpaceBeg = kAllocatorSpace;
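
A note on the metadata-stress hunks above: VeryCompactSizeClassMap defines far fewer size classes than the default map, so the old hard-coded `1 + i % 50` could ask the cache for a class ID past the last valid one; deriving the modulus from `Allocator::kNumClasses` keeps every request in range. The smaller class count is presumably also why `GetFromAllocator` now uses class 30 instead of 32, and the `#if !SANITIZER_ANDROID` guards compile out the Allocator64Compact tests that would not fit the much smaller Android mapping. A hypothetical illustration (the class count below is made up for the demo; the real value comes from the allocator's SizeClassMap):

```cpp
// Demonstrates why the stress test switched from "1 + i % 50" to
// "1 + i % (kNumClasses - 1)": with a small size-class map the old
// formula produces class IDs that do not exist.
#include <cstdio>

int main() {
  const unsigned kNumClasses = 17;  // e.g. a very compact map; illustrative
  unsigned max_old = 0, max_new = 0;
  for (unsigned i = 0; i < 1000; i++) {
    unsigned old_id = 1 + i % 50;                 // can reach 50
    unsigned new_id = 1 + i % (kNumClasses - 1);  // always in [1, 16]
    if (old_id > max_old) max_old = old_id;
    if (new_id > max_new) max_new = new_id;
  }
  std::printf("old formula max id: %u (invalid for %u classes)\n",
              max_old, kNumClasses);
  std::printf("new formula max id: %u (always valid)\n", max_new);
  return 0;
}
```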