Diffstat (limited to 'compiler-rt')
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_allocator.h              9
-rw-r--r--  compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cc  19
2 files changed, 19 insertions, 9 deletions
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
index d0716a4a1c8..f17f08eab56 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
@@ -492,11 +492,12 @@ class SizeClassAllocator64 {
   }
 
   static uptr GetChunkIdx(uptr chunk, uptr size) {
-    u32 offset = chunk % kRegionSize;
+    uptr offset = chunk % kRegionSize;
     // Here we divide by a non-constant. This is costly.
-    // We require that kRegionSize is at least 2^32 so that offset is 32-bit.
-    // We save 2x by using 32-bit div, but may need to use a 256-way switch.
-    return offset / (u32)size;
+    // size always fits into 32-bits. If the offset fits too, use 32-bit div.
+    if (offset >> 32)
+      return offset / size;
+    return (u32)offset / (u32)size;
   }
 
   NOINLINE Batch* PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
diff --git a/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cc b/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
index 6b3bd95e713..445cbeed969 100644
--- a/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
+++ b/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
@@ -147,24 +147,26 @@ void SizeClassAllocatorMetadataStress() {
   SizeClassAllocatorLocalCache<Allocator> cache;
   memset(&cache, 0, sizeof(cache));
   cache.Init(0);
-  static volatile void *sink;
 
-  const uptr kNumAllocs = 10000;
+  const uptr kNumAllocs = 1 << 13;
   void *allocated[kNumAllocs];
+  void *meta[kNumAllocs];
   for (uptr i = 0; i < kNumAllocs; i++) {
     void *x = cache.Allocate(a, 1 + i % 50);
     allocated[i] = x;
+    meta[i] = a->GetMetaData(x);
   }
   // Get Metadata kNumAllocs^2 times.
   for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
-    sink = a->GetMetaData(allocated[i % kNumAllocs]);
+    uptr idx = i % kNumAllocs;
+    void *m = a->GetMetaData(allocated[idx]);
+    EXPECT_EQ(m, meta[idx]);
   }
   for (uptr i = 0; i < kNumAllocs; i++) {
     cache.Deallocate(a, 1 + i % 50, allocated[i]);
   }
 
   a->TestOnlyUnmap();
-  (void)sink;
   delete a;
 }
 
@@ -192,6 +194,7 @@ void SizeClassAllocatorGetBlockBeginStress() {
   uptr max_size_class = Allocator::kNumClasses - 1;
   uptr size = Allocator::SizeClassMapT::Size(max_size_class);
   u64 G8 = 1ULL << 33;
+  // Make sure we correctly compute GetBlockBegin() w/o overflow.
   for (size_t i = 0; i <= G8 / size; i++) {
     void *x = cache.Allocate(a, max_size_class);
     void *beg = a->GetBlockBegin(x);
@@ -205,9 +208,15 @@
 }
 
 #if SANITIZER_WORDSIZE == 64
-TEST(SanitizerCommon, DISABLED_SizeClassAllocator64GetBlockBegin) {
+TEST(SanitizerCommon, SizeClassAllocator64GetBlockBegin) {
   SizeClassAllocatorGetBlockBeginStress<Allocator64>();
 }
+TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {
+  SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>();
+}
+TEST(SanitizerCommon, SizeClassAllocator32CompactGetBlockBegin) {
+  SizeClassAllocatorGetBlockBeginStress<Allocator32Compact>();
+}
 #endif  // SANITIZER_WORDSIZE == 64
 
 struct TestMapUnmapCallback {
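Note on the sanitizer_allocator.h hunk: GetChunkIdx() divides the offset within a region by the chunk size, and the divisor always fits in 32 bits, so the patch uses a 32-bit division whenever the offset also fits (the removed comment cited roughly a 2x saving for the 32-bit div). Below is a minimal standalone sketch of the same idea; the kRegionSize value and the uptr/u32 typedefs are placeholders chosen for illustration, not the sanitizer_common definitions.

#include <cassert>
#include <cstdint>

typedef uint64_t uptr;  // stand-in for sanitizer_common's 64-bit uptr
typedef uint32_t u32;

// Placeholder region size, chosen only for this sketch.
static const uptr kRegionSize = 1ULL << 40;

// Mirrors the shape of the patched GetChunkIdx(): prefer a 32-bit
// division when the offset within the region fits into 32 bits.
static uptr GetChunkIdx(uptr chunk, uptr size) {
  uptr offset = chunk % kRegionSize;
  // size always fits into 32-bits. If the offset fits too, use 32-bit div.
  if (offset >> 32)
    return offset / size;
  return (u32)offset / (u32)size;
}

int main() {
  // Both paths must agree with a plain 64-bit division.
  uptr small_chunk = 123456;            // takes the 32-bit path
  uptr large_chunk = (1ULL << 33) + 7;  // takes the 64-bit path
  assert(GetChunkIdx(small_chunk, 48) == (small_chunk % kRegionSize) / 48);
  assert(GetChunkIdx(large_chunk, 48) == (large_chunk % kRegionSize) / 48);
  return 0;
}

The test-side hunks exercise exactly these computations: the metadata stress test now records GetMetaData() for every block up front and checks later lookups with EXPECT_EQ instead of writing into a volatile sink, and the previously DISABLED GetBlockBegin stress test is re-enabled and extended to the compact allocators.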

