author     Kostya Serebryany <kcc@google.com>   2012-12-24 14:35:14 +0000
committer  Kostya Serebryany <kcc@google.com>   2012-12-24 14:35:14 +0000
commit     fc7de2910c236b7010a508ab0db3d879a98e8691
tree       3d7badccc33e4bb0a2b8c98eda1a26659e073272
parent     19969e504581c79a59a38eed35ee48151a6572ac
[sanitizer] make LargeMmapAllocator::GetBlockBegin faster by not using a linked list
llvm-svn: 171035
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_allocator.h             | 55
-rw-r--r--  compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cc | 21
2 files changed, 48 insertions(+), 28 deletions(-)
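The bookkeeping change at the heart of the patch can be sketched as standalone C++ before reading the diff. This is an illustrative reconstruction, not code from the patch: ChunkRegistry, kMaxChunks, Add, and Remove are hypothetical names, and in the real allocator the headers live inside the mmapped chunks themselves rather than being passed in from outside.

// A fixed-capacity array of Header pointers replaces the doubly-linked
// list. Each header records its own slot (chunk_idx), so removal is O(1)
// via swap-with-last, and traversal walks one contiguous array instead
// of chasing next/prev pointers.
#include <cassert>
#include <cstddef>

struct Header {
  size_t map_beg, map_size, size;
  size_t chunk_idx;  // This header's slot in ChunkRegistry::chunks_.
};

class ChunkRegistry {
 public:
  void Add(Header *h) {
    assert(n_chunks_ < kMaxChunks);
    h->chunk_idx = n_chunks_;
    chunks_[n_chunks_++] = h;
  }
  void Remove(Header *h) {
    size_t idx = h->chunk_idx;
    assert(idx < n_chunks_);
    assert(chunks_[idx] == h);
    // Move the last element into the vacated slot and fix its index.
    // Order is not preserved, but no hole is left behind.
    chunks_[idx] = chunks_[--n_chunks_];
    chunks_[idx]->chunk_idx = idx;
  }
  size_t count() const { return n_chunks_; }

 private:
  // The patch sizes the real array with 1 << FIRST_32_SECOND_64(15, 18);
  // a smaller capacity keeps this sketch cheap to instantiate.
  static const size_t kMaxChunks = 1 << 12;
  Header *chunks_[kMaxChunks];
  size_t n_chunks_ = 0;
};

The price of the flat array is a hard cap on the number of live chunks, which the linked list did not have; the patch accepts that bound (32K chunks on 32-bit, 256K on 64-bit) since the secondary allocator only handles large allocations.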
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
index 81b0e6748b3..5eb830932f6 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
@@ -714,11 +714,9 @@ class LargeMmapAllocator {
     h->map_size = map_size;
     {
       SpinMutexLock l(&mutex_);
-      h->next = list_;
-      h->prev = 0;
-      if (list_)
-        list_->prev = h;
-      list_ = h;
+      uptr idx = n_chunks_++;
+      h->chunk_idx = idx;
+      chunks_[idx] = h;
     }
     return reinterpret_cast<void*>(res);
   }
@@ -727,14 +725,12 @@ class LargeMmapAllocator {
     Header *h = GetHeader(p);
     {
       SpinMutexLock l(&mutex_);
-      Header *prev = h->prev;
-      Header *next = h->next;
-      if (prev)
-        prev->next = next;
-      if (next)
-        next->prev = prev;
-      if (h == list_)
-        list_ = next;
+      uptr idx = h->chunk_idx;
+      CHECK_EQ(chunks_[idx], h);
+      CHECK_LT(idx, n_chunks_);
+      chunks_[idx] = chunks_[n_chunks_ - 1];
+      chunks_[idx]->chunk_idx = idx;
+      n_chunks_--;
     }
     MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
     UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
@@ -743,8 +739,10 @@ class LargeMmapAllocator {
   uptr TotalMemoryUsed() {
     SpinMutexLock l(&mutex_);
     uptr res = 0;
-    for (Header *l = list_; l; l = l->next) {
-      res += RoundUpMapSize(l->size);
+    for (uptr i = 0; i < n_chunks_; i++) {
+      Header *h = chunks_[i];
+      CHECK_EQ(h->chunk_idx, i);
+      res += RoundUpMapSize(h->size);
     }
     return res;
   }
@@ -765,20 +763,32 @@ class LargeMmapAllocator {
   void *GetBlockBegin(void *ptr) {
     uptr p = reinterpret_cast<uptr>(ptr);
     SpinMutexLock l(&mutex_);
-    for (Header *l = list_; l; l = l->next) {
-      if (p >= l->map_beg && p < l->map_beg + l->map_size)
-        return GetUser(l);
+    uptr nearest_chunk = 0;
+    // Cache-friendly linear search.
+    for (uptr i = 0; i < n_chunks_; i++) {
+      uptr ch = reinterpret_cast<uptr>(chunks_[i]);
+      if (p < ch) continue;  // p is at left to this chunk, skip it.
+      if (p - ch < p - nearest_chunk)
+        nearest_chunk = ch;
     }
-    return 0;
+    if (!nearest_chunk)
+      return 0;
+    Header *h = reinterpret_cast<Header *>(nearest_chunk);
+    CHECK_GE(nearest_chunk, h->map_beg);
+    CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
+    CHECK_LE(nearest_chunk, p);
+    if (h->map_beg + h->map_size < p)
+      return 0;
+    return GetUser(h);
   }

  private:
+  static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18);
   struct Header {
     uptr map_beg;
     uptr map_size;
     uptr size;
-    Header *next;
-    Header *prev;
+    uptr chunk_idx;
   };

   Header *GetHeader(uptr p) {
@@ -797,7 +807,8 @@ class LargeMmapAllocator {
   }

   uptr page_size_;
-  Header *list_;
+  Header *chunks_[kMaxNumChunks];
+  uptr n_chunks_;
   SpinMutex mutex_;
 };

diff --git a/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cc b/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
index f9c83da2259..1a4122bff8b 100644
--- a/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
+++ b/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
@@ -244,12 +244,13 @@ TEST(SanitizerCommon, LargeMmapAllocator) {
   LargeMmapAllocator<> a;
   a.Init();

-  static const int kNumAllocs = 100;
+  static const int kNumAllocs = 1000;
   char *allocated[kNumAllocs];
-  static const uptr size = 1000;
+  static const uptr size = 4000;
   // Allocate some.
   for (int i = 0; i < kNumAllocs; i++) {
     allocated[i] = (char *)a.Allocate(size, 1);
+    CHECK(a.PointerIsMine(allocated[i]));
   }
   // Deallocate all.
   CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
@@ -269,6 +270,11 @@ TEST(SanitizerCommon, LargeMmapAllocator) {
     *meta = i;
     allocated[i] = x;
   }
+  for (int i = 0; i < kNumAllocs * kNumAllocs; i++) {
+    char *p = allocated[i % kNumAllocs];
+    CHECK(a.PointerIsMine(p));
+    CHECK(a.PointerIsMine(p + 2000));
+  }
   CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
   // Deallocate all in reverse order.
   for (int i = 0; i < kNumAllocs; i++) {
@@ -280,9 +286,12 @@ TEST(SanitizerCommon, LargeMmapAllocator) {
     a.Deallocate(p);
   }
   CHECK_EQ(a.TotalMemoryUsed(), 0);
+
+  // Test alignments.
   uptr max_alignment = SANITIZER_WORDSIZE == 64 ? (1 << 28) : (1 << 24);
   for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
-    for (int i = 0; i < kNumAllocs; i++) {
+    const uptr kNumAlignedAllocs = 100;
+    for (int i = 0; i < kNumAlignedAllocs; i++) {
       uptr size = ((i % 10) + 1) * 4096;
       char *p = allocated[i] = (char *)a.Allocate(size, alignment);
       CHECK_EQ(p, a.GetBlockBegin(p));
@@ -291,7 +300,7 @@ TEST(SanitizerCommon, LargeMmapAllocator) {
       CHECK_EQ(0, (uptr)allocated[i] % alignment);
       p[0] = p[size - 1] = 0;
     }
-    for (int i = 0; i < kNumAllocs; i++) {
+    for (int i = 0; i < kNumAlignedAllocs; i++) {
       a.Deallocate(allocated[i]);
     }
   }
@@ -381,7 +390,7 @@ void TestSizeClassAllocatorLocalCache() {
   const int kNumIter = 100;
   uptr saved_total = 0;
   for (int class_id = 1; class_id <= 5; class_id++) {
-    for (int i = 0; i < kNumIter; i++) {
+    for (int it = 0; it < kNumIter; it++) {
       void *allocated[kNumAllocs];
       for (uptr i = 0; i < kNumAllocs; i++) {
         allocated[i] = cache.Allocate(a, class_id);
@@ -391,7 +400,7 @@ void TestSizeClassAllocatorLocalCache() {
       }
       cache.Drain(a);
       uptr total_allocated = a->TotalMemoryUsed();
-      if (i)
+      if (it)
         CHECK_EQ(saved_total, total_allocated);
       saved_total = total_allocated;
     }
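Why a plain scan suffices in the new GetBlockBegin: each chunk's header sits at a lower address than the user memory it describes, so the owner of an interior pointer p, if one exists, is the header with the largest address not exceeding p. A hedged standalone rendering of that search follows; FindNearestChunk is a hypothetical free function, not the actual method.

#include <cstddef>
#include <cstdint>

// Return the largest chunk-header address that is <= p, or 0 if none.
// The unsigned arithmetic is deliberate: the `continue` guarantees
// ch <= p, and nearest == 0 yields an initial distance of p - 0 == p,
// which any real candidate (ch > 0) beats.
uintptr_t FindNearestChunk(const uintptr_t *chunks, size_t n_chunks,
                           uintptr_t p) {
  uintptr_t nearest = 0;
  for (size_t i = 0; i < n_chunks; i++) {
    uintptr_t ch = chunks[i];
    if (p < ch) continue;  // Chunk begins beyond p; it cannot own p.
    if (p - ch < p - nearest)
      nearest = ch;        // Strictly closer candidate below p.
  }
  return nearest;
}

The nearest header below p still does not own p when p points past the end of that chunk's mapping, which is why the patch follows the scan with CHECK_* assertions and a final map_beg + map_size range test before returning GetUser(h). The scan is O(n_chunks), just like the old list walk, but it streams through one contiguous array instead of dereferencing scattered list nodes, which is the source of the speedup named in the commit title.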