author     Alex Shlyapnikov <alekseys@google.com>  2018-02-24 01:00:27 +0000
committer  Alex Shlyapnikov <alekseys@google.com>  2018-02-24 01:00:27 +0000
commit     524946232b38103b0582a4aecf0c64e99843f8bd (patch)
tree       3f76ec7ec7ae06096e449a76f5b1bd22d147719a
parent     725c035f54718fe488fe2fd4b9d5c9576aca502b (diff)
[Sanitizers] Increase allocated chunk limit for LargeMmapAllocator
Summary:
There are applications out there which allocate more than 1 << 18 large
chunks of memory (those handled by LargeMmapAllocator, aka the secondary
allocator).

For 64 bits, the secondary allocator now stores allocated chunks in a
grow-on-demand region of memory, growing in 128K blocks (1 << 14 chunk
pointers at 8 bytes each), up to 1 << 20 chunks total. The sanitizer
internal allocator's secondary still uses a fixed-size array, now holding
up to 1 << 15 chunks (down to 256K from the 2Mb previously used for that
array).

Nothing is changed for 32 bits; chunks are still stored in a fixed-size
array (up to 1 << 15 chunks).

Reviewers: eugenis

Subscribers: kubamracek, delcypher, #sanitizers, llvm-commits

Differential Revision: https://reviews.llvm.org/D43693

llvm-svn: 326007
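To illustrate the grow-on-demand scheme the summary describes, here is a
minimal standalone C++ sketch. It uses plain POSIX mmap/mprotect where the
actual patch uses the sanitizer's ReservedAddressRange wrapper (which exists
to cover Fuchsia, where MAP_NORESERVE is unavailable); the class name below
is a hypothetical stand-in, not part of the patch:

#include <assert.h>
#include <stdint.h>
#include <sys/mman.h>

// Constants mirroring the patch: room for 1 << 20 chunk pointers,
// committed in blocks of 1 << 14 pointers (128K bytes on 64 bits).
static const uintptr_t kMaxNumChunks = 1 << 20;
static const uintptr_t kChunksBlockCount = 1 << 14;

// Hypothetical stand-in for LargeMmapAllocatorPtrArrayDynamic.
class GrowOnDemandPtrArray {
 public:
  void *Init() {
    // Reserve address space for the maximal array; nothing is
    // committed yet (PROT_NONE + MAP_NORESERVE).
    base_ = mmap(nullptr, kMaxNumChunks * sizeof(uintptr_t), PROT_NONE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
    assert(base_ != MAP_FAILED);
    n_reserved_ = 0;
    return base_;
  }

  // Guarantee that slot n is backed by committed memory before use.
  // The caller appends one chunk at a time, so committing a single
  // block when n hits the committed boundary is sufficient.
  void EnsureSpace(uintptr_t n) {
    assert(n < kMaxNumChunks);
    if (n < n_reserved_) return;  // slot already committed
    int res = mprotect(
        static_cast<char *>(base_) + n_reserved_ * sizeof(uintptr_t),
        kChunksBlockCount * sizeof(uintptr_t), PROT_READ | PROT_WRITE);
    assert(res == 0);
    n_reserved_ += kChunksBlockCount;
  }

 private:
  void *base_;
  uintptr_t n_reserved_;
};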
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_allocator_internal.h  |  7
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h | 68
2 files changed, 64 insertions(+), 11 deletions(-)
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_internal.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_internal.h
index 10536a9659c..2a1fa1d7f81 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_internal.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_internal.h
@@ -46,9 +46,12 @@ typedef SizeClassAllocator32<AP32> PrimaryInternalAllocator;
 typedef SizeClassAllocatorLocalCache<PrimaryInternalAllocator>
     InternalAllocatorCache;
 
+typedef LargeMmapAllocator<NoOpMapUnmapCallback,
+                           LargeMmapAllocatorPtrArrayStatic>
+    SecondaryInternalAllocator;
+
 typedef CombinedAllocator<PrimaryInternalAllocator, InternalAllocatorCache,
-                          LargeMmapAllocator<NoOpMapUnmapCallback>
-                          > InternalAllocator;
+                          SecondaryInternalAllocator> InternalAllocator;
 
 void *InternalAlloc(uptr size, InternalAllocatorCache *cache = nullptr,
                     uptr alignment = 0);
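The hunk above swaps the internal allocator's secondary to the new
static-array storage. To make the effect of the added template parameter
concrete, the two instantiations on 64 bits look roughly like this (the
typedef names here are illustrative, not from the patch):

// Default: user-facing 64-bit allocators get the dynamic, grow-on-demand
// chunk storage via DefaultLargeMmapAllocatorPtrArray.
typedef LargeMmapAllocator<NoOpMapUnmapCallback> UserSecondaryAllocator;

// The internal allocator opts into the small fixed array, capping itself
// at 1 << 15 chunks in exchange for a 256K static footprint.
typedef LargeMmapAllocator<NoOpMapUnmapCallback,
                           LargeMmapAllocatorPtrArrayStatic>
    InternalSecondaryAllocator;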
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
index fb3182c982d..cb84e8fe686 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
@@ -14,14 +14,66 @@
 #error This file must be included inside sanitizer_allocator.h
 #endif
 
+// Fixed array to store LargeMmapAllocator chunks list, limited to 32K total
+// allocated chunks. To be used in memory constrained or not memory hungry cases
+// (currently, 32 bits and internal allocator).
+class LargeMmapAllocatorPtrArrayStatic {
+ public:
+  INLINE void *Init() { return &p_[0]; }
+  INLINE void EnsureSpace(uptr n) { CHECK_LT(n, kMaxNumChunks); }
+ private:
+  static const int kMaxNumChunks = 1 << 15;
+  uptr p_[kMaxNumChunks];
+};
+
+// Much less restricted LargeMmapAllocator chunks list (comparing to
+// PtrArrayStatic). Backed by mmaped memory region and can hold up to 1M chunks.
+// ReservedAddressRange was used instead of just MAP_NORESERVE to achieve the
+// same functionality in Fuchsia case, which does not support MAP_NORESERVE.
+class LargeMmapAllocatorPtrArrayDynamic {
+ public:
+  INLINE void *Init() {
+    uptr p = address_range_.Init(kMaxNumChunks * sizeof(uptr),
+                                 "sanitizer_large_allocator");
+    CHECK(p);
+    return reinterpret_cast<void*>(p);
+  }
+
+  INLINE void EnsureSpace(uptr n) {
+    CHECK_LT(n, kMaxNumChunks);
+    DCHECK(n <= n_reserved_);
+    if (UNLIKELY(n == n_reserved_)) {
+      address_range_.MapOrDie(
+          reinterpret_cast<uptr>(address_range_.base()) +
+              n_reserved_ * sizeof(uptr),
+          kChunksBlockCount * sizeof(uptr));
+      n_reserved_ += kChunksBlockCount;
+    }
+  }
+
+ private:
+  static const int kMaxNumChunks = 1 << 20;
+  static const int kChunksBlockCount = 1 << 14;
+  ReservedAddressRange address_range_;
+  uptr n_reserved_;
+};
+
+#if SANITIZER_WORDSIZE == 32
+typedef LargeMmapAllocatorPtrArrayStatic DefaultLargeMmapAllocatorPtrArray;
+#else
+typedef LargeMmapAllocatorPtrArrayDynamic DefaultLargeMmapAllocatorPtrArray;
+#endif
+
 // This class can (de)allocate only large chunks of memory using mmap/unmap.
 // The main purpose of this allocator is to cover large and rare allocation
 // sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
-template <class MapUnmapCallback = NoOpMapUnmapCallback>
+template <class MapUnmapCallback = NoOpMapUnmapCallback,
+          class PtrArrayT = DefaultLargeMmapAllocatorPtrArray>
 class LargeMmapAllocator {
  public:
   void InitLinkerInitialized() {
     page_size_ = GetPageSizeCached();
+    chunks_ = reinterpret_cast<Header**>(ptr_array_.Init());
   }
 
   void Init() {
@@ -63,11 +115,11 @@ class LargeMmapAllocator {
     CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
     {
       SpinMutexLock l(&mutex_);
+      ptr_array_.EnsureSpace(n_chunks_);
       uptr idx = n_chunks_++;
-      chunks_sorted_ = false;
-      CHECK_LT(idx, kMaxNumChunks);
       h->chunk_idx = idx;
       chunks_[idx] = h;
+      chunks_sorted_ = false;
       stats.n_allocs++;
       stats.currently_allocated += map_size;
       stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
@@ -85,9 +137,8 @@ class LargeMmapAllocator {
     uptr idx = h->chunk_idx;
     CHECK_EQ(chunks_[idx], h);
     CHECK_LT(idx, n_chunks_);
-    chunks_[idx] = chunks_[n_chunks_ - 1];
+    chunks_[idx] = chunks_[--n_chunks_];
     chunks_[idx]->chunk_idx = idx;
-    n_chunks_--;
     chunks_sorted_ = false;
     stats.n_frees++;
     stats.currently_allocated -= h->map_size;
@@ -223,7 +274,7 @@ class LargeMmapAllocator {
     EnsureSortedChunks();  // Avoid doing the sort while iterating.
     for (uptr i = 0; i < n_chunks_; i++) {
       auto t = chunks_[i];
-      callback(reinterpret_cast<uptr>(GetUser(chunks_[i])), arg);
+      callback(reinterpret_cast<uptr>(GetUser(t)), arg);
       // Consistency check: verify that the array did not change.
       CHECK_EQ(chunks_[i], t);
       CHECK_EQ(chunks_[i]->chunk_idx, i);
@@ -231,7 +282,6 @@ class LargeMmapAllocator {
   }
 
  private:
-  static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18);
   struct Header {
     uptr map_beg;
     uptr map_size;
@@ -257,7 +307,8 @@ class LargeMmapAllocator {
   }
 
   uptr page_size_;
-  Header *chunks_[kMaxNumChunks];
+  Header **chunks_;
+  PtrArrayT ptr_array_;
   uptr n_chunks_;
   bool chunks_sorted_;
   struct Stats {
@@ -266,4 +317,3 @@ class LargeMmapAllocator {
   } stats;
   SpinMutex mutex_;
 };
-
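For context, the Allocate/Deallocate bookkeeping hunks above follow a
standard swap-with-last pattern for O(1) removal from an unsorted array.
A simplified standalone sketch of that pattern (names are illustrative;
the real code holds mutex_ and updates stats as well):

#include <assert.h>
#include <stdint.h>

struct Chunk { uintptr_t chunk_idx; };

// Append: the patch calls ptr_array_.EnsureSpace(n_chunks_) first, then
// claims the next index and records it in the chunk header.
void RegisterChunk(Chunk **chunks, uintptr_t &n_chunks, Chunk *h) {
  uintptr_t idx = n_chunks++;
  h->chunk_idx = idx;
  chunks[idx] = h;
}

// Remove in O(1): move the last chunk into the vacated slot and patch its
// stored index, exactly the chunks_[idx] = chunks_[--n_chunks_] hunk above.
void UnregisterChunk(Chunk **chunks, uintptr_t &n_chunks, Chunk *h) {
  uintptr_t idx = h->chunk_idx;
  assert(chunks[idx] == h);
  chunks[idx] = chunks[--n_chunks];
  chunks[idx]->chunk_idx = idx;
}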