 compiler-rt/lib/sanitizer_common/sanitizer_allocator64.h             | 98 +++++++++-
 compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc | 43 ++++++-
 2 files changed, 137 insertions(+), 4 deletions(-)
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator64.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator64.h
index 446173b605c..4b25158a41d 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator64.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator64.h
@@ -19,6 +19,7 @@
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
namespace __sanitizer {
@@ -113,11 +114,11 @@ class SizeClassAllocator64 {
return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClasses;
}
- uptr GetMetaData(void *p) {
+ void *GetMetaData(void *p) {
uptr class_id = GetSizeClass(p);
uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), class_id);
- return kSpaceBeg + (kRegionSize * (class_id + 1)) -
- (1 + chunk_idx) * kMetadataSize;
+ return reinterpret_cast<void*>(kSpaceBeg + (kRegionSize * (class_id + 1)) -
+ (1 + chunk_idx) * kMetadataSize);
}
uptr TotalMemoryUsed() {
@@ -226,6 +227,97 @@ class SizeClassAllocator64 {
}
};
+// This class can (de)allocate only large chunks of memory using mmap/unmap.
+// The main purpose of this allocator is to cover large and rare allocation
+// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
+// The result is always page-aligned.
+class LargeMmapAllocator {
+ public:
+ void Init() {
+ internal_memset(this, 0, sizeof(*this));
+ }
+ void *Allocate(uptr size) {
+ uptr map_size = RoundUpMapSize(size);
+ void *map = MmapOrDie(map_size, "LargeMmapAllocator");
+ void *res = reinterpret_cast<void*>(reinterpret_cast<uptr>(map)
+ + kPageSize);
+ Header *h = GetHeader(res);
+ h->size = size;
+ {
+ // FIXME: lock
+ h->next = list_;
+ h->prev = 0;
+ if (list_)
+ list_->prev = h;
+ list_ = h;
+ }
+ return res;
+ }
+
+ void Deallocate(void *p) {
+ Header *h = GetHeader(p);
+ uptr map_size = RoundUpMapSize(h->size);
+ {
+ // FIXME: lock
+ Header *prev = h->prev;
+ Header *next = h->next;
+ if (prev)
+ prev->next = next;
+ if (next)
+ next->prev = prev;
+ if (h == list_)
+ list_ = next;
+ }
+ UnmapOrDie(h, map_size);
+ }
+
+ uptr TotalMemoryUsed() {
+ // FIXME: lock
+ uptr res = 0;
+ for (Header *l = list_; l; l = l->next) {
+ res += RoundUpMapSize(l->size);
+ }
+ return res;
+ }
+
+ bool PointerIsMine(void *p) {
+ // Fast check.
+ if ((reinterpret_cast<uptr>(p) % kPageSize) != 0) return false;
+ // FIXME: lock
+ for (Header *l = list_; l; l = l->next) {
+ if (GetUser(l) == p) return true;
+ }
+ return false;
+ }
+
+ // At least kPageSize/2 metadata bytes are available.
+ void *GetMetaData(void *p) {
+ return GetHeader(p) + 1;
+ }
+
+ private:
+ struct Header {
+ uptr size;
+ Header *next;
+ Header *prev;
+ };
+
+ Header *GetHeader(void *p) {
+ return reinterpret_cast<Header*>(reinterpret_cast<uptr>(p) - kPageSize);
+ }
+
+ void *GetUser(Header *h) {
+ return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + kPageSize);
+ }
+
+ uptr RoundUpMapSize(uptr size) {
+ return RoundUpTo(size, kPageSize) + kPageSize;
+ }
+
+ Header *list_;
+ uptr lock_; // FIXME
+};
+
} // namespace __sanitizer
#endif // SANITIZER_ALLOCATOR_H
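
The new LargeMmapAllocator reserves one extra page in front of every user block: the Header (the requested size plus the doubly-linked list pointers) sits at the start of that page, and the bytes after it are what GetMetaData() hands out, which is where the "at least kPageSize/2" guarantee comes from. The standalone sketch below walks through the same address arithmetic; it is not part of the patch, and it assumes a 4096-byte page and plain POSIX mmap where the real code uses kPageSize and MmapOrDie/UnmapOrDie from sanitizer_common.

// Standalone illustration of the LargeMmapAllocator layout.
// Assumptions: 4096-byte pages, POSIX mmap; all names below are local
// to this sketch, not sanitizer_common APIs.
#include <sys/mman.h>
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t uptr;
static const uptr kPageSize = 4096;  // assumed page size

struct Header { uptr size; Header *next; Header *prev; };

static uptr RoundUpTo(uptr x, uptr b) { return (x + b - 1) & ~(b - 1); }

int main() {
  uptr size = 1000;
  // Allocate() maps RoundUpTo(size, kPageSize) + kPageSize bytes: one
  // leading page for the header and metadata, the rest for the user block.
  uptr map_size = RoundUpTo(size, kPageSize) + kPageSize;
  void *map = mmap(0, map_size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(map != MAP_FAILED);
  void *user = (void *)((uptr)map + kPageSize);    // Allocate() result
  Header *h = (Header *)((uptr)user - kPageSize);  // GetHeader(user)
  h->size = size;
  void *meta = h + 1;                              // GetMetaData(user)
  // kPageSize - sizeof(Header) metadata bytes follow the header, well
  // over the kPageSize/2 promised in the comment above.
  printf("map=%p header=%p meta=%p user=%p\n", map, (void *)h, meta, user);
  munmap(map, map_size);                           // Deallocate()
  return 0;
}

Keeping the header a whole page in front of the user block means the returned pointer stays page-aligned and the header is reachable by pure pointer arithmetic, at the cost of one extra page per allocation.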
diff --git a/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc b/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc
index e48c2949618..c0b7b7bd9f5 100644
--- a/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc
+++ b/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc
@@ -106,7 +106,7 @@ TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
16, SCMap> Allocator;
Allocator a;
a.Init();
- static volatile uptr sink;
+ static volatile void *sink;
const uptr kNumAllocs = 10000;
void *allocated[kNumAllocs];
@@ -144,3 +144,44 @@ TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
EXPECT_DEATH(FailInAssertionOnOOM(),
"allocated_user.*allocated_meta.*kRegionSize");
}
+
+TEST(SanitizerCommon, LargeMmapAllocator) {
+ LargeMmapAllocator a;
+ a.Init();
+
+ static const int kNumAllocs = 100;
+ void *allocated[kNumAllocs];
+ static const uptr size = 1000;
+ // Allocate some.
+ for (int i = 0; i < kNumAllocs; i++) {
+ allocated[i] = a.Allocate(size);
+ }
+ CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
+ // Deallocate all.
+ for (int i = 0; i < kNumAllocs; i++) {
+ void *p = allocated[i];
+ CHECK(a.PointerIsMine(p));
+ a.Deallocate(p);
+ }
+ // Check that none are left.
+ CHECK_EQ(a.TotalMemoryUsed(), 0);
+
+ // Allocate some more, also add metadata.
+ for (int i = 0; i < kNumAllocs; i++) {
+ void *x = a.Allocate(size);
+ uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
+ *meta = i;
+ allocated[i] = x;
+ }
+ CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
+ // Deallocate all in reverse order.
+ for (int i = 0; i < kNumAllocs; i++) {
+ int idx = kNumAllocs - i - 1;
+ void *p = allocated[idx];
+ uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
+ CHECK_EQ(*meta, idx);
+ CHECK(a.PointerIsMine(p));
+ a.Deallocate(p);
+ }
+ CHECK_EQ(a.TotalMemoryUsed(), 0);
+}
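
As the header comment says, LargeMmapAllocator is meant to back up a more efficient size-class allocator for large or rare sizes. A possible front end is sketched below; it is only an illustration, and PrimaryAllocator and its CanAllocate() predicate are hypothetical placeholders, not APIs introduced by this patch. It assumes sanitizer_allocator64.h is included and the code sits inside namespace __sanitizer.

// Sketch of a combined front end; PrimaryAllocator and CanAllocate()
// are placeholder names for this illustration only.
template <class PrimaryAllocator>
class CombinedAllocatorSketch {
 public:
  void Init() {
    primary_.Init();
    secondary_.Init();
  }
  void *Allocate(uptr size) {
    // Small, common sizes go to the size-class allocator; large or
    // rare sizes fall through to LargeMmapAllocator.
    if (primary_.CanAllocate(size))  // hypothetical predicate
      return primary_.Allocate(size);
    return secondary_.Allocate(size);
  }
  void Deallocate(void *p) {
    if (primary_.PointerIsMine(p))
      primary_.Deallocate(p);
    else
      secondary_.Deallocate(p);
  }
  void *GetMetaData(void *p) {
    return primary_.PointerIsMine(p) ? primary_.GetMetaData(p)
                                     : secondary_.GetMetaData(p);
  }
 private:
  PrimaryAllocator primary_;
  LargeMmapAllocator secondary_;
};

Dispatching frees through PointerIsMine() also explains why the linear list walk in LargeMmapAllocator::PointerIsMine() is tolerable: it only runs for the rare, large allocations that bypass the primary allocator.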