summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDmitry Vyukov <dvyukov@google.com>2012-08-23 17:16:07 +0000
committerDmitry Vyukov <dvyukov@google.com>2012-08-23 17:16:07 +0000
commit7e6347402d921d3e4c867172e153134ea43d15d1 (patch)
treed03524d3c03852595a63aaf8f86c66b13801063d
parent65340a69e14f119e97ea6a68e067a8b0cd3a44df (diff)
downloadbcm5719-llvm-7e6347402d921d3e4c867172e153134ea43d15d1.tar.gz
bcm5719-llvm-7e6347402d921d3e4c867172e153134ea43d15d1.zip
tsan: fix new memory allocator
Deallocate: drain blocks to the central cache when too many are cached (instead of never draining). Allocate: batch-allocate a fixed number of blocks (instead of all available blocks). This significantly reduces the memory consumption of large, heavily multithreaded programs. llvm-svn: 162447
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_allocator64.h47
-rw-r--r--compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc8
2 files changed, 45 insertions, 10 deletions
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator64.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator64.h
index a8ea6f83db8..68a52a3d53a 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator64.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator64.h
@@ -53,6 +53,13 @@ class DefaultSizeClassMap {
static const uptr u3 = u2 + (l4 - l3) / s3;
static const uptr u4 = u3 + (l5 - l4) / s4;
+ // Max cached in local cache blocks.
+ static const uptr c0 = 256;
+ static const uptr c1 = 64;
+ static const uptr c2 = 16;
+ static const uptr c3 = 4;
+ static const uptr c4 = 1;
+
public:
static const uptr kNumClasses = u4 + 1;
static const uptr kMaxSize = l5;
@@ -77,6 +84,15 @@ class DefaultSizeClassMap {
if (size <= l5) return u3 + (size - l4 + s4 - 1) / s4;
return 0;
}
+
+ static uptr MaxCached(uptr class_id) {
+ if (class_id <= u0) return c0;
+ if (class_id <= u1) return c1;
+ if (class_id <= u2) return c2;
+ if (class_id <= u3) return c3;
+ if (class_id <= u4) return c4;
+ return 0;
+ }
};
struct AllocatorListNode {
@@ -131,10 +147,13 @@ class SizeClassAllocator64 {
PopulateFreeList(class_id, region);
}
CHECK(!region->free_list.empty());
- // Just take as many chunks as we have in the free list now.
- // FIXME: this might be too much.
- free_list->append_front(&region->free_list);
- CHECK(region->free_list.empty());
+ const uptr count = SizeClassMap::MaxCached(class_id);
+ for (uptr i = 0; i < count && !region->free_list.empty(); i++) {
+ AllocatorListNode *node = region->free_list.front();
+ region->free_list.pop_front();
+ free_list->push_front(node);
+ }
+ CHECK(!free_list->empty());
}
// Swallow the entire free_list for the given class_id.
@@ -184,6 +203,7 @@ class SizeClassAllocator64 {
static uptr AllocSize() { return kSpaceSize + AdditionalSize(); }
static const uptr kNumClasses = 256; // Power of two <= 256
+ typedef SizeClassMap SizeClassMapT;
private:
COMPILER_CHECK(kSpaceBeg % kSpaceSize == 0);
@@ -285,7 +305,10 @@ struct SizeClassAllocatorLocalCache {
void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
CHECK_LT(class_id, kNumClasses);
- free_lists_[class_id].push_front(reinterpret_cast<AllocatorListNode*>(p));
+ AllocatorFreeList *free_list = &free_lists_[class_id];
+ free_list->push_front(reinterpret_cast<AllocatorListNode*>(p));
+ if (free_list->size() >= 2 * SizeClassMap::MaxCached(class_id))
+ DrainHalf(allocator, class_id);
}
void Drain(SizeClassAllocator *allocator) {
@@ -296,7 +319,21 @@ struct SizeClassAllocatorLocalCache {
}
// private:
+ typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
AllocatorFreeList free_lists_[kNumClasses];
+
+ void DrainHalf(SizeClassAllocator *allocator, uptr class_id) {
+ AllocatorFreeList *free_list = &free_lists_[class_id];
+ AllocatorFreeList half;
+ half.clear();
+ const uptr count = free_list->size() / 2;
+ for (uptr i = 0; i < count; i++) {
+ AllocatorListNode *node = free_list->front();
+ free_list->pop_front();
+ half.push_front(node);
+ }
+ allocator->BulkDeallocate(class_id, &half);
+ }
};
// This class can (de)allocate only large chunks of memory using mmap/unmap.
diff --git a/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc b/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc
index 87d564b16d9..2dd680ce059 100644
--- a/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc
+++ b/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc
@@ -26,12 +26,10 @@ typedef SizeClassAllocatorLocalCache<Allocator::kNumClasses, Allocator>
TEST(SanitizerCommon, DefaultSizeClassMap) {
#if 0
for (uptr i = 0; i < SCMap::kNumClasses; i++) {
- // printf("% 3ld: % 5ld (%4lx); ", i, SCMap::Size(i), SCMap::Size(i));
- printf("c%ld => %ld ", i, SCMap::Size(i));
- if ((i % 8) == 7)
- printf("\n");
+ printf("c%ld => %ld cached=%ld(%ld)\n",
+ i, SCMap::Size(i), SCMap::MaxCached(i) * SCMap::Size(i),
+ SCMap::MaxCached(i));
}
- printf("\n");
#endif
for (uptr c = 0; c < SCMap::kNumClasses; c++) {
OpenPOWER on IntegriCloud