author    Alex Shlyapnikov <alekseys@google.com>  2017-10-23 17:12:07 +0000
committer Alex Shlyapnikov <alekseys@google.com>  2017-10-23 17:12:07 +0000
commit    028c4cddad4bb4f2979c53aba3cb2e00d1f50397 (patch)
tree      aae6a0b58c977e35e819e86dd8829a116907659f
parent    b791802aef27656581b3a33acf9953974c8c28da (diff)
[Sanitizers] New sanitizer API to purge allocator quarantine.
Summary:
Purging the allocator quarantine and returning memory to the OS might be
desired between fuzzer iterations: the quarantine is unlikely to catch bugs
in the code under fuzz, while the reduced RSS can significantly prolong the
fuzzing session.

Reviewers: cryptoad

Subscribers: kubamracek, llvm-commits

Differential Revision: https://reviews.llvm.org/D39153

llvm-svn: 316347
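For context, a harness that takes advantage of this between fuzzer iterations
might look like the sketch below. It assumes a libFuzzer-style entry point;
`TargetFunction` and the purge cadence `kPurgeEvery` are hypothetical, not
part of this commit.

```cpp
#include <sanitizer/allocator_interface.h>

#include <cstddef>
#include <cstdint>

// Hypothetical code under test; not part of this commit.
extern void TargetFunction(const uint8_t *data, size_t size);

extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
  static size_t iteration = 0;
  TargetFunction(data, size);
  // Drain the quarantine and return freed pages to the OS every so often,
  // keeping RSS low over a long fuzzing session (cadence is arbitrary).
  constexpr size_t kPurgeEvery = 1000;
  if (++iteration % kPurgeEvery == 0)
    __sanitizer_purge_allocator();
  return 0;
}
```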
-rw-r--r--  compiler-rt/include/sanitizer/allocator_interface.h              |  7
-rw-r--r--  compiler-rt/lib/asan/asan_allocator.cc                           | 20
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h  |  4
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_allocator_interface.h |  3
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h |  4
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h | 25
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_common_interface.inc  |  1
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h          | 33
-rw-r--r--  compiler-rt/test/asan/TestCases/Linux/release_to_os_test.cc      | 18
9 files changed, 89 insertions(+), 26 deletions(-)
diff --git a/compiler-rt/include/sanitizer/allocator_interface.h b/compiler-rt/include/sanitizer/allocator_interface.h
index 5220631619f..2d35804b376 100644
--- a/compiler-rt/include/sanitizer/allocator_interface.h
+++ b/compiler-rt/include/sanitizer/allocator_interface.h
@@ -76,6 +76,13 @@ extern "C" {
void (*malloc_hook)(const volatile void *, size_t),
void (*free_hook)(const volatile void *));
+ /* Drains allocator quarantines (calling thread's and global ones), returns
+ freed memory back to OS and releases other non-essential internal allocator
+ resources in an attempt to reduce process RSS.
+ Currently available with ASan only.
+ */
+ void __sanitizer_purge_allocator();
+
#ifdef __cplusplus
} // extern "C"
#endif
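The declaration above is unconditional, but the comment notes the call is only
functional under ASan, so a portable caller might gate it at compile time. A
minimal sketch using Clang's `__has_feature` check:

```cpp
#include <sanitizer/allocator_interface.h>

void MaybePurgeAllocator() {
#if defined(__has_feature)
#  if __has_feature(address_sanitizer)
  // Guarded so builds without the ASan runtime do not reference a symbol
  // that only that runtime defines.
  __sanitizer_purge_allocator();
#  endif
#endif
}
```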
diff --git a/compiler-rt/lib/asan/asan_allocator.cc b/compiler-rt/lib/asan/asan_allocator.cc
index c98f9a89cc7..4b7e84ca546 100644
--- a/compiler-rt/lib/asan/asan_allocator.cc
+++ b/compiler-rt/lib/asan/asan_allocator.cc
@@ -716,6 +716,22 @@ struct Allocator {
return AsanChunkView(m1);
}
+ void Purge() {
+ AsanThread *t = GetCurrentThread();
+ if (t) {
+ AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
+ quarantine.DrainAndRecycle(GetQuarantineCache(ms),
+ QuarantineCallback(GetAllocatorCache(ms)));
+ }
+ {
+ SpinMutexLock l(&fallback_mutex);
+ quarantine.DrainAndRecycle(&fallback_quarantine_cache,
+ QuarantineCallback(&fallback_allocator_cache));
+ }
+
+ allocator.ForceReleaseToOS();
+ }
+
void PrintStats() {
allocator.PrintStats();
quarantine.PrintStats();
@@ -1011,6 +1027,10 @@ uptr __sanitizer_get_allocated_size(const void *p) {
return allocated_size;
}
+void __sanitizer_purge_allocator() {
+ instance.Purge();
+}
+
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
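The purge first drains the calling thread's quarantine cache, then the global
fallback cache under `fallback_mutex`, and finally asks the primary allocator
to return freed pages. Its effect can be observed through stats functions
already exported by `<sanitizer/allocator_interface.h>`; a sketch, with the
report wording being illustrative only:

```cpp
#include <sanitizer/allocator_interface.h>

#include <cstddef>
#include <cstdio>

void PurgeAndReport() {
  size_t free_before = __sanitizer_get_free_bytes();
  __sanitizer_purge_allocator();
  size_t free_after = __sanitizer_get_free_bytes();
  // Quarantined chunks have been recycled into the free lists and freed
  // pages returned to the OS, so free bytes typically grow while RSS drops.
  std::printf("free bytes: %zu -> %zu\n", free_before, free_after);
}
```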
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
index efd25cadfe7..0d8a2a174bb 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
@@ -77,6 +77,10 @@ class CombinedAllocator {
primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
}
+ void ForceReleaseToOS() {
+ primary_.ForceReleaseToOS();
+ }
+
void Deallocate(AllocatorCache *cache, void *p) {
if (!p) return;
if (primary_.PointerIsMine(p))
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_interface.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_interface.h
index 13910e719e7..2f5ce3151ee 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_interface.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_interface.h
@@ -39,6 +39,9 @@ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_free_hook(void *ptr);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_purge_allocator();
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_print_memory_profile(uptr top_percent, uptr max_number_of_contexts);
} // extern "C"
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
index 8f8a71d6751..8abf2f9f510 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
@@ -120,6 +120,10 @@ class SizeClassAllocator32 {
// This is empty here. Currently only implemented in 64-bit allocator.
}
+ void ForceReleaseToOS() {
+ // Currently implemented in 64-bit allocator only.
+ }
+
void *MapWithCallback(uptr size) {
void *res = MmapOrDie(size, "SizeClassAllocator32");
MapUnmapCallback().OnMap((uptr)res, size);
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
index 931e01c28d8..1635efe8379 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
@@ -92,6 +92,13 @@ class SizeClassAllocator64 {
memory_order_relaxed);
}
+ void ForceReleaseToOS() {
+ for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
+ BlockingMutexLock l(&GetRegionInfo(class_id)->mutex);
+ MaybeReleaseToOS(class_id, true /*force*/);
+ }
+ }
+
static bool CanAllocate(uptr size, uptr alignment) {
return size <= SizeClassMap::kMaxSize &&
alignment <= SizeClassMap::kMaxSize;
@@ -116,7 +123,7 @@ class SizeClassAllocator64 {
region->num_freed_chunks = new_num_freed_chunks;
region->stats.n_freed += n_chunks;
- MaybeReleaseToOS(class_id);
+ MaybeReleaseToOS(class_id, false /*force*/);
}
NOINLINE bool GetFromAllocator(AllocatorStats *stat, uptr class_id,
@@ -786,7 +793,7 @@ class SizeClassAllocator64 {
// Attempts to release RAM occupied by freed chunks back to OS. The region is
// expected to be locked.
- void MaybeReleaseToOS(uptr class_id) {
+ void MaybeReleaseToOS(uptr class_id, bool force) {
RegionInfo *region = GetRegionInfo(class_id);
const uptr chunk_size = ClassIdToSize(class_id);
const uptr page_size = GetPageSizeCached();
@@ -799,12 +806,16 @@ class SizeClassAllocator64 {
return; // Nothing new to release.
}
- s32 interval_ms = ReleaseToOSIntervalMs();
- if (interval_ms < 0)
- return;
+ if (!force) {
+ s32 interval_ms = ReleaseToOSIntervalMs();
+ if (interval_ms < 0)
+ return;
- if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL > NanoTime())
- return; // Memory was returned recently.
+ if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL >
+ NanoTime()) {
+ return; // Memory was returned recently.
+ }
+ }
MemoryMapper memory_mapper(*this, class_id);
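The `force` flag simply bypasses the rate limiting: a negative
`allocator_release_to_os_interval_ms` disables periodic release, and a recent
release suppresses another one. The gating logic, restated as a standalone
function with illustrative names:

```cpp
#include <cstdint>

// Standalone restatement of the throttle in MaybeReleaseToOS() above.
// Returns true when a release attempt should actually proceed.
bool ShouldReleaseToOS(bool force, int32_t interval_ms,
                       uint64_t last_release_ns, uint64_t now_ns) {
  if (force)
    return true;      // explicit purge: skip all rate limiting
  if (interval_ms < 0)
    return false;     // negative interval disables periodic release
  // Otherwise release only if at least interval_ms have elapsed since
  // the previous release.
  return last_release_ns + (uint64_t)interval_ms * 1000000ULL <= now_ns;
}
```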
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common_interface.inc b/compiler-rt/lib/sanitizer_common/sanitizer_common_interface.inc
index 550427c906a..2568df3a5bf 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common_interface.inc
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common_interface.inc
@@ -34,6 +34,7 @@ INTERFACE_FUNCTION(__sanitizer_get_heap_size)
INTERFACE_FUNCTION(__sanitizer_get_ownership)
INTERFACE_FUNCTION(__sanitizer_get_unmapped_bytes)
INTERFACE_FUNCTION(__sanitizer_install_malloc_and_free_hooks)
+INTERFACE_FUNCTION(__sanitizer_purge_allocator)
INTERFACE_FUNCTION(__sanitizer_print_memory_profile)
INTERFACE_WEAK_FUNCTION(__sanitizer_free_hook)
INTERFACE_WEAK_FUNCTION(__sanitizer_malloc_hook)
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h b/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h
index db38867ced2..886445a2732 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h
@@ -87,15 +87,14 @@ class Quarantine {
// is zero (it allows us to perform just one atomic read per Put() call).
CHECK((size == 0 && cache_size == 0) || cache_size != 0);
- atomic_store(&max_size_, size, memory_order_relaxed);
- atomic_store(&min_size_, size / 10 * 9,
- memory_order_relaxed); // 90% of max size.
- atomic_store(&max_cache_size_, cache_size, memory_order_relaxed);
+ atomic_store_relaxed(&max_size_, size);
+ atomic_store_relaxed(&min_size_, size / 10 * 9); // 90% of max size.
+ atomic_store_relaxed(&max_cache_size_, cache_size);
}
- uptr GetSize() const { return atomic_load(&max_size_, memory_order_relaxed); }
+ uptr GetSize() const { return atomic_load_relaxed(&max_size_); }
uptr GetCacheSize() const {
- return atomic_load(&max_cache_size_, memory_order_relaxed);
+ return atomic_load_relaxed(&max_cache_size_);
}
void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
@@ -117,7 +116,16 @@ class Quarantine {
cache_.Transfer(c);
}
if (cache_.Size() > GetSize() && recycle_mutex_.TryLock())
- Recycle(cb);
+ Recycle(atomic_load_relaxed(&min_size_), cb);
+ }
+
+ void NOINLINE DrainAndRecycle(Cache *c, Callback cb) {
+ {
+ SpinMutexLock l(&cache_mutex_);
+ cache_.Transfer(c);
+ }
+ recycle_mutex_.Lock();
+ Recycle(0, cb);
}
void PrintStats() const {
@@ -139,9 +147,8 @@ class Quarantine {
Cache cache_;
char pad2_[kCacheLineSize];
- void NOINLINE Recycle(Callback cb) {
+ void NOINLINE Recycle(uptr min_size, Callback cb) {
Cache tmp;
- uptr min_size = atomic_load(&min_size_, memory_order_relaxed);
{
SpinMutexLock l(&cache_mutex_);
// Go over the batches and merge partially filled ones to
@@ -201,7 +208,7 @@ class QuarantineCache {
// Total memory used, including internal accounting.
uptr Size() const {
- return atomic_load(&size_, memory_order_relaxed);
+ return atomic_load_relaxed(&size_);
}
// Memory used for internal accounting.
@@ -225,7 +232,7 @@ class QuarantineCache {
list_.append_back(&from_cache->list_);
SizeAdd(from_cache->Size());
- atomic_store(&from_cache->size_, 0, memory_order_relaxed);
+ atomic_store_relaxed(&from_cache->size_, 0);
}
void EnqueueBatch(QuarantineBatch *b) {
@@ -296,10 +303,10 @@ class QuarantineCache {
atomic_uintptr_t size_;
void SizeAdd(uptr add) {
- atomic_store(&size_, Size() + add, memory_order_relaxed);
+ atomic_store_relaxed(&size_, Size() + add);
}
void SizeSub(uptr sub) {
- atomic_store(&size_, Size() - sub, memory_order_relaxed);
+ atomic_store_relaxed(&size_, Size() - sub);
}
};
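The behavioral core of this change is that `Recycle()` now takes its low-water
mark from the caller: the `Put()` hot path passes the cached 90% threshold,
while `DrainAndRecycle()` passes zero so the quarantine is emptied completely.
A sketch of that arithmetic with made-up sizes:

```cpp
#include <cstdint>
#include <cstdio>

// Bytes the recycler must evict to reach the caller's target
// (mirrors the Recycle(min_size, cb) contract; names illustrative).
uint64_t BytesToEvict(uint64_t current, uint64_t min_size) {
  return current > min_size ? current - min_size : 0;
}

int main() {
  uint64_t max_size = 256 << 20;           // e.g. a 256 MB quarantine
  uint64_t current = max_size + (1 << 20); // just over the limit
  // Put() path: trim back to the 90% low-water mark computed in Init().
  std::printf("put path evicts %llu bytes\n",
              (unsigned long long)BytesToEvict(current, max_size / 10 * 9));
  // DrainAndRecycle() path: min_size == 0, everything is recycled.
  std::printf("drain path evicts %llu bytes\n",
              (unsigned long long)BytesToEvict(current, 0));
  return 0;
}
```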
diff --git a/compiler-rt/test/asan/TestCases/Linux/release_to_os_test.cc b/compiler-rt/test/asan/TestCases/Linux/release_to_os_test.cc
index c85bcbb7f15..3e28ffde46a 100644
--- a/compiler-rt/test/asan/TestCases/Linux/release_to_os_test.cc
+++ b/compiler-rt/test/asan/TestCases/Linux/release_to_os_test.cc
@@ -1,18 +1,21 @@
// Tests ASAN_OPTIONS=allocator_release_to_os=1
-//
// RUN: %clangxx_asan -std=c++11 %s -o %t
// RUN: %env_asan_opts=allocator_release_to_os_interval_ms=0 %run %t 2>&1 | FileCheck %s --check-prefix=RELEASE
// RUN: %env_asan_opts=allocator_release_to_os_interval_ms=-1 %run %t 2>&1 | FileCheck %s --check-prefix=NO_RELEASE
-//
+// RUN: %env_asan_opts=allocator_release_to_os_interval_ms=-1 %run %t force 2>&1 | FileCheck %s --check-prefix=FORCE_RELEASE
+
// REQUIRES: x86_64-target-arch
-#include <stdlib.h>
-#include <stdio.h>
+
#include <algorithm>
-#include <stdint.h>
#include <assert.h>
#include <random>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sanitizer/allocator_interface.h>
#include <sanitizer/asan_interface.h>
void MallocReleaseStress() {
@@ -39,10 +42,13 @@ void MallocReleaseStress() {
delete[] p;
}
-int main() {
+int main(int argc, char **argv) {
MallocReleaseStress();
+ if (argc > 1 && !strcmp("force", argv[1]))
+ __sanitizer_purge_allocator();
__asan_print_accumulated_stats();
}
// RELEASE: mapped:{{.*}}releases: {{[1-9]}}
// NO_RELEASE: mapped:{{.*}}releases: 0
+// FORCE_RELEASE: mapped:{{.*}}releases: {{[1-9]}}