diff options
author:    Kostya Serebryany <kcc@google.com>  2016-08-26 23:58:42 +0000
committer: Kostya Serebryany <kcc@google.com>  2016-08-26 23:58:42 +0000
commit:    b72479b84a4f54b78375de565af307e3f101627f (patch)
tree:      11742b6e172ebe00c9032efbf278aba920fd41a7
parent:    3bb32cc79c89cac91abcc230f41b2afa8c03b101 (diff)
download:  bcm5719-llvm-b72479b84a4f54b78375de565af307e3f101627f.tar.gz
           bcm5719-llvm-b72479b84a4f54b78375de565af307e3f101627f.zip
[asan] first attempt at releasing free-d memory back to the system using madvise. Requires quite some tuning.
llvm-svn: 279887
18 files changed, 182 insertions, 31 deletions
diff --git a/compiler-rt/lib/asan/asan_allocator.cc b/compiler-rt/lib/asan/asan_allocator.cc index b45c23a2665..9f673a2aa16 100644 --- a/compiler-rt/lib/asan/asan_allocator.cc +++ b/compiler-rt/lib/asan/asan_allocator.cc @@ -654,6 +654,8 @@ struct Allocator { fallback_mutex.Unlock(); allocator.ForceUnlock(); } + + void ReleaseToOS() { allocator.ReleaseToOS(); } }; static Allocator instance(LINKER_INITIALIZED); @@ -695,8 +697,11 @@ StackTrace AsanChunkView::GetFreeStack() { return GetStackTraceFromId(GetFreeStackId()); } +void ReleaseToOS() { instance.ReleaseToOS(); } + void InitializeAllocator(const AllocatorOptions &options) { instance.Initialize(options); + SetAllocatorReleaseToOSCallback(ReleaseToOS); } void ReInitializeAllocator(const AllocatorOptions &options) { diff --git a/compiler-rt/lib/asan/asan_poisoning.cc b/compiler-rt/lib/asan/asan_poisoning.cc index e1a3f2d6d54..dafd06a30cb 100644 --- a/compiler-rt/lib/asan/asan_poisoning.cc +++ b/compiler-rt/lib/asan/asan_poisoning.cc @@ -69,7 +69,7 @@ void FlushUnneededASanShadowMemory(uptr p, uptr size) { uptr page_size = GetPageSizeCached(); uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size); uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size); - FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg); + ReleaseMemoryToOS(shadow_beg, shadow_end - shadow_beg); } void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) { diff --git a/compiler-rt/lib/asan/asan_poisoning.h b/compiler-rt/lib/asan/asan_poisoning.h index 6344225f0f6..35905c0265f 100644 --- a/compiler-rt/lib/asan/asan_poisoning.h +++ b/compiler-rt/lib/asan/asan_poisoning.h @@ -86,7 +86,7 @@ ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone( } } -// Calls __sanitizer::FlushUnneededShadowMemory() on +// Calls __sanitizer::ReleaseMemoryToOS() on // [MemToShadow(p), MemToShadow(p+size)] with proper rounding. 
void FlushUnneededASanShadowMemory(uptr p, uptr size); diff --git a/compiler-rt/lib/msan/msan_allocator.cc b/compiler-rt/lib/msan/msan_allocator.cc index f5d59047b7b..2b81efc3e3c 100644 --- a/compiler-rt/lib/msan/msan_allocator.cc +++ b/compiler-rt/lib/msan/msan_allocator.cc @@ -33,9 +33,9 @@ struct MsanMapUnmapCallback { // We are about to unmap a chunk of user memory. // Mark the corresponding shadow memory as not needed. - FlushUnneededShadowMemory(MEM_TO_SHADOW(p), size); + ReleaseMemoryToOS(MEM_TO_SHADOW(p), size); if (__msan_get_track_origins()) - FlushUnneededShadowMemory(MEM_TO_ORIGIN(p), size); + ReleaseMemoryToOS(MEM_TO_ORIGIN(p), size); } }; diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h index dceb64bbd3f..2c8e2a2bbd3 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h @@ -190,6 +190,8 @@ class CombinedAllocator { primary_.ForceUnlock(); } + void ReleaseToOS() { primary_.ReleaseToOS(); } + // Iterate over all existing chunks. // The allocator must be locked when calling this function. void ForEachChunk(ForEachChunkCallback callback, void *arg) { diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h index 6aec1828d2d..9e23099aec4 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h @@ -231,6 +231,10 @@ class SizeClassAllocator32 { return 0; } + // This is empty here. Currently only implemented in 64-bit allocator. 
+ void ReleaseToOS() { } + + typedef SizeClassMap SizeClassMapT; static const uptr kNumClasses = SizeClassMap::kNumClasses; diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h index 2af2684dac1..deaa03e31a8 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h @@ -61,11 +61,12 @@ class SizeClassAllocator64 { // When we know the size class (the region base) we can represent a pointer // as a 4-byte integer (offset from the region start shifted right by 4). typedef u32 CompactPtrT; + static const uptr kCompactPtrScale = 4; CompactPtrT PointerToCompactPtr(uptr base, uptr ptr) { - return static_cast<CompactPtrT>((ptr - base) >> 4); + return static_cast<CompactPtrT>((ptr - base) >> kCompactPtrScale); } uptr CompactPtrToPointer(uptr base, CompactPtrT ptr32) { - return base + (static_cast<uptr>(ptr32) << 4); + return base + (static_cast<uptr>(ptr32) << kCompactPtrScale); } void Init() { @@ -109,6 +110,7 @@ class SizeClassAllocator64 { for (uptr i = 0; i < n_chunks; i++) free_array[old_num_chunks + i] = chunks[i]; region->num_freed_chunks = new_num_freed_chunks; + region->n_freed += n_chunks; } NOINLINE void GetFromAllocator(AllocatorStats *stat, uptr class_id, @@ -127,6 +129,7 @@ class SizeClassAllocator64 { uptr base_idx = region->num_freed_chunks; for (uptr i = 0; i < n_chunks; i++) chunks[i] = free_array[base_idx + i]; + region->n_allocated += n_chunks; } @@ -206,6 +209,21 @@ class SizeClassAllocator64 { stats[class_id] = rss; } + void PrintStats(uptr class_id, uptr rss) { + RegionInfo *region = GetRegionInfo(class_id); + if (region->mapped_user == 0) return; + uptr in_use = region->n_allocated - region->n_freed; + uptr avail_chunks = region->allocated_user / ClassIdToSize(class_id); + Printf( + " %02zd (%zd): mapped: %zdK allocs: %zd frees: %zd inuse: %zd " + "num_freed_chunks %zd" + 
" avail: %zd rss: %zdK releases: %zd\n", + class_id, ClassIdToSize(class_id), region->mapped_user >> 10, + region->n_allocated, region->n_freed, in_use, + region->num_freed_chunks, avail_chunks, rss >> 10, + region->rtoi.num_releases); + } + void PrintStats() { uptr total_mapped = 0; uptr n_allocated = 0; @@ -223,21 +241,8 @@ class SizeClassAllocator64 { for (uptr class_id = 0; class_id < kNumClasses; class_id++) rss_stats[class_id] = SpaceBeg() + kRegionSize * class_id; GetMemoryProfile(FillMemoryProfile, rss_stats, kNumClasses); - for (uptr class_id = 1; class_id < kNumClasses; class_id++) { - RegionInfo *region = GetRegionInfo(class_id); - if (region->mapped_user == 0) continue; - uptr in_use = region->n_allocated - region->n_freed; - uptr avail_chunks = region->allocated_user / ClassIdToSize(class_id); - Printf(" %02zd (%zd): mapped: %zdK allocs: %zd frees: %zd inuse: %zd" - " avail: %zd rss: %zdK\n", - class_id, - ClassIdToSize(class_id), - region->mapped_user >> 10, - region->n_allocated, - region->n_freed, - in_use, avail_chunks, - rss_stats[class_id] >> 10); - } + for (uptr class_id = 1; class_id < kNumClasses; class_id++) + PrintStats(class_id, rss_stats[class_id]); } // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone @@ -279,6 +284,11 @@ class SizeClassAllocator64 { GetPageSizeCached()); } + void ReleaseToOS() { + for (uptr class_id = 1; class_id < kNumClasses; class_id++) + ReleaseToOS(class_id); + } + typedef SizeClassMap SizeClassMapT; static const uptr kNumClasses = SizeClassMap::kNumClasses; static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded; @@ -307,6 +317,13 @@ class SizeClassAllocator64 { static const uptr kMetaMapSize = 1 << 16; // Call mmap for free array memory with at least this size. static const uptr kFreeArrayMapSize = 1 << 16; + // Granularity of ReleaseToOs (aka madvise). 
+ static const uptr kReleaseToOsGranularity = 1 << 12; + + struct ReleaseToOsInfo { + uptr n_freed_at_last_release; + uptr num_releases; + }; struct RegionInfo { BlockingMutex mutex; @@ -318,6 +335,7 @@ class SizeClassAllocator64 { uptr mapped_meta; // Bytes mapped for metadata. u32 rand_state; // Seed for random shuffle, used if kRandomShuffleChunks. uptr n_allocated, n_freed; // Just stats. + ReleaseToOsInfo rtoi; }; COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize); @@ -430,6 +448,59 @@ class SizeClassAllocator64 { Die(); } } + + bool MaybeReleaseChunkRange(uptr region_beg, uptr chunk_size, + CompactPtrT first, CompactPtrT last) { + uptr beg_ptr = CompactPtrToPointer(region_beg, first); + uptr end_ptr = CompactPtrToPointer(region_beg, last) + chunk_size; + CHECK_GE(end_ptr - beg_ptr, kReleaseToOsGranularity); + beg_ptr = RoundUpTo(beg_ptr, kReleaseToOsGranularity); + end_ptr = RoundDownTo(end_ptr, kReleaseToOsGranularity); + if (end_ptr == beg_ptr) return false; + ReleaseMemoryToOS(beg_ptr, end_ptr - beg_ptr); + return true; + } + + // Releases some RAM back to OS. + // Algorithm: + // * Lock the region. + // * Sort the chunks. + // * Find ranges fully covered by free-d chunks + // * Release them to OS with madvise. + // + // TODO(kcc): make sure we don't do it too frequently. + void ReleaseToOS(uptr class_id) { + RegionInfo *region = GetRegionInfo(class_id); + uptr region_beg = GetRegionBeginBySizeClass(class_id); + CompactPtrT *free_array = GetFreeArray(region_beg); + uptr chunk_size = ClassIdToSize(class_id); + uptr scaled_chunk_size = chunk_size >> kCompactPtrScale; + const uptr kScaledGranularity = kReleaseToOsGranularity >> kCompactPtrScale; + BlockingMutexLock l(®ion->mutex); + uptr n = region->num_freed_chunks; + if (n * chunk_size < kReleaseToOsGranularity) + return; // No chance to release anything. + if ((region->rtoi.n_freed_at_last_release - region->n_freed) * chunk_size < + kReleaseToOsGranularity) + return; // Nothing new to release. 
+ SortArray(free_array, n); + uptr beg = free_array[0]; + uptr prev = free_array[0]; + for (uptr i = 1; i < n; i++) { + uptr chunk = free_array[i]; + CHECK_GT(chunk, prev); + if (chunk - prev != scaled_chunk_size) { + CHECK_GT(chunk - prev, scaled_chunk_size); + if (prev + scaled_chunk_size - beg >= kScaledGranularity) { + MaybeReleaseChunkRange(region_beg, chunk_size, beg, prev); + region->rtoi.n_freed_at_last_release = region->n_freed; + region->rtoi.num_releases++; + } + beg = chunk; + } + prev = chunk; + } + } }; diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common.cc b/compiler-rt/lib/sanitizer_common/sanitizer_common.cc index 79fcbb1183f..de179be5fa4 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_common.cc +++ b/compiler-rt/lib/sanitizer_common/sanitizer_common.cc @@ -157,6 +157,7 @@ bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size, } typedef bool UptrComparisonFunction(const uptr &a, const uptr &b); +typedef bool U32ComparisonFunction(const u32 &a, const u32 &b); template<class T> static inline bool CompareLess(const T &a, const T &b) { @@ -167,6 +168,10 @@ void SortArray(uptr *array, uptr size) { InternalSort<uptr*, UptrComparisonFunction>(&array, size, CompareLess); } +void SortArray(u32 *array, uptr size) { + InternalSort<u32*, U32ComparisonFunction>(&array, size, CompareLess); +} + const char *StripPathPrefix(const char *filepath, const char *strip_path_prefix) { if (!filepath) return nullptr; diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common.h b/compiler-rt/lib/sanitizer_common/sanitizer_common.h index 6c1d6a00a10..0df4b79d51e 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_common.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_common.h @@ -100,7 +100,7 @@ bool MprotectReadOnly(uptr addr, uptr size); // Used to check if we can map shadow memory to a fixed location. 
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end); -void FlushUnneededShadowMemory(uptr addr, uptr size); +void ReleaseMemoryToOS(uptr addr, uptr size); void IncreaseTotalMmap(uptr size); void DecreaseTotalMmap(uptr size); uptr GetRSS(); @@ -330,6 +330,7 @@ void SleepForMillis(int millis); u64 NanoTime(); int Atexit(void (*function)(void)); void SortArray(uptr *array, uptr size); +void SortArray(u32 *array, uptr size); bool TemplateMatch(const char *templ, const char *str); // Exit @@ -371,6 +372,12 @@ void SetCheckFailedCallback(CheckFailedCallbackType callback); // The callback should be registered once at the tool init time. void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded)); +// Callback to be called when we want to try releasing unused allocator memory +// back to the OS. +typedef void (*AllocatorReleaseToOSCallback)(); +// The callback should be registered once at the tool init time. +void SetAllocatorReleaseToOSCallback(AllocatorReleaseToOSCallback Callback); + // Functions related to signal handling. 
typedef void (*SignalHandlerType)(int, void *, void *); bool IsHandledDeadlySignal(int signum); diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cc b/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cc index 596f5bcd317..1727f24c31a 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cc +++ b/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cc @@ -69,9 +69,16 @@ void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded)) { SoftRssLimitExceededCallback = Callback; } +static AllocatorReleaseToOSCallback ReleseCallback; +void SetAllocatorReleaseToOSCallback(AllocatorReleaseToOSCallback Callback) { + CHECK_EQ(ReleseCallback, nullptr); + ReleseCallback = Callback; +} + void BackgroundThread(void *arg) { uptr hard_rss_limit_mb = common_flags()->hard_rss_limit_mb; uptr soft_rss_limit_mb = common_flags()->soft_rss_limit_mb; + bool allocator_release_to_os = common_flags()->allocator_release_to_os; uptr prev_reported_rss = 0; uptr prev_reported_stack_depot_size = 0; bool reached_soft_rss_limit = false; @@ -116,6 +123,7 @@ void BackgroundThread(void *arg) { SoftRssLimitExceededCallback(false); } } + if (allocator_release_to_os && ReleseCallback) ReleseCallback(); } } @@ -142,7 +150,8 @@ void MaybeStartBackgroudThread() { !SANITIZER_GO // Need to implement/test on other platforms. // Start the background thread if one of the rss limits is given. if (!common_flags()->hard_rss_limit_mb && - !common_flags()->soft_rss_limit_mb) return; + !common_flags()->soft_rss_limit_mb && + !common_flags()->allocator_release_to_os) return; if (!&real_pthread_create) return; // Can't spawn the thread anyway. 
internal_start_thread(BackgroundThread, nullptr); #endif diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_flags.inc b/compiler-rt/lib/sanitizer_common/sanitizer_flags.inc index 3fcfb83e1ab..d3c60c5fc4e 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_flags.inc +++ b/compiler-rt/lib/sanitizer_common/sanitizer_flags.inc @@ -118,6 +118,9 @@ COMMON_FLAG(uptr, soft_rss_limit_mb, 0, " until the RSS goes below the soft limit." " This limit does not affect memory allocations other than" " malloc/new.") +COMMON_FLAG(bool, allocator_release_to_os, false, + "Experimental. If true, try to periodically release unused" + " memory to the OS.\n") COMMON_FLAG(bool, can_use_proc_maps_statm, true, "If false, do not attempt to read /proc/maps/statm." " Mostly useful for testing sanitizers.") diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cc b/compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cc index f1e8b50a2cf..a1c26e47f59 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cc +++ b/compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cc @@ -54,7 +54,7 @@ uptr GetThreadSelf() { return (uptr)pthread_self(); } -void FlushUnneededShadowMemory(uptr addr, uptr size) { +void ReleaseMemoryToOS(uptr addr, uptr size) { madvise((void*)addr, size, MADV_DONTNEED); } diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_win.cc b/compiler-rt/lib/sanitizer_common/sanitizer_win.cc index cdb2948214a..04e1e11bbec 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_win.cc +++ b/compiler-rt/lib/sanitizer_common/sanitizer_win.cc @@ -235,13 +235,13 @@ bool MprotectNoAccess(uptr addr, uptr size) { } -void FlushUnneededShadowMemory(uptr addr, uptr size) { +void ReleaseMemoryToOS(uptr addr, uptr size) { // This is almost useless on 32-bits. // FIXME: add madvise-analog when we move to 64-bits. } void NoHugePagesInRegion(uptr addr, uptr size) { - // FIXME: probably similar to FlushUnneededShadowMemory. 
+ // FIXME: probably similar to ReleaseMemoryToOS. } void DontDumpShadowMemory(uptr addr, uptr length) { diff --git a/compiler-rt/lib/tsan/rtl/tsan_mman.cc b/compiler-rt/lib/tsan/rtl/tsan_mman.cc index f99ddb35bbd..555fa11c2a7 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_mman.cc +++ b/compiler-rt/lib/tsan/rtl/tsan_mman.cc @@ -54,7 +54,7 @@ struct MapUnmapCallback { diff = p + size - RoundDown(p + size, kPageSize); if (diff != 0) size -= diff; - FlushUnneededShadowMemory((uptr)MemToMeta(p), size / kMetaRatio); + ReleaseMemoryToOS((uptr)MemToMeta(p), size / kMetaRatio); } }; diff --git a/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cc b/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cc index cd80e17fce2..6c78a3b081f 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cc +++ b/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cc @@ -134,7 +134,7 @@ void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) { void FlushShadowMemoryCallback( const SuspendedThreadsList &suspended_threads_list, void *argument) { - FlushUnneededShadowMemory(ShadowBeg(), ShadowEnd() - ShadowBeg()); + ReleaseMemoryToOS(ShadowBeg(), ShadowEnd() - ShadowBeg()); } #endif diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.cc b/compiler-rt/lib/tsan/rtl/tsan_rtl.cc index bda75d1561c..8fe9bf8d1b3 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_rtl.cc +++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.cc @@ -235,7 +235,7 @@ static void StopBackgroundThread() { void DontNeedShadowFor(uptr addr, uptr size) { uptr shadow_beg = MemToShadow(addr); uptr shadow_end = MemToShadow(addr + size); - FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg); + ReleaseMemoryToOS(shadow_beg, shadow_end - shadow_beg); } void MapShadow(uptr addr, uptr size) { diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cc b/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cc index 13528ae980e..fbe6111f6ca 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cc +++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cc @@ -68,8 +68,8 @@ 
void ThreadContext::OnCreated(void *arg) { void ThreadContext::OnReset() { CHECK_EQ(sync.size(), 0); - FlushUnneededShadowMemory(GetThreadTrace(tid), TraceSize() * sizeof(Event)); - //!!! FlushUnneededShadowMemory(GetThreadTraceHeader(tid), sizeof(Trace)); + ReleaseMemoryToOS(GetThreadTrace(tid), TraceSize() * sizeof(Event)); + //!!! ReleaseMemoryToOS(GetThreadTraceHeader(tid), sizeof(Trace)); } void ThreadContext::OnDetached(void *arg) { diff --git a/compiler-rt/test/asan/TestCases/Linux/release_to_os_test.cc b/compiler-rt/test/asan/TestCases/Linux/release_to_os_test.cc new file mode 100644 index 00000000000..c461d034fcb --- /dev/null +++ b/compiler-rt/test/asan/TestCases/Linux/release_to_os_test.cc @@ -0,0 +1,45 @@ +// Tests ASAN_OPTIONS=allocator_release_to_os=1 +// + +// RUN: %clangxx_asan -std=c++11 %s -o %t +// RUN: %env_asan_opts=allocator_release_to_os=1 %run %t 2>&1 | FileCheck %s --check-prefix=RELEASE +// RUN: %env_asan_opts=allocator_release_to_os=0 %run %t 2>&1 | FileCheck %s --check-prefix=NO_RELEASE +// +// REQUIRES: asan-64-bits +#include <stdio.h> +#include <algorithm> +#include <stdint.h> +#include <assert.h> + +#include <sanitizer/asan_interface.h> + +void MallocReleaseStress() { + const size_t kNumChunks = 10000; + const size_t kAllocSize = 100; + const size_t kNumIter = 100; + uintptr_t *chunks[kNumChunks] = {0}; + + for (size_t iter = 0; iter < kNumIter; iter++) { + std::random_shuffle(chunks, chunks + kNumChunks); + size_t to_replace = rand() % kNumChunks; + for (size_t i = 0; i < kNumChunks; i++) { + if (chunks[i]) + assert(chunks[i][0] == (uintptr_t)chunks[i]); + if (i < to_replace) { + delete [] chunks[i]; + chunks[i] = new uintptr_t[kAllocSize]; + chunks[i][0] = (uintptr_t)chunks[i]; + } + } + } + for (auto p : chunks) + delete[] p; +} + +int main() { + MallocReleaseStress(); + __asan_print_accumulated_stats(); +} + +// RELEASE: mapped:{{.*}}releases: {{[1-9]}} +// NO_RELEASE: mapped:{{.*}}releases: 0 |