| author | Dmitry Vyukov <dvyukov@google.com> | 2014-05-28 15:22:12 +0000 |
|---|---|---|
| committer | Dmitry Vyukov <dvyukov@google.com> | 2014-05-28 15:22:12 +0000 |
| commit | 5864ac39ee2cc3dd2b8169f2c4f552dde2cab0b5 | |
| tree | 4f477b669734c63b052731747d80c7d909fed2e7 | |
| parent | 1e1ad5cf9cea87d3a9b087eced4aa32f75df9462 | |
tsan: do not use 64-bit atomics in allocator code
64-bit atomics make porting asan to 32-bit platforms problematic.
llvm-svn: 209744
Diffstat (limited to 'compiler-rt/lib/sanitizer_common/sanitizer_allocator.h')
| mode | path | lines |
|---|---|---|
| -rw-r--r-- | compiler-rt/lib/sanitizer_common/sanitizer_allocator.h | 42 |

1 file changed, 24 insertions(+), 18 deletions(-)
```diff
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
index a8debd971be..0172e73a705 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
@@ -198,14 +198,12 @@ template<class SizeClassAllocator> struct SizeClassAllocatorLocalCache;
 
 // Memory allocator statistics
 enum AllocatorStat {
-  AllocatorStatMalloced,
-  AllocatorStatFreed,
-  AllocatorStatMmapped,
-  AllocatorStatUnmapped,
+  AllocatorStatAllocated,
+  AllocatorStatMapped,
   AllocatorStatCount
 };
 
-typedef u64 AllocatorStatCounters[AllocatorStatCount];
+typedef uptr AllocatorStatCounters[AllocatorStatCount];
 
 // Per-thread stats, live in per-thread cache.
 class AllocatorStats {
@@ -214,16 +212,21 @@ class AllocatorStats {
     internal_memset(this, 0, sizeof(*this));
   }
 
-  void Add(AllocatorStat i, u64 v) {
+  void Add(AllocatorStat i, uptr v) {
     v += atomic_load(&stats_[i], memory_order_relaxed);
     atomic_store(&stats_[i], v, memory_order_relaxed);
   }
 
-  void Set(AllocatorStat i, u64 v) {
+  void Sub(AllocatorStat i, uptr v) {
+    v = atomic_load(&stats_[i], memory_order_relaxed) - v;
     atomic_store(&stats_[i], v, memory_order_relaxed);
   }
 
-  u64 Get(AllocatorStat i) const {
+  void Set(AllocatorStat i, uptr v) {
+    atomic_store(&stats_[i], v, memory_order_relaxed);
+  }
+
+  uptr Get(AllocatorStat i) const {
     return atomic_load(&stats_[i], memory_order_relaxed);
   }
 
@@ -231,7 +234,7 @@ class AllocatorStats {
   friend class AllocatorGlobalStats;
   AllocatorStats *next_;
   AllocatorStats *prev_;
-  atomic_uint64_t stats_[AllocatorStatCount];
+  atomic_uintptr_t stats_[AllocatorStatCount];
 };
 
 // Global stats, used for aggregation and querying.
@@ -260,7 +263,7 @@ class AllocatorGlobalStats : public AllocatorStats {
   }
 
   void Get(AllocatorStatCounters s) const {
-    internal_memset(s, 0, AllocatorStatCount * sizeof(u64));
+    internal_memset(s, 0, AllocatorStatCount * sizeof(uptr));
     SpinMutexLock l(&mu_);
     const AllocatorStats *stats = this;
     for (;;) {
@@ -270,6 +273,9 @@ class AllocatorGlobalStats : public AllocatorStats {
       if (stats == this)
         break;
     }
+    // All stats must be positive.
+    for (int i = 0; i < AllocatorStatCount; i++)
+      s[i] = ((sptr)s[i]) > 0 ? s[i] : 1;
   }
 
  private:
@@ -522,7 +528,7 @@ class SizeClassAllocator64 {
       map_size += kUserMapSize;
     CHECK_GE(region->mapped_user + map_size, end_idx);
     MapWithCallback(region_beg + region->mapped_user, map_size);
-    stat->Add(AllocatorStatMmapped, map_size);
+    stat->Add(AllocatorStatMapped, map_size);
     region->mapped_user += map_size;
   }
   uptr total_count = (region->mapped_user - beg_idx - size)
@@ -841,7 +847,7 @@ class SizeClassAllocator32 {
     uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
                                                        "SizeClassAllocator32"));
     MapUnmapCallback().OnMap(res, kRegionSize);
-    stat->Add(AllocatorStatMmapped, kRegionSize);
+    stat->Add(AllocatorStatMapped, kRegionSize);
     CHECK_EQ(0U, (res & (kRegionSize - 1)));
     possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
     return res;
@@ -907,7 +913,7 @@ struct SizeClassAllocatorLocalCache {
   void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
     CHECK_NE(class_id, 0UL);
     CHECK_LT(class_id, kNumClasses);
-    stats_.Add(AllocatorStatMalloced, SizeClassMap::Size(class_id));
+    stats_.Add(AllocatorStatAllocated, SizeClassMap::Size(class_id));
     PerClass *c = &per_class_[class_id];
     if (UNLIKELY(c->count == 0))
       Refill(allocator, class_id);
@@ -922,7 +928,7 @@ struct SizeClassAllocatorLocalCache {
     // If the first allocator call on a new thread is a deallocation, then
     // max_count will be zero, leading to check failure.
     InitCache();
-    stats_.Add(AllocatorStatFreed, SizeClassMap::Size(class_id));
+    stats_.Sub(AllocatorStatAllocated, SizeClassMap::Size(class_id));
     PerClass *c = &per_class_[class_id];
     CHECK_NE(c->max_count, 0UL);
     if (UNLIKELY(c->count == c->max_count))
@@ -1033,8 +1039,8 @@ class LargeMmapAllocator {
       stats.currently_allocated += map_size;
       stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
       stats.by_size_log[size_log]++;
-      stat->Add(AllocatorStatMalloced, map_size);
-      stat->Add(AllocatorStatMmapped, map_size);
+      stat->Add(AllocatorStatAllocated, map_size);
+      stat->Add(AllocatorStatMapped, map_size);
     }
     return reinterpret_cast<void*>(res);
   }
@@ -1052,8 +1058,8 @@ class LargeMmapAllocator {
       chunks_sorted_ = false;
      stats.n_frees++;
      stats.currently_allocated -= h->map_size;
-      stat->Add(AllocatorStatFreed, h->map_size);
-      stat->Add(AllocatorStatUnmapped, h->map_size);
+      stat->Sub(AllocatorStatAllocated, h->map_size);
+      stat->Sub(AllocatorStatMapped, h->map_size);
     }
     MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
     UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
```

