Diffstat (limited to 'compiler-rt/lib')
-rw-r--r--  compiler-rt/lib/asan/asan_allocator2.cc                 32
-rw-r--r--  compiler-rt/lib/lsan/lsan_allocator.cc                   3
-rw-r--r--  compiler-rt/lib/lsan/lsan_common.cc                      4
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_allocator.h  15
4 files changed, 35 insertions, 19 deletions
diff --git a/compiler-rt/lib/asan/asan_allocator2.cc b/compiler-rt/lib/asan/asan_allocator2.cc
index d74aa553a28..67e6ef62a0d 100644
--- a/compiler-rt/lib/asan/asan_allocator2.cc
+++ b/compiler-rt/lib/asan/asan_allocator2.cc
@@ -528,9 +528,8 @@ static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) {
return new_ptr;
}
-static AsanChunk *GetAsanChunkByAddr(uptr p) {
- void *ptr = reinterpret_cast<void *>(p);
- uptr alloc_beg = reinterpret_cast<uptr>(allocator.GetBlockBegin(ptr));
+// Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
+static AsanChunk *GetAsanChunk(void *alloc_beg) {
if (!alloc_beg) return 0;
uptr *memalign_magic = reinterpret_cast<uptr *>(alloc_beg);
if (memalign_magic[0] == kMemalignMagic) {
@@ -538,13 +537,13 @@ static AsanChunk *GetAsanChunkByAddr(uptr p) {
CHECK(m->from_memalign);
return m;
}
- if (!allocator.FromPrimary(ptr)) {
- uptr *meta = reinterpret_cast<uptr *>(
- allocator.GetMetaData(reinterpret_cast<void *>(alloc_beg)));
+ if (!allocator.FromPrimary(alloc_beg)) {
+ uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
return m;
}
- uptr actual_size = allocator.GetActuallyAllocatedSize(ptr);
+ uptr actual_size =
+ allocator.GetActuallyAllocatedSize(alloc_beg);
CHECK_LE(actual_size, SizeClassMap::kMaxSize);
// We know the actually allocated size, but we don't know the redzone size.
// Just try all possible redzone sizes.
@@ -554,11 +553,23 @@ static AsanChunk *GetAsanChunkByAddr(uptr p) {
if (ComputeRZLog(max_possible_size) != rz_log)
continue;
return reinterpret_cast<AsanChunk *>(
- alloc_beg + rz_size - kChunkHeaderSize);
+ reinterpret_cast<uptr>(alloc_beg) + rz_size - kChunkHeaderSize);
}
return 0;
}
+static AsanChunk *GetAsanChunkByAddr(uptr p) {
+ void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
+ return GetAsanChunk(alloc_beg);
+}
+
+// Allocator must be locked when this function is called.
+static AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
+ void *alloc_beg =
+ allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
+ return GetAsanChunk(alloc_beg);
+}
+
static uptr AllocationSize(uptr p) {
AsanChunk *m = GetAsanChunkByAddr(p);
if (!m) return 0;
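
For blocks owned by the primary allocator there is no metadata slot holding the chunk pointer, so GetAsanChunk recovers the header by probing every possible redzone size and keeping the one whose ComputeRZLog agrees with itself, as in the hunk above. The toy program below reproduces only that arithmetic; kChunkHeaderSize, RZLog2Size and ComputeRZLog here are simplified stand-ins, not ASan's real definitions.

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Simplified stand-ins for the ASan constants/helpers used by GetAsanChunk.
    static const uintptr_t kChunkHeaderSize = 16;
    static uintptr_t RZLog2Size(unsigned rz_log) { return 16u << rz_log; }  // 16, 32, 64
    static unsigned ComputeRZLog(uintptr_t user_size) {
      // Toy policy: bigger allocations get bigger redzones.
      return user_size <= 64 ? 0 : user_size <= 128 ? 1 : 2;
    }

    // Given the block start and its actually allocated size, find the redzone
    // log that is consistent with itself and derive the chunk header address.
    static uintptr_t FindChunkHeader(uintptr_t alloc_beg, uintptr_t actual_size) {
      for (unsigned rz_log = 0; rz_log < 3; rz_log++) {
        uintptr_t rz_size = RZLog2Size(rz_log);
        uintptr_t max_possible_size = actual_size - rz_size;
        if (ComputeRZLog(max_possible_size) != rz_log) continue;
        return alloc_beg + rz_size - kChunkHeaderSize;
      }
      return 0;  // no consistent redzone size found
    }

    int main() {
      // 128 actually-allocated bytes resolve to a 32-byte redzone in this model,
      // so the header sits kChunkHeaderSize bytes before the user region.
      uintptr_t header = FindChunkHeader(/*alloc_beg=*/0x1000, /*actual_size=*/128);
      assert(header == 0x1000 + 32 - kChunkHeaderSize);
      std::printf("chunk header at %#lx\n", static_cast<unsigned long>(header));
    }
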
@@ -721,7 +732,7 @@ void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
void *PointsIntoChunk(void* p) {
uptr addr = reinterpret_cast<uptr>(p);
- __asan::AsanChunk *m = __asan::GetAsanChunkByAddr(addr);
+ __asan::AsanChunk *m = __asan::GetAsanChunkByAddrFastLocked(addr);
if (!m) return 0;
uptr chunk = m->Beg();
if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr))
@@ -730,7 +741,8 @@ void *PointsIntoChunk(void* p) {
}
void *GetUserBegin(void *p) {
- __asan::AsanChunk *m = __asan::GetAsanChunkByAddr(reinterpret_cast<uptr>(p));
+ __asan::AsanChunk *m =
+ __asan::GetAsanChunkByAddrFastLocked(reinterpret_cast<uptr>(p));
CHECK(m);
return reinterpret_cast<void *>(m->Beg());
}
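
The only difference between GetAsanChunkByAddr and the new GetAsanChunkByAddrFastLocked is which block-begin lookup they call, and the Fast variant trades internal locking for the precondition spelled out in its comment. A minimal model of that contract, assuming a ForceLock/ForceUnlock pair of the kind the LockAllocator/UnlockAllocator hooks wrap; everything else here is invented for illustration.

    #include <cassert>
    #include <mutex>

    // Toy allocator modelling only the locking contract, not real bookkeeping.
    struct ToyAllocator {
      std::mutex mu;
      bool locked = false;
      void ForceLock()   { mu.lock(); locked = true; }
      void ForceUnlock() { locked = false; mu.unlock(); }

      void *GetBlockBegin(void *p) {            // safe anywhere: locks internally
        std::lock_guard<std::mutex> g(mu);
        return p;                               // placeholder result
      }
      void *GetBlockBeginFastLocked(void *p) {  // caller must hold the lock
        assert(locked && "precondition: allocator already locked by caller");
        return p;                               // placeholder result
      }
    };

    int main() {
      ToyAllocator a;
      int x = 0;
      a.GetBlockBegin(&x);            // GetAsanChunkByAddr-style path
      a.ForceLock();                  // what LockAllocator() amounts to
      a.GetBlockBeginFastLocked(&x);  // GetAsanChunkByAddrFastLocked-style path
      a.ForceUnlock();
    }
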
diff --git a/compiler-rt/lib/lsan/lsan_allocator.cc b/compiler-rt/lib/lsan/lsan_allocator.cc
index 49b5a9fa4c5..3ae773b21bd 100644
--- a/compiler-rt/lib/lsan/lsan_allocator.cc
+++ b/compiler-rt/lib/lsan/lsan_allocator.cc
@@ -133,8 +133,7 @@ void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
}
void *PointsIntoChunk(void* p) {
- if (!allocator.PointerIsMine(p)) return 0;
- void *chunk = allocator.GetBlockBegin(p);
+ void *chunk = allocator.GetBlockBeginFastLocked(p);
if (!chunk) return 0;
// LargeMmapAllocator considers pointers to the meta-region of a chunk to be
// valid, but we don't want that.
diff --git a/compiler-rt/lib/lsan/lsan_common.cc b/compiler-rt/lib/lsan/lsan_common.cc
index e2971e999aa..f6e93ae09f1 100644
--- a/compiler-rt/lib/lsan/lsan_common.cc
+++ b/compiler-rt/lib/lsan/lsan_common.cc
@@ -236,7 +236,7 @@ static void LockAndSuspendThreads(StopTheWorldCallback callback, void *arg) {
LockThreadRegistry();
LockAllocator();
StopTheWorld(callback, arg);
- // Allocator must be unlocked by the callback.
+ UnlockAllocator();
UnlockThreadRegistry();
}
@@ -293,8 +293,6 @@ static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads,
void *arg) {
LeakCheckResult *result = reinterpret_cast<LeakCheckResult *>(arg);
CHECK_EQ(*result, kFatalError);
- // Allocator must not be locked when we call GetRegionBegin().
- UnlockAllocator();
ClassifyAllChunks(suspended_threads);
LeakReport leak_report;
CollectLeaks(&leak_report);
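
With this change the allocator lock is taken before the world is stopped and released only after StopTheWorld returns, so the leak-check callback runs entirely under the lock instead of dropping it itself; that is exactly the precondition the GetBlockBeginFastLocked-based lookups above rely on. A self-contained mock of the new control flow, where std::mutex stands in for the allocator lock and StopTheWorld is reduced to a direct call.

    #include <cstdio>
    #include <mutex>

    static std::mutex allocator_mutex;  // stands in for the real allocator lock

    // Stand-in for work that uses the *FastLocked accessors: only valid while
    // allocator_mutex is held by the caller.
    static void ClassifyAndCollectLeaks() {
      std::printf("scanning chunks with the allocator locked\n");
    }

    static void DoLeakCheckCallbackMock() {
      // Unlike the old code, the callback no longer unlocks the allocator;
      // it relies on the caller holding the lock for its whole duration.
      ClassifyAndCollectLeaks();
    }

    static void LockAndSuspendThreadsMock(void (*callback)()) {
      allocator_mutex.lock();    // LockAllocator()
      callback();                // StopTheWorld(callback, arg) in the real code
      allocator_mutex.unlock();  // UnlockAllocator(): now the caller's job
    }

    int main() { LockAndSuspendThreadsMock(DoLeakCheckCallbackMock); }
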
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
index 093f1fb9333..d24f42b3c04 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
@@ -1043,10 +1043,9 @@ class LargeMmapAllocator {
return GetUser(h);
}
- // This function does the same as GetBlockBegin, but much faster.
- // It may be called only in a single-threaded context, e.g. when all other
- // threads are suspended or joined.
- void *GetBlockBeginFastSingleThreaded(void *ptr) {
+ // This function does the same as GetBlockBegin, but is much faster.
+ // Must be called with the allocator locked.
+ void *GetBlockBeginFastLocked(void *ptr) {
uptr p = reinterpret_cast<uptr>(ptr);
uptr n = n_chunks_;
if (!n) return 0;
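
Only the first lines of the renamed function are visible here, but the point of GetBlockBeginFastLocked is that it may walk the chunk bookkeeping without locking, because the caller guarantees the allocator is already locked (or the world is stopped). A deliberately simplified sketch of that idea follows; the real LargeMmapAllocator lookup is more involved, and ToyHeader/chunks_ below are invented stand-ins.

    #include <cstdint>

    // Toy model: each large chunk records where its mapping begins and its size;
    // headers are reachable through a plain array, as in LargeMmapAllocator.
    struct ToyHeader { uintptr_t map_beg, map_size; };

    static ToyHeader *chunks_[1024];  // guarded by the allocator lock
    static uintptr_t n_chunks_ = 0;   // guarded by the allocator lock

    // Caller must hold the allocator lock: chunks_/n_chunks_ are read directly
    // without any internal locking.
    static void *GetBlockBeginFastLockedToy(void *ptr) {
      uintptr_t p = reinterpret_cast<uintptr_t>(ptr);
      uintptr_t n = n_chunks_;
      if (!n) return 0;
      for (uintptr_t i = 0; i < n; i++) {  // simplified: plain linear scan
        ToyHeader *h = chunks_[i];
        if (p >= h->map_beg && p < h->map_beg + h->map_size)
          return reinterpret_cast<void *>(h->map_beg);
      }
      return 0;
    }

    int main() {
      static ToyHeader h = {0x200000, 0x10000};
      chunks_[n_chunks_++] = &h;
      void *beg = GetBlockBeginFastLockedToy(reinterpret_cast<void *>(0x204000));
      return beg == reinterpret_cast<void *>(0x200000) ? 0 : 1;
    }
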
@@ -1238,6 +1237,14 @@ class CombinedAllocator {
return secondary_.GetBlockBegin(p);
}
+ // This function does the same as GetBlockBegin, but is much faster.
+ // Must be called with the allocator locked.
+ void *GetBlockBeginFastLocked(void *p) {
+ if (primary_.PointerIsMine(p))
+ return primary_.GetBlockBegin(p);
+ return secondary_.GetBlockBeginFastLocked(p);
+ }
+
uptr GetActuallyAllocatedSize(void *p) {
if (primary_.PointerIsMine(p))
return primary_.GetActuallyAllocatedSize(p);
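
The CombinedAllocator variant just dispatches: primary pointers keep using the primary's ordinary GetBlockBegin, everything else goes to the secondary's fast path, and pointers owned by neither come back as null. That last property is what lets the lsan_allocator.cc hunk above drop its explicit PointerIsMine() guard. A toy demonstration of the dispatch, with invented ownership ranges and return values.

    #include <cstdio>

    // Toy primary/secondary pair mirroring the dispatch added to CombinedAllocator.
    struct ToyPrimary {
      char *lo, *hi;                              // invented ownership range
      bool PointerIsMine(void *p) {
        char *c = static_cast<char *>(p);
        return c >= lo && c < hi;
      }
      void *GetBlockBegin(void *) { return lo; }  // pretend it is all one block
    };
    struct ToySecondary {
      // Like the large-allocation lookup, returns null for unknown pointers.
      void *GetBlockBeginFastLocked(void *) { return 0; }
    };
    struct ToyCombined {
      ToyPrimary primary_;
      ToySecondary secondary_;
      void *GetBlockBeginFastLocked(void *p) {
        if (primary_.PointerIsMine(p))
          return primary_.GetBlockBegin(p);
        return secondary_.GetBlockBeginFastLocked(p);
      }
    };

    int main() {
      static char region[64];
      ToyCombined c = {{region, region + sizeof(region)}, {}};
      std::printf("%p\n", c.GetBlockBeginFastLocked(region + 8));    // primary hit
      int unrelated = 0;
      std::printf("%p\n", c.GetBlockBeginFastLocked(&unrelated));    // neither owns it: null
    }
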