author     Kostya Kortchinsky <kostyak@google.com>   2017-05-11 21:40:45 +0000
committer  Kostya Kortchinsky <kostyak@google.com>   2017-05-11 21:40:45 +0000
commit     01a66fc928a18a0df6a53b1068a987a1a3c65029 (patch)
tree       c3c1bc2f82080358edf9116c8b3f52507da3b0da
parent     c103ef89ee7abb5581ad26a3b85a31b44aa4e500 (diff)
[scudo] Use our own combined allocator
Summary:
The reasoning behind this change is twofold:
- the current combined allocator (sanitizer_allocator_combined.h) implements
  features that are not relevant for Scudo, making some code redundant, and
  some restrictions not pertinent (alignments for example). This forced us to
  do some weird things between the frontend and our secondary to make things
  work;
- we have enough information to be able to know if a chunk will be serviced by
  the Primary or Secondary, allowing us to avoid extraneous calls to functions
  such as `PointerIsMine` or `CanAllocate`.

As a result, the new scudo-specific combined allocator is very straightforward,
and allows us to remove some now unnecessary code both in the frontend and the
secondary. Unused functions have been left in as unimplemented for now.

It turns out to also be a sizeable performance gain (3% faster in some Android
memory_replay benchmarks, doing some more on other platforms).

Reviewers: alekseyshl, kcc, dvyukov

Reviewed By: alekseyshl

Subscribers: llvm-commits

Differential Revision: https://reviews.llvm.org/D33007

llvm-svn: 302830
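To illustrate the scheme described in the summary, below is a minimal, self-contained sketch (not the Scudo code; DummyPrimary, DummySecondary and CombinedSketch are invented placeholders). The caller decides once, from the requested size, whether the Primary can service the allocation, and that single boolean is threaded through allocation and deallocation, so the backend never needs `PointerIsMine` or `CanAllocate` on the hot path.

// Minimal sketch of the dispatch idea, with malloc/free standing in for the
// real size-class and mmap based backends.
#include <cstddef>
#include <cstdio>
#include <cstdlib>

struct DummyPrimary {
  static bool CanAllocate(std::size_t Size) { return Size <= (1 << 17); }
  void *Allocate(std::size_t Size) { return std::malloc(Size); }
  void Deallocate(void *Ptr) { std::free(Ptr); }
};

struct DummySecondary {  // stand-in for the mmap-based secondary
  void *Allocate(std::size_t Size) { return std::malloc(Size); }
  void Deallocate(void *Ptr) { std::free(Ptr); }
};

class CombinedSketch {
 public:
  void *Allocate(std::size_t Size, bool FromPrimary) {
    return FromPrimary ? Primary.Allocate(Size) : Secondary.Allocate(Size);
  }
  void Deallocate(void *Ptr, bool FromPrimary) {
    if (FromPrimary)
      Primary.Deallocate(Ptr);
    else
      Secondary.Deallocate(Ptr);
  }
 private:
  DummyPrimary Primary;
  DummySecondary Secondary;
};

int main() {
  CombinedSketch Backend;
  const std::size_t Size = 4096;
  // The decision is made once, up front, from the requested size...
  const bool FromPrimary = DummyPrimary::CanAllocate(Size);
  void *P = Backend.Allocate(Size, FromPrimary);
  // ...and is carried alongside the pointer, so the free path can hand it
  // back down instead of asking the backend which allocator owns the chunk.
  Backend.Deallocate(P, FromPrimary);
  std::printf("FromPrimary=%d\n", FromPrimary ? 1 : 0);
  return 0;
}

In the patch below, that boolean is the FromPrimary bit stored in the chunk's UnpackedHeader.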
-rw-r--r--   compiler-rt/lib/scudo/scudo_allocator.cpp            80
-rw-r--r--   compiler-rt/lib/scudo/scudo_allocator.h                5
-rw-r--r--   compiler-rt/lib/scudo/scudo_allocator_combined.h      84
-rw-r--r--   compiler-rt/lib/scudo/scudo_allocator_secondary.h    101
4 files changed, 155 insertions(+), 115 deletions(-)
diff --git a/compiler-rt/lib/scudo/scudo_allocator.cpp b/compiler-rt/lib/scudo/scudo_allocator.cpp
index 2b7f099dfef..ce69ddf5553 100644
--- a/compiler-rt/lib/scudo/scudo_allocator.cpp
+++ b/compiler-rt/lib/scudo/scudo_allocator.cpp
@@ -73,8 +73,9 @@ struct ScudoChunk : UnpackedHeader {
// Returns the usable size for a chunk, meaning the amount of bytes from the
// beginning of the user data to the end of the backend allocated chunk.
uptr getUsableSize(UnpackedHeader *Header) {
- uptr Size = getBackendAllocator().GetActuallyAllocatedSize(
- getAllocBeg(Header));
+ uptr Size =
+ getBackendAllocator().GetActuallyAllocatedSize(getAllocBeg(Header),
+ Header->FromPrimary);
if (Size == 0)
return 0;
return Size - AlignedChunkHeaderSize - (Header->Offset << MinAlignmentLog);
@@ -221,7 +222,8 @@ struct QuarantineCallback {
explicit QuarantineCallback(AllocatorCache *Cache)
: Cache_(Cache) {}
- // Chunk recycling function, returns a quarantined chunk to the backend.
+ // Chunk recycling function, returns a quarantined chunk to the backend,
+ // first making sure it hasn't been tampered with.
void Recycle(ScudoChunk *Chunk) {
UnpackedHeader Header;
Chunk->loadHeader(&Header);
@@ -231,17 +233,19 @@ struct QuarantineCallback {
}
Chunk->eraseHeader();
void *Ptr = Chunk->getAllocBeg(&Header);
- getBackendAllocator().Deallocate(Cache_, Ptr);
+ getBackendAllocator().Deallocate(Cache_, Ptr, Header.FromPrimary);
}
- // Internal quarantine allocation and deallocation functions.
+ // Internal quarantine allocation and deallocation functions. We first check
+ // that the batches are indeed serviced by the Primary.
+ // TODO(kostyak): figure out the best way to protect the batches.
+ COMPILER_CHECK(sizeof(QuarantineBatch) < SizeClassMap::kMaxSize);
void *Allocate(uptr Size) {
- // TODO(kostyak): figure out the best way to protect the batches.
- return getBackendAllocator().Allocate(Cache_, Size, MinAlignment);
+ return getBackendAllocator().Allocate(Cache_, Size, MinAlignment, true);
}
void Deallocate(void *Ptr) {
- getBackendAllocator().Deallocate(Cache_, Ptr);
+ getBackendAllocator().Deallocate(Cache_, Ptr, true);
}
AllocatorCache *Cache_;
@@ -359,58 +363,55 @@ struct ScudoAllocator {
Size = 1;
uptr NeededSize = RoundUpTo(Size, MinAlignment) + AlignedChunkHeaderSize;
- if (Alignment > MinAlignment)
- NeededSize += Alignment;
- if (NeededSize >= MaxAllowedMallocSize)
+ uptr AlignedSize = (Alignment > MinAlignment) ?
+ NeededSize + (Alignment - AlignedChunkHeaderSize) : NeededSize;
+ if (AlignedSize >= MaxAllowedMallocSize)
return BackendAllocator.ReturnNullOrDieOnBadRequest();
- // Primary backed and Secondary backed allocations have a different
- // treatment. We deal with alignment requirements of Primary serviced
- // allocations here, but the Secondary will take care of its own alignment
- // needs, which means we also have to work around some limitations of the
- // combined allocator to accommodate the situation.
- bool FromPrimary = PrimaryAllocator::CanAllocate(NeededSize, MinAlignment);
+ // Primary and Secondary backed allocations have a different treatment. We
+ // deal with alignment requirements of Primary serviced allocations here,
+ // but the Secondary will take care of its own alignment needs.
+ bool FromPrimary = PrimaryAllocator::CanAllocate(AlignedSize, MinAlignment);
void *Ptr;
uptr Salt;
+ uptr AllocationSize = FromPrimary ? AlignedSize : NeededSize;
uptr AllocationAlignment = FromPrimary ? MinAlignment : Alignment;
ScudoThreadContext *ThreadContext = getThreadContextAndLock();
if (LIKELY(ThreadContext)) {
Salt = getPrng(ThreadContext)->getNext();
Ptr = BackendAllocator.Allocate(getAllocatorCache(ThreadContext),
- NeededSize, AllocationAlignment);
+ AllocationSize, AllocationAlignment,
+ FromPrimary);
ThreadContext->unlock();
} else {
SpinMutexLock l(&FallbackMutex);
Salt = FallbackPrng.getNext();
- Ptr = BackendAllocator.Allocate(&FallbackAllocatorCache, NeededSize,
- AllocationAlignment);
+ Ptr = BackendAllocator.Allocate(&FallbackAllocatorCache, AllocationSize,
+ AllocationAlignment, FromPrimary);
}
if (!Ptr)
return BackendAllocator.ReturnNullOrDieOnOOM();
- uptr AllocBeg = reinterpret_cast<uptr>(Ptr);
- // If the allocation was serviced by the secondary, the returned pointer
- // accounts for ChunkHeaderSize to pass the alignment check of the combined
- // allocator. Adjust it here.
- if (!FromPrimary) {
- AllocBeg -= AlignedChunkHeaderSize;
- if (Alignment > MinAlignment)
- NeededSize -= Alignment;
- }
-
// If requested, we will zero out the entire contents of the returned chunk.
if ((ForceZeroContents || ZeroContents) && FromPrimary)
- memset(Ptr, 0, BackendAllocator.GetActuallyAllocatedSize(Ptr));
+ memset(Ptr, 0,
+ BackendAllocator.GetActuallyAllocatedSize(Ptr, FromPrimary));
+ UnpackedHeader Header = {};
+ uptr AllocBeg = reinterpret_cast<uptr>(Ptr);
uptr UserBeg = AllocBeg + AlignedChunkHeaderSize;
- if (!IsAligned(UserBeg, Alignment))
+ if (!IsAligned(UserBeg, Alignment)) {
+ // Since the Secondary takes care of alignment, a non-aligned pointer
+ // means it is from the Primary. It is also the only case where the offset
+ // field of the header would be non-zero.
+ CHECK(FromPrimary);
UserBeg = RoundUpTo(UserBeg, Alignment);
- CHECK_LE(UserBeg + Size, AllocBeg + NeededSize);
- UnpackedHeader Header = {};
+ uptr Offset = UserBeg - AlignedChunkHeaderSize - AllocBeg;
+ Header.Offset = Offset >> MinAlignmentLog;
+ }
+ CHECK_LE(UserBeg + Size, AllocBeg + AllocationSize);
Header.State = ChunkAllocated;
- uptr Offset = UserBeg - AlignedChunkHeaderSize - AllocBeg;
- Header.Offset = Offset >> MinAlignmentLog;
Header.AllocType = Type;
if (FromPrimary) {
Header.FromPrimary = FromPrimary;
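To make the new alignment handling above concrete, here is a small standalone sketch of the Primary-serviced path; MinAlignment = 16, AlignedChunkHeaderSize = 16, Size = 100, Alignment = 64 and the AllocBeg address are assumed purely for illustration.

// Worked example of the AlignedSize and Header.Offset computations.
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t MinAlignment = 16, MinAlignmentLog = 4;
  const uint64_t AlignedChunkHeaderSize = 16;
  const uint64_t Size = 100, Alignment = 64, AllocBeg = 0x1000;

  auto RoundUpTo = [](uint64_t X, uint64_t B) { return (X + B - 1) & ~(B - 1); };

  uint64_t NeededSize = RoundUpTo(Size, MinAlignment) + AlignedChunkHeaderSize;  // 128
  uint64_t AlignedSize = NeededSize + (Alignment - AlignedChunkHeaderSize);      // 176
  uint64_t UserBeg = AllocBeg + AlignedChunkHeaderSize;                          // 0x1010
  uint64_t Offset = 0;
  if (UserBeg & (Alignment - 1)) {            // not 64-byte aligned: Primary case
    UserBeg = RoundUpTo(UserBeg, Alignment);  // 0x1040
    Offset = (UserBeg - AlignedChunkHeaderSize - AllocBeg) >> MinAlignmentLog;   // 3
  }
  // UserBeg + Size (0x10a4) stays within AllocBeg + AlignedSize (0x10b0).
  std::printf("AlignedSize=%llu UserBeg=%#llx Header.Offset=%llu\n",
              (unsigned long long)AlignedSize, (unsigned long long)UserBeg,
              (unsigned long long)Offset);
  return 0;
}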
@@ -437,17 +438,20 @@ struct ScudoAllocator {
// with no additional security value.
void quarantineOrDeallocateChunk(ScudoChunk *Chunk, UnpackedHeader *Header,
uptr Size) {
+ bool FromPrimary = Header->FromPrimary;
bool BypassQuarantine = (AllocatorQuarantine.GetCacheSize() == 0);
if (BypassQuarantine) {
Chunk->eraseHeader();
void *Ptr = Chunk->getAllocBeg(Header);
ScudoThreadContext *ThreadContext = getThreadContextAndLock();
if (LIKELY(ThreadContext)) {
- getBackendAllocator().Deallocate(getAllocatorCache(ThreadContext), Ptr);
+ getBackendAllocator().Deallocate(getAllocatorCache(ThreadContext), Ptr,
+ FromPrimary);
ThreadContext->unlock();
} else {
SpinMutexLock Lock(&FallbackMutex);
- getBackendAllocator().Deallocate(&FallbackAllocatorCache, Ptr);
+ getBackendAllocator().Deallocate(&FallbackAllocatorCache, Ptr,
+ FromPrimary);
}
} else {
UnpackedHeader NewHeader = *Header;
diff --git a/compiler-rt/lib/scudo/scudo_allocator.h b/compiler-rt/lib/scudo/scudo_allocator.h
index f159deffb1d..2dad7320cfa 100644
--- a/compiler-rt/lib/scudo/scudo_allocator.h
+++ b/compiler-rt/lib/scudo/scudo_allocator.h
@@ -107,11 +107,12 @@ typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, 0, SizeClassMap,
#endif // SANITIZER_CAN_USE_ALLOCATOR64
#include "scudo_allocator_secondary.h"
+#include "scudo_allocator_combined.h"
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef ScudoLargeMmapAllocator SecondaryAllocator;
-typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
- ScudoBackendAllocator;
+typedef ScudoCombinedAllocator<PrimaryAllocator, AllocatorCache,
+ SecondaryAllocator> ScudoBackendAllocator;
void initScudo();
diff --git a/compiler-rt/lib/scudo/scudo_allocator_combined.h b/compiler-rt/lib/scudo/scudo_allocator_combined.h
new file mode 100644
index 00000000000..c978db55a9d
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_allocator_combined.h
@@ -0,0 +1,84 @@
+//===-- scudo_allocator_combined.h ------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo Combined Allocator, dispatches allocation & deallocation requests to
+/// the Primary or the Secondary backend allocators.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_ALLOCATOR_COMBINED_H_
+#define SCUDO_ALLOCATOR_COMBINED_H_
+
+#ifndef SCUDO_ALLOCATOR_H_
+#error "This file must be included inside scudo_allocator.h."
+#endif
+
+template <class PrimaryAllocator, class AllocatorCache,
+ class SecondaryAllocator>
+class ScudoCombinedAllocator {
+ public:
+ void Init(bool AllocatorMayReturnNull, s32 ReleaseToOSIntervalMs) {
+ Primary.Init(ReleaseToOSIntervalMs);
+ Secondary.Init(AllocatorMayReturnNull);
+ Stats.Init();
+ atomic_store_relaxed(&MayReturnNull, AllocatorMayReturnNull);
+ }
+
+ void *Allocate(AllocatorCache *Cache, uptr Size, uptr Alignment,
+ bool FromPrimary) {
+ if (FromPrimary)
+ return Cache->Allocate(&Primary, Primary.ClassID(Size));
+ return Secondary.Allocate(&Stats, Size, Alignment);
+ }
+
+ void *ReturnNullOrDieOnBadRequest() {
+ if (atomic_load_relaxed(&MayReturnNull))
+ return nullptr;
+ ReportAllocatorCannotReturnNull(false);
+ }
+
+ void *ReturnNullOrDieOnOOM() {
+ if (atomic_load_relaxed(&MayReturnNull))
+ return nullptr;
+ ReportAllocatorCannotReturnNull(true);
+ }
+
+ void Deallocate(AllocatorCache *Cache, void *Ptr, bool FromPrimary) {
+ if (FromPrimary)
+ Cache->Deallocate(&Primary, Primary.GetSizeClass(Ptr), Ptr);
+ else
+ Secondary.Deallocate(&Stats, Ptr);
+ }
+
+ uptr GetActuallyAllocatedSize(void *Ptr, bool FromPrimary) {
+ if (FromPrimary)
+ return Primary.GetActuallyAllocatedSize(Ptr);
+ return Secondary.GetActuallyAllocatedSize(Ptr);
+ }
+
+ void InitCache(AllocatorCache *Cache) {
+ Cache->Init(&Stats);
+ }
+
+ void DestroyCache(AllocatorCache *Cache) {
+ Cache->Destroy(&Primary, &Stats);
+ }
+
+ void GetStats(AllocatorStatCounters StatType) const {
+ Stats.Get(StatType);
+ }
+
+ private:
+ PrimaryAllocator Primary;
+ SecondaryAllocator Secondary;
+ AllocatorGlobalStats Stats;
+ atomic_uint8_t MayReturnNull;
+};
+
+#endif // SCUDO_ALLOCATOR_COMBINED_H_
diff --git a/compiler-rt/lib/scudo/scudo_allocator_secondary.h b/compiler-rt/lib/scudo/scudo_allocator_secondary.h
index fbc7f247d70..2950909b547 100644
--- a/compiler-rt/lib/scudo/scudo_allocator_secondary.h
+++ b/compiler-rt/lib/scudo/scudo_allocator_secondary.h
@@ -26,20 +26,19 @@ class ScudoLargeMmapAllocator {
void Init(bool AllocatorMayReturnNull) {
PageSize = GetPageSizeCached();
- atomic_store(&MayReturnNull, AllocatorMayReturnNull, memory_order_relaxed);
+ atomic_store_relaxed(&MayReturnNull, AllocatorMayReturnNull);
}
void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
+ uptr UserSize = Size - AlignedChunkHeaderSize;
// The Scudo frontend prevents us from allocating more than
// MaxAllowedMallocSize, so integer overflow checks would be superfluous.
uptr MapSize = Size + SecondaryHeaderSize;
+ if (Alignment > MinAlignment)
+ MapSize += Alignment;
MapSize = RoundUpTo(MapSize, PageSize);
// Account for 2 guard pages, one before and one after the chunk.
MapSize += 2 * PageSize;
- // The size passed to the Secondary comprises the alignment, if large
- // enough. Subtract it here to get the requested size, including header.
- if (Alignment > MinAlignment)
- Size -= Alignment;
uptr MapBeg = reinterpret_cast<uptr>(MmapNoAccess(MapSize));
if (MapBeg == ~static_cast<uptr>(0))
@@ -51,32 +50,32 @@ class ScudoLargeMmapAllocator {
// initial guard page, and both headers. This is the pointer that has to
// abide by alignment requirements.
uptr UserBeg = MapBeg + PageSize + HeadersSize;
+ uptr UserEnd = UserBeg + UserSize;
// In the rare event of larger alignments, we will attempt to fit the mmap
// area better and unmap extraneous memory. This will also ensure that the
// offset and unused bytes field of the header stay small.
if (Alignment > MinAlignment) {
- if (UserBeg & (Alignment - 1))
- UserBeg += Alignment - (UserBeg & (Alignment - 1));
- CHECK_GE(UserBeg, MapBeg);
- uptr NewMapBeg = RoundDownTo(UserBeg - HeadersSize, PageSize) - PageSize;
- CHECK_GE(NewMapBeg, MapBeg);
- uptr NewMapEnd = RoundUpTo(UserBeg + (Size - AlignedChunkHeaderSize),
- PageSize) + PageSize;
- CHECK_LE(NewMapEnd, MapEnd);
- // Unmap the extra memory if it's large enough, on both sides.
- uptr Diff = NewMapBeg - MapBeg;
- if (Diff > PageSize)
- UnmapOrDie(reinterpret_cast<void *>(MapBeg), Diff);
- Diff = MapEnd - NewMapEnd;
- if (Diff > PageSize)
- UnmapOrDie(reinterpret_cast<void *>(NewMapEnd), Diff);
- MapBeg = NewMapBeg;
- MapEnd = NewMapEnd;
- MapSize = NewMapEnd - NewMapBeg;
+ if (!IsAligned(UserBeg, Alignment)) {
+ UserBeg = RoundUpTo(UserBeg, Alignment);
+ CHECK_GE(UserBeg, MapBeg);
+ uptr NewMapBeg = RoundDownTo(UserBeg - HeadersSize, PageSize) -
+ PageSize;
+ CHECK_GE(NewMapBeg, MapBeg);
+ if (NewMapBeg != MapBeg) {
+ UnmapOrDie(reinterpret_cast<void *>(MapBeg), NewMapBeg - MapBeg);
+ MapBeg = NewMapBeg;
+ }
+ UserEnd = UserBeg + UserSize;
+ }
+ uptr NewMapEnd = RoundUpTo(UserEnd, PageSize) + PageSize;
+ if (NewMapEnd != MapEnd) {
+ UnmapOrDie(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd);
+ MapEnd = NewMapEnd;
+ }
+ MapSize = MapEnd - MapBeg;
}
- uptr UserEnd = UserBeg + (Size - AlignedChunkHeaderSize);
CHECK_LE(UserEnd, MapEnd - PageSize);
// Actually mmap the memory, preserving the guard pages on either side.
CHECK_EQ(MapBeg + PageSize, reinterpret_cast<uptr>(
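The trimming performed above for larger alignments can be followed with a standalone arithmetic sketch; the page size, header sizes, alignment, request size and mmap address below are assumptions chosen only to walk through the bookkeeping, not values taken from the patch. Nothing is actually mapped or unmapped.

// Worked example of the Secondary's map trimming for a 64 KiB alignment.
#include <cstdint>
#include <cstdio>

int main() {
  using uptr = uint64_t;
  auto RoundUpTo   = [](uptr X, uptr B) { return (X + B - 1) & ~(B - 1); };
  auto RoundDownTo = [](uptr X, uptr B) { return X & ~(B - 1); };

  const uptr PageSize = 0x1000, SecondaryHeaderSize = 0x10;
  const uptr AlignedChunkHeaderSize = 0x10, HeadersSize = 0x20;
  const uptr Alignment = 0x10000;                        // 64 KiB
  const uptr Size = 0x20000 + AlignedChunkHeaderSize;    // 128 KiB of user data
  const uptr UserSize = Size - AlignedChunkHeaderSize;

  uptr MapSize = RoundUpTo(Size + SecondaryHeaderSize + Alignment, PageSize)
                 + 2 * PageSize;                         // 0x33000, incl. guards
  uptr MapBeg = 0x400005000, MapEnd = MapBeg + MapSize;  // pretend mmap result
  uptr UserBeg = MapBeg + PageSize + HeadersSize;        // 0x400006020, unaligned
  UserBeg = RoundUpTo(UserBeg, Alignment);               // 0x400010000
  uptr NewMapBeg = RoundDownTo(UserBeg - HeadersSize, PageSize) - PageSize;    // 0x40000e000
  uptr UserEnd = UserBeg + UserSize;                     // 0x400030000
  uptr NewMapEnd = RoundUpTo(UserEnd, PageSize) + PageSize;                    // 0x400031000
  std::printf("front trim %#llx, back trim %#llx, final MapSize %#llx\n",
              (unsigned long long)(NewMapBeg - MapBeg),     // 0x9000 returned
              (unsigned long long)(MapEnd - NewMapEnd),     // 0x7000 returned
              (unsigned long long)(NewMapEnd - NewMapBeg)); // 0x23000 kept
  return 0;
}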
@@ -94,25 +93,15 @@ class ScudoLargeMmapAllocator {
Stats->Add(AllocatorStatMapped, MapSize - 2 * PageSize);
}
- return reinterpret_cast<void *>(UserBeg);
- }
-
- void *ReturnNullOrDieOnBadRequest() {
- if (atomic_load(&MayReturnNull, memory_order_acquire))
- return nullptr;
- ReportAllocatorCannotReturnNull(false);
+ return reinterpret_cast<void *>(Ptr);
}
void *ReturnNullOrDieOnOOM() {
- if (atomic_load(&MayReturnNull, memory_order_acquire))
+ if (atomic_load_relaxed(&MayReturnNull))
return nullptr;
ReportAllocatorCannotReturnNull(true);
}
- void SetMayReturnNull(bool AllocatorMayReturnNull) {
- atomic_store(&MayReturnNull, AllocatorMayReturnNull, memory_order_release);
- }
-
void Deallocate(AllocatorStats *Stats, void *Ptr) {
SecondaryHeader *Header = getHeader(Ptr);
{
@@ -123,14 +112,6 @@ class ScudoLargeMmapAllocator {
UnmapOrDie(reinterpret_cast<void *>(Header->MapBeg), Header->MapSize);
}
- uptr TotalMemoryUsed() {
- UNIMPLEMENTED();
- }
-
- bool PointerIsMine(const void *Ptr) {
- UNIMPLEMENTED();
- }
-
uptr GetActuallyAllocatedSize(void *Ptr) {
SecondaryHeader *Header = getHeader(Ptr);
// Deduct PageSize as MapSize includes the trailing guard page.
@@ -138,39 +119,9 @@ class ScudoLargeMmapAllocator {
return MapEnd - reinterpret_cast<uptr>(Ptr);
}
- void *GetMetaData(const void *Ptr) {
- UNIMPLEMENTED();
- }
-
- void *GetBlockBegin(const void *Ptr) {
- UNIMPLEMENTED();
- }
-
- void *GetBlockBeginFastLocked(void *Ptr) {
- UNIMPLEMENTED();
- }
-
- void PrintStats() {
- UNIMPLEMENTED();
- }
-
- void ForceLock() {
- UNIMPLEMENTED();
- }
-
- void ForceUnlock() {
- UNIMPLEMENTED();
- }
-
- void ForEachChunk(ForEachChunkCallback Callback, void *Arg) {
- UNIMPLEMENTED();
- }
-
private:
// A Secondary allocated chunk header contains the base of the mapping and
- // its size. Currently, the base is always a page before the header, but
- // we might want to extend that number in the future based on the size of
- // the allocation.
+ // its size, which comprises the guard pages.
struct SecondaryHeader {
uptr MapBeg;
uptr MapSize;