author     Peter Collingbourne <peter@pcc.me.uk>   2019-12-09 13:02:24 -0800
committer  Peter Collingbourne <peter@pcc.me.uk>   2019-12-10 12:08:52 -0800
commit     e966416ff15178bf982430085be9ae69e5a511fa (patch)
tree       61b992b46df3554096b0a0b0197770745ec9b551
parent     fb4d8fe1a86232f4711c911e6feccce778e27afa (diff)
download   bcm5719-llvm-e966416ff15178bf982430085be9ae69e5a511fa.tar.gz
           bcm5719-llvm-e966416ff15178bf982430085be9ae69e5a511fa.zip
scudo: Move getChunkFromBlock() allocated check into caller. NFCI.
With tag-on-free we will need to get the chunk of a deallocated block. Change getChunkFromBlock() so that it doesn't check that the chunk is allocated, and move the check into the caller, so that it can be reused for this purpose.

Differential Revision: https://reviews.llvm.org/D71291
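
For context, a minimal self-contained sketch of the new contract follows: the lookup helper only locates the chunk and validates its header, and each caller applies its own state filter. Everything below (FakeBlock, isValidHeader, the flat UnpackedHeader, HeaderSize) is a simplified stand-in invented for illustration, not the actual scudo data structures; the real header is packed and checksummed, and the real validity check is Chunk::isValid().

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Simplified stand-ins for the scudo types; the real definitions live in
// chunk.h/combined.h and use a packed, checksummed header.
using uptr = uintptr_t;

enum class ChunkState : uint8_t { Available, Allocated, Quarantined };

struct UnpackedHeader {
  ChunkState State;
  uptr Size;
};

// Hypothetical layout: one header stored directly in front of the user data.
struct FakeBlock {
  UnpackedHeader Header;
  unsigned char Data[64];
};

constexpr uptr HeaderSize = sizeof(UnpackedHeader);

// Stand-in for Chunk::isValid(): recover the header of the chunk at `Chunk`
// and report whether it looks sane. It deliberately does NOT require
// State == Allocated.
bool isValidHeader(uptr Chunk, UnpackedHeader *Header) {
  *Header = *reinterpret_cast<UnpackedHeader *>(Chunk - HeaderSize);
  return Header->Size != 0; // the real code verifies a checksum instead
}

// New-style getChunkFromBlock(): locate the chunk and validate its header;
// the caller inspects Header->State itself.
bool getChunkFromBlock(uptr Block, uptr *Chunk, UnpackedHeader *Header) {
  *Chunk = Block + HeaderSize;
  return isValidHeader(*Chunk, Header);
}

int main() {
  FakeBlock Block = {{ChunkState::Quarantined, 64}, {}};
  const uptr BlockAddr = reinterpret_cast<uptr>(&Block);

  uptr Chunk;
  UnpackedHeader Header;

  // Iteration-style caller: only wants live allocations, so it filters on
  // the state itself, as the updated lambda in combined.h now does.
  if (getChunkFromBlock(BlockAddr, &Chunk, &Header) &&
      Header.State == ChunkState::Allocated)
    printf("live chunk of %zu bytes\n", static_cast<size_t>(Header.Size));

  // Tag-on-free-style caller: also needs chunks that are no longer
  // allocated, which the old helper would have rejected outright.
  if (getChunkFromBlock(BlockAddr, &Chunk, &Header) &&
      Header.State != ChunkState::Allocated)
    printf("deallocated chunk, header-recorded size %zu still readable\n",
           static_cast<size_t>(Header.Size));
  return 0;
}

Returning a bool plus out-parameters also makes the InvalidChunk sentinel unnecessary, which is why that constant is removed in the diff below.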
-rw-r--r--   compiler-rt/lib/scudo/standalone/combined.h   26
1 files changed, 9 insertions, 17 deletions
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
index b355a4746fa..02c998e666d 100644
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -418,10 +418,11 @@ public:
     auto Lambda = [this, From, To, Callback, Arg](uptr Block) {
       if (Block < From || Block >= To)
         return;
-      uptr ChunkSize;
-      const uptr ChunkBase = getChunkFromBlock(Block, &ChunkSize);
-      if (ChunkBase != InvalidChunk)
-        Callback(ChunkBase, ChunkSize, Arg);
+      uptr Chunk;
+      Chunk::UnpackedHeader Header;
+      if (getChunkFromBlock(Block, &Chunk, &Header) &&
+          Header.State == Chunk::State::Allocated)
+        Callback(Chunk, getSize(reinterpret_cast<void *>(Chunk), &Header), Arg);
     };
     Primary.iterateOverBlocks(Lambda);
     Secondary.iterateOverBlocks(Lambda);
@@ -483,9 +484,7 @@ private:
   static_assert(MinAlignment >= sizeof(Chunk::PackedHeader),
                 "Minimal alignment must at least cover a chunk header.");

-  // Constants used by the chunk iteration mechanism.
   static const u32 BlockMarker = 0x44554353U;
-  static const uptr InvalidChunk = ~static_cast<uptr>(0);

   GlobalStats Stats;
   TSDRegistryT TSDRegistry;
@@ -593,20 +592,13 @@ private:
     }
   }

-  // This only cares about valid busy chunks. This might change in the future.
-  uptr getChunkFromBlock(uptr Block, uptr *Size) {
+  bool getChunkFromBlock(uptr Block, uptr *Chunk,
+                         Chunk::UnpackedHeader *Header) {
     u32 Offset = 0;
     if (reinterpret_cast<u32 *>(Block)[0] == BlockMarker)
       Offset = reinterpret_cast<u32 *>(Block)[1];
-    const uptr P = Block + Offset + Chunk::getHeaderSize();
-    const void *Ptr = reinterpret_cast<const void *>(P);
-    Chunk::UnpackedHeader Header;
-    if (!Chunk::isValid(Cookie, Ptr, &Header) ||
-        Header.State != Chunk::State::Allocated)
-      return InvalidChunk;
-    if (Size)
-      *Size = getSize(Ptr, &Header);
-    return P;
+    *Chunk = Block + Offset + Chunk::getHeaderSize();
+    return Chunk::isValid(Cookie, reinterpret_cast<void *>(*Chunk), Header);
   }

   uptr getStats(ScopedString *Str) {