summaryrefslogtreecommitdiffstats
path: root/llvm/lib/ExecutionEngine/SectionMemoryManager.cpp
diff options
context:
space:
mode:
authorKeno Fischer <kfischer@college.harvard.edu>2015-12-16 11:13:23 +0000
committerKeno Fischer <kfischer@college.harvard.edu>2015-12-16 11:13:23 +0000
commit94f181a45fe03699ceb61aee11bf89d0ffd6a6c9 (patch)
treef306f28b4d30d3a9c673961639ade24f3dd71e8d /llvm/lib/ExecutionEngine/SectionMemoryManager.cpp
parent859ad29b52070e3fb4f15bc633514c022900e622 (diff)
downloadbcm5719-llvm-94f181a45fe03699ceb61aee11bf89d0ffd6a6c9.tar.gz
bcm5719-llvm-94f181a45fe03699ceb61aee11bf89d0ffd6a6c9.zip
[SectionMemoryManager] Make better use of virtual memory
Summary: On Windows, the allocation granularity can be significantly larger than a page (64K), so with many small objects, just clearing the FreeMem list rapidly leaks quite a bit of virtual memory space (if not rss). Fix that by only removing those parts of the FreeMem blocks that overlap pages for which we are applying memory permissions, rather than dropping the FreeMem blocks entirely. Reviewers: lhames Subscribers: llvm-commits Differential Revision: http://reviews.llvm.org/D15202 llvm-svn: 255760
Diffstat (limited to 'llvm/lib/ExecutionEngine/SectionMemoryManager.cpp')
-rw-r--r--llvm/lib/ExecutionEngine/SectionMemoryManager.cpp89
1 file changed, 66 insertions(+), 23 deletions(-)
diff --git a/llvm/lib/ExecutionEngine/SectionMemoryManager.cpp b/llvm/lib/ExecutionEngine/SectionMemoryManager.cpp
index 5e89a945b80..e2f220862cf 100644
--- a/llvm/lib/ExecutionEngine/SectionMemoryManager.cpp
+++ b/llvm/lib/ExecutionEngine/SectionMemoryManager.cpp
@@ -15,6 +15,7 @@
#include "llvm/Config/config.h"
#include "llvm/ExecutionEngine/SectionMemoryManager.h"
#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/Process.h"
namespace llvm {
@@ -48,14 +49,27 @@ uint8_t *SectionMemoryManager::allocateSection(MemoryGroup &MemGroup,
// Look in the list of free memory regions and use a block there if one
// is available.
- for (sys::MemoryBlock &MB : MemGroup.FreeMem) {
- if (MB.size() >= RequiredSize) {
- Addr = (uintptr_t)MB.base();
- uintptr_t EndOfBlock = Addr + MB.size();
+ for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
+ if (FreeMB.Free.size() >= RequiredSize) {
+ Addr = (uintptr_t)FreeMB.Free.base();
+ uintptr_t EndOfBlock = Addr + FreeMB.Free.size();
// Align the address.
Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);
- // Store cutted free memory block.
- MB = sys::MemoryBlock((void *)(Addr + Size), EndOfBlock - Addr - Size);
+
+ if (FreeMB.PendingPrefixIndex == (unsigned)-1) {
+ // The part of the block we're giving out to the user is now pending
+ MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size));
+
+ // Remember this pending block, such that future allocations can just
+ // modify it rather than creating a new one
+ FreeMB.PendingPrefixIndex = MemGroup.PendingMem.size() - 1;
+ } else {
+ sys::MemoryBlock &PendingMB = MemGroup.PendingMem[FreeMB.PendingPrefixIndex];
+ PendingMB = sys::MemoryBlock(PendingMB.base(), Addr + Size - (uintptr_t)PendingMB.base());
+ }
+
+ // Remember how much free space is now left in this block
+ FreeMB.Free = sys::MemoryBlock((void *)(Addr + Size), EndOfBlock - Addr - Size);
return (uint8_t*)Addr;
}
}
@@ -83,18 +97,26 @@ uint8_t *SectionMemoryManager::allocateSection(MemoryGroup &MemGroup,
// Save this address as the basis for our next request
MemGroup.Near = MB;
- MemGroup.PendingMem.push_back(MB);
+ // Remember that we allocated this memory
+ MemGroup.AllocatedMem.push_back(MB);
Addr = (uintptr_t)MB.base();
uintptr_t EndOfBlock = Addr + MB.size();
// Align the address.
Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);
+ // The part of the block we're giving out to the user is now pending
+ MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size));
+
// The allocateMappedMemory may allocate much more memory than we need. In
// this case, we store the unused memory as a free memory block.
unsigned FreeSize = EndOfBlock-Addr-Size;
- if (FreeSize > 16)
- MemGroup.FreeMem.push_back(sys::MemoryBlock((void*)(Addr + Size), FreeSize));
+ if (FreeSize > 16) {
+ FreeMemBlock FreeMB;
+ FreeMB.Free = sys::MemoryBlock((void*)(Addr + Size), FreeSize);
+ FreeMB.PendingPrefixIndex = (unsigned)-1;
+ MemGroup.FreeMem.push_back(FreeMB);
+ }
// Return aligned address
return (uint8_t*)Addr;
@@ -105,9 +127,6 @@ bool SectionMemoryManager::finalizeMemory(std::string *ErrMsg)
// FIXME: Should in-progress permissions be reverted if an error occurs?
std::error_code ec;
- // Don't allow free memory blocks to be used after setting protection flags.
- CodeMem.FreeMem.clear();
-
// Make code memory executable.
ec = applyMemoryGroupPermissions(CodeMem,
sys::Memory::MF_READ | sys::Memory::MF_EXEC);
@@ -138,25 +157,52 @@ bool SectionMemoryManager::finalizeMemory(std::string *ErrMsg)
// relocations) will get to the data cache but not to the instruction cache.
invalidateInstructionCache();
- // Now, remember that we have successfully applied the permissions to avoid
- // having to apply them again.
- CodeMem.AllocatedMem.append(CodeMem.PendingMem.begin(),CodeMem.PendingMem.end());
- CodeMem.PendingMem.clear();
+ return false;
+}
- RODataMem.AllocatedMem.append(RODataMem.PendingMem.begin(),RODataMem.PendingMem.end());
- RODataMem.PendingMem.clear();
+static sys::MemoryBlock trimBlockToPageSize(sys::MemoryBlock M) {
+ static const size_t PageSize = sys::Process::getPageSize();
- return false;
+ size_t StartOverlap =
+ (PageSize - ((uintptr_t)M.base() % PageSize)) % PageSize;
+
+ size_t TrimmedSize = M.size();
+ TrimmedSize -= StartOverlap;
+ TrimmedSize -= TrimmedSize % PageSize;
+
+ sys::MemoryBlock Trimmed((void *)((uintptr_t)M.base() + StartOverlap), TrimmedSize);
+
+ assert(((uintptr_t)Trimmed.base() % PageSize) == 0);
+ assert((Trimmed.size() % PageSize) == 0);
+ assert(M.base() <= Trimmed.base() && Trimmed.size() <= M.size());
+
+ return Trimmed;
}
+
std::error_code
SectionMemoryManager::applyMemoryGroupPermissions(MemoryGroup &MemGroup,
unsigned Permissions) {
-
for (sys::MemoryBlock &MB : MemGroup.PendingMem)
if (std::error_code EC = sys::Memory::protectMappedMemory(MB, Permissions))
return EC;
+ MemGroup.PendingMem.clear();
+
+ // Now go through free blocks and trim any of them that don't span the entire
+ // page because one of the pending blocks may have overlapped it.
+ for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
+ FreeMB.Free = trimBlockToPageSize(FreeMB.Free);
+ // We cleared the PendingMem list, so all these pointers are now invalid
+ FreeMB.PendingPrefixIndex = (unsigned)-1;
+ }
+
+ // Remove all blocks which are now empty
+ MemGroup.FreeMem.erase(
+ std::remove_if(MemGroup.FreeMem.begin(), MemGroup.FreeMem.end(),
+ [](FreeMemBlock &FreeMB) { return FreeMB.Free.size() == 0; }),
+ MemGroup.FreeMem.end());
+
return std::error_code();
}
@@ -169,10 +215,7 @@ SectionMemoryManager::~SectionMemoryManager() {
for (MemoryGroup *Group : {&CodeMem, &RWDataMem, &RODataMem}) {
for (sys::MemoryBlock &Block : Group->AllocatedMem)
sys::Memory::releaseMappedMemory(Block);
- for (sys::MemoryBlock &Block : Group->PendingMem)
- sys::Memory::releaseMappedMemory(Block);
}
}
} // namespace llvm
-
OpenPOWER on IntegriCloud