path: root/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
author    Amaury Sechet <deadalnix@gmail.com>  2016-01-06 09:30:39 +0000
committer Amaury Sechet <deadalnix@gmail.com>  2016-01-06 09:30:39 +0000
commit    d3b2c0fd947e8c5098d9d371bd81322feb797ddc (patch)
tree      49f750fb7e0fe0829729e71dff2c34b231133290 /llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
parent    267163e713c8c7c92a76319e836d8cb5bdcc4fad (diff)
Improve load/store to memcpy for aggregate
Summary:
It turns out that if we don't try to do the promotion at the store location, we can do it just before any operation that aliases the load, as long as no operation after that point aliases the store.

Reviewers: craig.topper, spatel, dexonsmith, Prazek, chandlerc, joker.eph

Subscribers: llvm-commits

Differential Revision: http://reviews.llvm.org/D15903

llvm-svn: 256923
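To make the situation concrete, here is a hypothetical C++ sketch (the names, the __restrict qualifier and the exact lowering are illustrative assumptions, not taken from the patch; whether a frontend actually emits an aggregate load/store pair for this code depends on the frontend and the type):

    // Hypothetical example: an aggregate copy with a potentially clobbering
    // write in between.  __restrict is only there so alias analysis can prove
    // that the write through Flag cannot touch *Dst.
    struct Blob { int Data[16]; };

    void copyBlob(Blob *__restrict Dst, Blob *Src, int *Flag) {
      Blob Tmp = *Src;   // load of the aggregate
      *Flag = 1;         // may write into *Src, but provably not into *Dst
      *Dst = Tmp;        // store of the aggregate
    }

Before this change the write to *Flag blocked the promotion, because the memcpy was only ever created at the store. With it, MemCpyOpt can still promote the pair by emitting the memcpy just before the first instruction that may clobber the loaded memory, provided nothing from that point to the store may touch the stored memory.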
Diffstat (limited to 'llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp')
-rw-r--r--  llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp | 32
1 file changed, 23 insertions(+), 9 deletions(-)
diff --git a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 94725db56b8..7354016c212 100644
--- a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -519,19 +519,33 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
// We use alias analysis to check if an instruction may store to
// the memory we load from in between the load and the store. If
- // such an instruction is found, we store it in AI.
- Instruction *AI = nullptr;
+ // such an instruction is found, we try to promote there instead
+ // of at the store position.
+ Instruction *P = SI;
for (BasicBlock::iterator I = ++LI->getIterator(), E = SI->getIterator();
I != E; ++I) {
- if (AA.getModRefInfo(&*I, LoadLoc) & MRI_Mod) {
- AI = &*I;
- break;
+ if (!(AA.getModRefInfo(&*I, LoadLoc) & MRI_Mod))
+ continue;
+
+ // We found an instruction that may write to the loaded memory.
+ // We can try to promote at this position instead of the store
+ // position if nothing aliases the store memory after this.
+ P = &*I;
+ for (; I != E; ++I) {
+ MemoryLocation StoreLoc = MemoryLocation::get(SI);
+ if (AA.getModRefInfo(&*I, StoreLoc) != MRI_NoModRef) {
+ DEBUG(dbgs() << "Alias " << *I << "\n");
+ P = nullptr;
+ break;
+ }
}
+
+ break;
}
- // If no aliasing instruction is found, then we can promote the
- // load/store pair to a memcpy at the store loaction.
- if (!AI) {
+ // If a valid insertion position is found, then we can promote
+ // the load/store pair to a memcpy.
+ if (P) {
// If we load from memory that may alias the memory we store to,
// memmove must be used to preserve semantic. If not, memcpy can
// be used.
@@ -542,7 +556,7 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
unsigned Align = findCommonAlignment(DL, SI, LI);
uint64_t Size = DL.getTypeStoreSize(T);
- IRBuilder<> Builder(SI);
+ IRBuilder<> Builder(P);
Instruction *M;
if (UseMemMove)
M = Builder.CreateMemMove(SI->getPointerOperand(),
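For reference, here is a minimal, self-contained C++ sketch of the insertion-point search added above. Instr, MayModLoad and MayTouchStore are hypothetical stand-ins for llvm::Instruction and the AliasAnalysis::getModRefInfo queries used in the real code; this is an illustration of the logic, not the pass itself.

    #include <functional>
    #include <vector>

    // Placeholder for an instruction between the load and the store.
    struct Instr { const char *Name; };

    // Returns the instruction to insert the memcpy before, or nullptr if the
    // load/store pair cannot be promoted.
    const Instr *findInsertionPoint(
        const std::vector<Instr> &Between, const Instr *Store,
        const std::function<bool(const Instr &)> &MayModLoad,    // may write the loaded memory?
        const std::function<bool(const Instr &)> &MayTouchStore) // may read/write the stored memory?
    {
      const Instr *P = Store; // default: promote at the store, as before the patch
      for (size_t I = 0, E = Between.size(); I != E; ++I) {
        if (!MayModLoad(Between[I]))
          continue;
        // First instruction that may clobber the source: try to promote just
        // before it, but only if nothing from here to the store touches the
        // destination.
        P = &Between[I];
        for (; I != E; ++I)
          if (MayTouchStore(Between[I]))
            return nullptr; // promotion is abandoned, as in the patch
        break;
      }
      return P;
    }

Note that, as in the patch, the inner check starts at the clobbering instruction itself: if even that instruction may touch the destination, the promotion is given up rather than moved further.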