author     Sebastian Pop <sebpop@gmail.com>    2016-10-12 02:23:39 +0000
committer  Sebastian Pop <sebpop@gmail.com>    2016-10-12 02:23:39 +0000
commit     ab12fb62ee04512ed1a21529ab8be0c08562343b (patch)
tree       68deba40b43c14815ba088c783ee53b8dde23c8a /llvm/lib
parent     89997ecd8fecabb82e62fd7efe146f17e5554463 (diff)
download   bcm5719-llvm-ab12fb62ee04512ed1a21529ab8be0c08562343b.tar.gz
           bcm5719-llvm-ab12fb62ee04512ed1a21529ab8be0c08562343b.zip
GVN-hoist: fix store past load dependence analysis (PR30216, PR30499)
This is a refreshed version of a patch that was reverted: it fixes
the problems reported in both PR30216 and PR30499, and
contains all the test-cases from both bugs.
To hoist stores past loads, we used to search for potentially
conflicting loads on the hoisting path by following a MemorySSA
def-def link from the store to be hoisted to the previous
defining memory access, and from there following the def-use
chains to all the uses on the hoisting path. The problem is that
the def-def link may point to a store that does not alias the
store to be hoisted, so the loads reached through it may not
alias the hoisted store either, while the loads that do alias it,
as in the testcase of PR30216, are never visited.
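For illustration, a hypothetical source-level example of the hazard
(this is not the testcase from PR30216; names and shapes are made up):

    // GVN-hoist would like to hoist the two identical '*p = 1' stores into
    // the block before the branch. That moves a store past the load of *q on
    // one path; if p and q may alias, the load would then observe the hoisted
    // store, so the hoist must be rejected.
    int may_not_hoist(int *p, int *q, bool c) {
      int v = 0;
      if (c) {
        v = *q;  // load on the hoisting path that may alias *p
        *p = 1;
      } else {
        *p = 1;
      }
      return v;
    }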
The current patch visits all loads on the path from the store to
be hoisted to the hoisting position and queries alias analysis to
ask whether the store may alias each of those loads. I was not
able to use the MemorySSA functionality to ask whether a load is
clobbered by the store (I'm not sure which function to call), so
I used a call to AA->isNoAlias().
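A rough sketch of the shape of that check (simplified, with made-up
names, and not the exact code in the diff below; it ignores the
refinements that skip uses occurring after OldPt or before NewPt):

    #include "llvm/Analysis/AliasAnalysis.h"
    #include "llvm/IR/Instructions.h"
    #include "llvm/Transforms/Utils/MemorySSA.h"
    using namespace llvm;

    // Return true if any load recorded as a MemoryUse in block BB may alias
    // the store whose MemoryDef is StoreDef; hoisting across BB is then unsafe.
    static bool anyLoadMayAliasStore(MemoryDef *StoreDef, const BasicBlock *BB,
                                     MemorySSA *MSSA, AliasAnalysis *AA) {
      const MemorySSA::AccessList *Acc = MSSA->getBlockAccesses(BB);
      if (!Acc)
        return false; // No memory accesses recorded in BB.
      MemoryLocation StoreLoc =
          MemoryLocation::get(cast<StoreInst>(StoreDef->getMemoryInst()));
      for (const MemoryAccess &MA : *Acc)
        if (auto *MU = dyn_cast<MemoryUse>(&MA))
          if (auto *LI = dyn_cast<LoadInst>(MU->getMemoryInst()))
            // Anything that is not provably no-alias is treated as a conflict.
            if (!AA->isNoAlias(StoreLoc, MemoryLocation::get(LI)))
              return true;
      return false;
    }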
Hoisting a store past another store still works as before, using a
MemorySSA query: I added an extra test to pr30216.ll to make sure
store past store does not regress.
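A hypothetical sketch of the kind of MemorySSA-based check that covers
the store-past-store case (not the pass's exact code): the store's
defining access is its nearest clobbering def on the def-def chain, so
hoisting is only safe to points dominated by that defining access,
which is also what the updated assert in hasEHOrLoadsOnPath verifies.

    #include "llvm/IR/Dominators.h"
    #include "llvm/Transforms/Utils/MemorySSA.h"
    using namespace llvm;

    // Return true if the new hoisting point is dominated by the store's
    // defining access, i.e. hoisting does not move the store above a store
    // it depends on.
    static bool defDominatesNewPt(MemoryDef *StoreDef, const Instruction *NewPt,
                                  MemorySSA *MSSA, DominatorTree *DT) {
      MemoryAccess *DefAcc = StoreDef->getDefiningAccess();
      if (MSSA->isLiveOnEntryDef(DefAcc))
        return true; // No earlier store clobbers this one.
      return DT->dominates(DefAcc->getBlock(), NewPt->getParent());
    }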
Tested on x86_64-linux with check and a test-suite run.
Differential Revision: https://reviews.llvm.org/D25476
llvm-svn: 283965
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Transforms/Scalar/GVNHoist.cpp  |  62
-rw-r--r--  llvm/lib/Transforms/Utils/MemorySSA.cpp  | 107
2 files changed, 88 insertions(+), 81 deletions(-)
diff --git a/llvm/lib/Transforms/Scalar/GVNHoist.cpp b/llvm/lib/Transforms/Scalar/GVNHoist.cpp
index 8b2164c5071..92b7e35a17f 100644
--- a/llvm/lib/Transforms/Scalar/GVNHoist.cpp
+++ b/llvm/lib/Transforms/Scalar/GVNHoist.cpp
@@ -329,31 +329,36 @@ private:
     return I1DFS < I2DFS;
   }
 
-  // Return true when there are users of Def in BB.
-  bool hasMemoryUseOnPath(MemoryAccess *Def, const BasicBlock *BB,
-                          const Instruction *OldPt) {
-    const BasicBlock *DefBB = Def->getBlock();
-    const BasicBlock *OldBB = OldPt->getParent();
+  // Return true when there are memory uses of Def in BB.
+  bool hasMemoryUse(const Instruction *NewPt, MemoryDef *Def,
+                    const BasicBlock *BB) {
+    const MemorySSA::AccessList *Acc = MSSA->getBlockAccesses(BB);
+    if (!Acc)
+      return false;
 
-    for (User *U : Def->users())
-      if (auto *MU = dyn_cast<MemoryUse>(U)) {
-        // FIXME: MU->getBlock() does not get updated when we move the
-        // instruction.
-        BasicBlock *UBB = MU->getMemoryInst()->getParent();
-        // Only analyze uses in BB.
-        if (BB != UBB)
-          continue;
+    Instruction *OldPt = Def->getMemoryInst();
+    const BasicBlock *OldBB = OldPt->getParent();
+    const BasicBlock *NewBB = NewPt->getParent();
+    bool ReachedNewPt = false;
 
-        // A use in the same block as the Def is on the path.
-        if (UBB == DefBB) {
-          assert(MSSA->locallyDominates(Def, MU) && "def not dominating use");
-          return true;
-        }
+    for (const MemoryAccess &MA : *Acc)
+      if (const MemoryUse *MU = dyn_cast<MemoryUse>(&MA)) {
+        Instruction *Insn = MU->getMemoryInst();
 
-        if (UBB != OldBB)
-          return true;
+        // Do not check whether MU aliases Def when MU occurs after OldPt.
+        if (BB == OldBB && firstInBB(OldPt, Insn))
+          break;
 
-        // It is only harmful to hoist when the use is before OldPt.
-        if (firstInBB(MU->getMemoryInst(), OldPt))
+        // Do not check whether MU aliases Def when MU occurs before NewPt.
+        if (BB == NewBB) {
+          if (!ReachedNewPt) {
+            if (firstInBB(Insn, NewPt))
+              continue;
+            ReachedNewPt = true;
+          }
+        }
+        if (instructionClobbersQuery(Def, MemoryLocation::get(Insn), Insn,
+                                     *AA))
           return true;
       }
 
@@ -361,17 +366,18 @@ private:
   }
 
   // Return true when there are exception handling or loads of memory Def
-  // between OldPt and NewPt.
+  // between Def and NewPt. This function is only called for stores: Def is
+  // the MemoryDef of the store to be hoisted.
   // Decrement by 1 NBBsOnAllPaths for each block between HoistPt and BB, and
   // return true when the counter NBBsOnAllPaths reaces 0, except when it is
   // initialized to -1 which is unlimited.
-  bool hasEHOrLoadsOnPath(const Instruction *NewPt, const Instruction *OldPt,
-                          MemoryAccess *Def, int &NBBsOnAllPaths) {
+  bool hasEHOrLoadsOnPath(const Instruction *NewPt, MemoryDef *Def,
+                          int &NBBsOnAllPaths) {
     const BasicBlock *NewBB = NewPt->getParent();
-    const BasicBlock *OldBB = OldPt->getParent();
+    const BasicBlock *OldBB = Def->getBlock();
     assert(DT->dominates(NewBB, OldBB) && "invalid path");
-    assert(DT->dominates(Def->getBlock(), NewBB) &&
+    assert(DT->dominates(Def->getDefiningAccess()->getBlock(), NewBB) &&
            "def does not dominate new hoisting point");
 
     // Walk all basic blocks reachable in depth-first iteration on the inverse
@@ -390,7 +396,7 @@ private:
         return true;
 
       // Check that we do not move a store past loads.
-      if (hasMemoryUseOnPath(Def, *I, OldPt))
+      if (hasMemoryUse(NewPt, Def, *I))
         return true;
 
       // Stop walk once the limit is reached.
@@ -473,7 +479,7 @@ private:
 
       // Check for unsafe hoistings due to side effects.
       if (K == InsKind::Store) {
-        if (hasEHOrLoadsOnPath(NewPt, OldPt, D, NBBsOnAllPaths))
+        if (hasEHOrLoadsOnPath(NewPt, dyn_cast<MemoryDef>(U), NBBsOnAllPaths))
          return false;
       } else if (hasEHOnPath(NewBB, OldBB, NBBsOnAllPaths))
         return false;
diff --git a/llvm/lib/Transforms/Utils/MemorySSA.cpp b/llvm/lib/Transforms/Utils/MemorySSA.cpp
index e06b60484c5..d549f6a9476 100644
--- a/llvm/lib/Transforms/Utils/MemorySSA.cpp
+++ b/llvm/lib/Transforms/Utils/MemorySSA.cpp
@@ -169,44 +169,6 @@ template <> struct DenseMapInfo<MemoryLocOrCall> {
     return LHS == RHS;
   }
 };
-}
-
-namespace {
-struct UpwardsMemoryQuery {
-  // True if our original query started off as a call
-  bool IsCall;
-  // The pointer location we started the query with. This will be empty if
-  // IsCall is true.
-  MemoryLocation StartingLoc;
-  // This is the instruction we were querying about.
-  const Instruction *Inst;
-  // The MemoryAccess we actually got called with, used to test local domination
-  const MemoryAccess *OriginalAccess;
-
-  UpwardsMemoryQuery()
-      : IsCall(false), Inst(nullptr), OriginalAccess(nullptr) {}
-
-  UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
-      : IsCall(ImmutableCallSite(Inst)), Inst(Inst), OriginalAccess(Access) {
-    if (!IsCall)
-      StartingLoc = MemoryLocation::get(Inst);
-  }
-};
-
-static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,
-                           AliasAnalysis &AA) {
-  Instruction *Inst = MD->getMemoryInst();
-  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
-    switch (II->getIntrinsicID()) {
-    case Intrinsic::lifetime_start:
-    case Intrinsic::lifetime_end:
-      return AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), Loc);
-    default:
-      return false;
-    }
-  }
-  return false;
-}
 
 enum class Reorderability { Always, IfNoAlias, Never };
 
@@ -248,21 +210,10 @@ static Reorderability getLoadReorderability(const LoadInst *Use,
   return Result;
 }
 
-static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysis &AA,
-                                                   const Instruction *I) {
-  // If the memory can't be changed, then loads of the memory can't be
-  // clobbered.
-  //
-  // FIXME: We should handle invariant groups, as well. It's a bit harder,
-  // because we need to pay close attention to invariant group barriers.
-  return isa<LoadInst>(I) && (I->getMetadata(LLVMContext::MD_invariant_load) ||
-                              AA.pointsToConstantMemory(I));
-}
-
-static bool instructionClobbersQuery(MemoryDef *MD,
-                                     const MemoryLocation &UseLoc,
-                                     const Instruction *UseInst,
-                                     AliasAnalysis &AA) {
+bool instructionClobbersQuery(MemoryDef *MD,
+                              const MemoryLocation &UseLoc,
+                              const Instruction *UseInst,
+                              AliasAnalysis &AA) {
   Instruction *DefInst = MD->getMemoryInst();
   assert(DefInst && "Defining instruction not actually an instruction");
 
@@ -303,6 +254,56 @@ static bool instructionClobbersQuery(MemoryDef *MD,
   return AA.getModRefInfo(DefInst, UseLoc) & MRI_Mod;
 }
 
+}
+
+namespace {
+struct UpwardsMemoryQuery {
+  // True if our original query started off as a call
+  bool IsCall;
+  // The pointer location we started the query with. This will be empty if
+  // IsCall is true.
+  MemoryLocation StartingLoc;
+  // This is the instruction we were querying about.
+  const Instruction *Inst;
+  // The MemoryAccess we actually got called with, used to test local domination
+  const MemoryAccess *OriginalAccess;
+
+  UpwardsMemoryQuery()
+      : IsCall(false), Inst(nullptr), OriginalAccess(nullptr) {}
+
+  UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
+      : IsCall(ImmutableCallSite(Inst)), Inst(Inst), OriginalAccess(Access) {
+    if (!IsCall)
+      StartingLoc = MemoryLocation::get(Inst);
+  }
+};
+
+static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,
+                           AliasAnalysis &AA) {
+  Instruction *Inst = MD->getMemoryInst();
+  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
+    switch (II->getIntrinsicID()) {
+    case Intrinsic::lifetime_start:
+    case Intrinsic::lifetime_end:
+      return AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), Loc);
+    default:
+      return false;
+    }
+  }
+  return false;
+}
+
+static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysis &AA,
+                                                   const Instruction *I) {
+  // If the memory can't be changed, then loads of the memory can't be
+  // clobbered.
+  //
+  // FIXME: We should handle invariant groups, as well. It's a bit harder,
+  // because we need to pay close attention to invariant group barriers.
+  return isa<LoadInst>(I) && (I->getMetadata(LLVMContext::MD_invariant_load) ||
+                              AA.pointsToConstantMemory(I));
+}
+
 static bool instructionClobbersQuery(MemoryDef *MD, MemoryUse *MU,
                                      const MemoryLocOrCall &UseMLOC,
                                      AliasAnalysis &AA) {