Diffstat (limited to 'llvm/lib')
 -rw-r--r--  llvm/lib/Transforms/Scalar/GVNHoist.cpp |  89
 -rw-r--r--  llvm/lib/Transforms/Utils/MemorySSA.cpp | 105
 2 files changed, 90 insertions(+), 104 deletions(-)
diff --git a/llvm/lib/Transforms/Scalar/GVNHoist.cpp b/llvm/lib/Transforms/Scalar/GVNHoist.cpp
index 4328afe3d85..8b2164c5071 100644
--- a/llvm/lib/Transforms/Scalar/GVNHoist.cpp
+++ b/llvm/lib/Transforms/Scalar/GVNHoist.cpp
@@ -19,12 +19,12 @@
 // 2. geps when corresponding load/store cannot be hoisted.
 //===----------------------------------------------------------------------===//
 
-#include "llvm/Transforms/Scalar/GVN.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Analysis/ValueTracking.h"
 #include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Scalar/GVN.h"
 #include "llvm/Transforms/Utils/Local.h"
 #include "llvm/Transforms/Utils/MemorySSA.h"
 
@@ -55,10 +55,10 @@ static cl::opt<int> MaxDepthInBB(
     cl::desc("Hoist instructions from the beginning of the BB up to the "
              "maximum specified depth (default = 100, unlimited = -1)"));
 
-static cl::opt<int>
-    MaxChainLength("gvn-hoist-max-chain-length", cl::Hidden, cl::init(10),
-                   cl::desc("Maximum length of dependent chains to hoist "
-                            "(default = 10, unlimited = -1)"));
+static cl::opt<int> MaxChainLength(
+    "gvn-hoist-max-chain-length", cl::Hidden, cl::init(10),
+    cl::desc("Maximum length of dependent chains to hoist "
+             "(default = 10, unlimited = -1)"));
 
 namespace {
 
@@ -89,7 +89,7 @@ public:
       ADFS = DFSNumber.lookup(BA);
       BDFS = DFSNumber.lookup(BB);
     }
-    assert(ADFS && BDFS);
+    assert (ADFS && BDFS);
     return ADFS < BDFS;
   }
 };
@@ -213,7 +213,7 @@ public:
     for (const BasicBlock *BB : depth_first(&F.getEntryBlock())) {
       DFSNumber[BB] = ++BBI;
       unsigned I = 0;
-      for (auto &Inst : *BB)
+      for (auto &Inst: *BB)
         DFSNumber[&Inst] = ++I;
     }
 
@@ -239,7 +239,6 @@ public:
     return Res;
   }
 
-
 private:
   GVN::ValueTable VN;
   DominatorTree *DT;
@@ -323,42 +322,38 @@ private:
   /* Return true when I1 appears before I2 in the instructions of BB. */
   bool firstInBB(const Instruction *I1, const Instruction *I2) {
-    assert(I1->getParent() == I2->getParent());
+    assert (I1->getParent() == I2->getParent());
     unsigned I1DFS = DFSNumber.lookup(I1);
     unsigned I2DFS = DFSNumber.lookup(I2);
-    assert(I1DFS && I2DFS);
+    assert (I1DFS && I2DFS);
     return I1DFS < I2DFS;
   }
 
-  // Return true when there are memory uses of Def in BB.
-  bool hasMemoryUse(const Instruction *NewPt, MemoryDef *Def,
-                    const BasicBlock *BB) {
-    const MemorySSA::AccessList *Acc = MSSA->getBlockAccesses(BB);
-    if (!Acc)
-      return false;
-
-    Instruction *OldPt = Def->getMemoryInst();
+  // Return true when there are users of Def in BB.
+  bool hasMemoryUseOnPath(MemoryAccess *Def, const BasicBlock *BB,
+                          const Instruction *OldPt) {
+    const BasicBlock *DefBB = Def->getBlock();
     const BasicBlock *OldBB = OldPt->getParent();
-    const BasicBlock *NewBB = NewPt->getParent();
-    bool ReachedNewPt = false;
 
-    for (const MemoryAccess &MA : *Acc)
-      if (const MemoryUse *MU = dyn_cast<MemoryUse>(&MA)) {
-        Instruction *Insn = MU->getMemoryInst();
-
-        // Do not check whether MU aliases Def when MU occurs after OldPt.
-        if (BB == OldBB && firstInBB(OldPt, Insn))
-          break;
+    for (User *U : Def->users())
+      if (auto *MU = dyn_cast<MemoryUse>(U)) {
+        // FIXME: MU->getBlock() does not get updated when we move the instruction.
+        BasicBlock *UBB = MU->getMemoryInst()->getParent();
+        // Only analyze uses in BB.
+        if (BB != UBB)
+          continue;
 
-        // Do not check whether MU aliases Def when MU occurs before NewPt.
-        if (BB == NewBB) {
-          if (!ReachedNewPt) {
-            if (firstInBB(Insn, NewPt))
-              continue;
-            ReachedNewPt = true;
-          }
+        // A use in the same block as the Def is on the path.
+        if (UBB == DefBB) {
+          assert(MSSA->locallyDominates(Def, MU) && "def not dominating use");
+          return true;
         }
-        if (defClobbersUseOrDef(Def, MU, *AA))
+
+        if (UBB != OldBB)
+          return true;
+
+        // It is only harmful to hoist when the use is before OldPt.
+        if (firstInBB(MU->getMemoryInst(), OldPt))
           return true;
       }
 
@@ -366,18 +361,17 @@ private:
   }
 
   // Return true when there are exception handling or loads of memory Def
-  // between Def and NewPt. This function is only called for stores: Def is
-  // the MemoryDef of the store to be hoisted.
+  // between OldPt and NewPt.
   // Decrement by 1 NBBsOnAllPaths for each block between HoistPt and BB, and
   // return true when the counter NBBsOnAllPaths reaches 0, except when it is
   // initialized to -1 which is unlimited.
-  bool hasEHOrLoadsOnPath(const Instruction *NewPt, MemoryDef *Def,
-                          int &NBBsOnAllPaths) {
+  bool hasEHOrLoadsOnPath(const Instruction *NewPt, const Instruction *OldPt,
+                          MemoryAccess *Def, int &NBBsOnAllPaths) {
     const BasicBlock *NewBB = NewPt->getParent();
-    const BasicBlock *OldBB = Def->getBlock();
+    const BasicBlock *OldBB = OldPt->getParent();
     assert(DT->dominates(NewBB, OldBB) && "invalid path");
-    assert(DT->dominates(Def->getDefiningAccess()->getBlock(), NewBB) &&
+    assert(DT->dominates(Def->getBlock(), NewBB) &&
            "def does not dominate new hoisting point");
 
     // Walk all basic blocks reachable in depth-first iteration on the inverse
@@ -396,7 +390,7 @@ private:
         return true;
 
       // Check that we do not move a store past loads.
-      if (hasMemoryUse(NewPt, Def, *I))
+      if (hasMemoryUseOnPath(Def, *I, OldPt))
         return true;
 
       // Stop walk once the limit is reached.
@@ -479,7 +473,7 @@ private:
 
     // Check for unsafe hoistings due to side effects.
     if (K == InsKind::Store) {
-      if (hasEHOrLoadsOnPath(NewPt, dyn_cast<MemoryDef>(U), NBBsOnAllPaths))
+      if (hasEHOrLoadsOnPath(NewPt, OldPt, D, NBBsOnAllPaths))
         return false;
     } else if (hasEHOnPath(NewBB, OldBB, NBBsOnAllPaths))
       return false;
@@ -653,8 +647,7 @@ private:
     for (const Use &Op : I->operands())
       if (const auto *Inst = dyn_cast<Instruction>(&Op))
        if (!DT->dominates(Inst->getParent(), HoistPt)) {
-          if (const GetElementPtrInst *GepOp =
-                  dyn_cast<GetElementPtrInst>(Inst)) {
+          if (const GetElementPtrInst *GepOp = dyn_cast<GetElementPtrInst>(Inst)) {
            if (!allGepOperandsAvailable(GepOp, HoistPt))
              return false;
            // Gep is available if all operands of GepOp are available.
@@ -671,8 +664,7 @@ private:
   void makeGepsAvailable(Instruction *Repl, BasicBlock *HoistPt,
                          const SmallVecInsn &InstructionsToHoist,
                          Instruction *Gep) const {
-    assert(allGepOperandsAvailable(Gep, HoistPt) &&
-           "GEP operands not available");
+    assert(allGepOperandsAvailable(Gep, HoistPt) && "GEP operands not available");
 
     Instruction *ClonedGep = Gep->clone();
     for (unsigned i = 0, e = Gep->getNumOperands(); i != e; ++i)
@@ -976,7 +968,8 @@ public:
 };
 } // namespace
 
-PreservedAnalyses GVNHoistPass::run(Function &F, FunctionAnalysisManager &AM) {
+PreservedAnalyses GVNHoistPass::run(Function &F,
+                                    FunctionAnalysisManager &AM) {
   DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
   AliasAnalysis &AA = AM.getResult<AAManager>(F);
   MemoryDependenceResults &MD = AM.getResult<MemoryDependenceAnalysis>(F);
diff --git a/llvm/lib/Transforms/Utils/MemorySSA.cpp b/llvm/lib/Transforms/Utils/MemorySSA.cpp
index ed2208eb2b4..e06b60484c5 100644
--- a/llvm/lib/Transforms/Utils/MemorySSA.cpp
+++ b/llvm/lib/Transforms/Utils/MemorySSA.cpp
@@ -169,6 +169,44 @@ template <> struct DenseMapInfo<MemoryLocOrCall> {
     return LHS == RHS;
   }
 };
+}
+
+namespace {
+struct UpwardsMemoryQuery {
+  // True if our original query started off as a call
+  bool IsCall;
+  // The pointer location we started the query with. This will be empty if
+  // IsCall is true.
+  MemoryLocation StartingLoc;
+  // This is the instruction we were querying about.
+  const Instruction *Inst;
+  // The MemoryAccess we actually got called with, used to test local domination
+  const MemoryAccess *OriginalAccess;
+
+  UpwardsMemoryQuery()
+      : IsCall(false), Inst(nullptr), OriginalAccess(nullptr) {}
+
+  UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
+      : IsCall(ImmutableCallSite(Inst)), Inst(Inst), OriginalAccess(Access) {
+    if (!IsCall)
+      StartingLoc = MemoryLocation::get(Inst);
+  }
+};
+
+static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,
+                           AliasAnalysis &AA) {
+  Instruction *Inst = MD->getMemoryInst();
+  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
+    switch (II->getIntrinsicID()) {
+    case Intrinsic::lifetime_start:
+    case Intrinsic::lifetime_end:
+      return AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), Loc);
+    default:
+      return false;
+    }
+  }
+  return false;
+}
 
 enum class Reorderability { Always, IfNoAlias, Never };
 
@@ -210,6 +248,17 @@ static Reorderability getLoadReorderability(const LoadInst *Use,
   return Result;
 }
 
+static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysis &AA,
+                                                   const Instruction *I) {
+  // If the memory can't be changed, then loads of the memory can't be
+  // clobbered.
+  //
+  // FIXME: We should handle invariant groups, as well. It's a bit harder,
+  // because we need to pay close attention to invariant group barriers.
+  return isa<LoadInst>(I) && (I->getMetadata(LLVMContext::MD_invariant_load) ||
+                              AA.pointsToConstantMemory(I));
+}
+
 static bool instructionClobbersQuery(MemoryDef *MD,
                                      const MemoryLocation &UseLoc,
                                      const Instruction *UseInst,
@@ -254,62 +303,6 @@ static bool instructionClobbersQuery(MemoryDef *MD,
   return AA.getModRefInfo(DefInst, UseLoc) & MRI_Mod;
 }
 
-// Return true when MD may alias MU, return false otherwise.
-bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
-                         AliasAnalysis &AA) {
-  Instruction *Insn = MU->getMemoryInst();
-  return instructionClobbersQuery(MD, MemoryLocation::get(Insn), Insn, AA);
-}
-}
-
-namespace {
-struct UpwardsMemoryQuery {
-  // True if our original query started off as a call
-  bool IsCall;
-  // The pointer location we started the query with. This will be empty if
-  // IsCall is true.
-  MemoryLocation StartingLoc;
-  // This is the instruction we were querying about.
-  const Instruction *Inst;
-  // The MemoryAccess we actually got called with, used to test local domination
-  const MemoryAccess *OriginalAccess;
-
-  UpwardsMemoryQuery()
-      : IsCall(false), Inst(nullptr), OriginalAccess(nullptr) {}
-
-  UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
-      : IsCall(ImmutableCallSite(Inst)), Inst(Inst), OriginalAccess(Access) {
-    if (!IsCall)
-      StartingLoc = MemoryLocation::get(Inst);
-  }
-};
-
-static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,
-                           AliasAnalysis &AA) {
-  Instruction *Inst = MD->getMemoryInst();
-  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
-    switch (II->getIntrinsicID()) {
-    case Intrinsic::lifetime_start:
-    case Intrinsic::lifetime_end:
-      return AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), Loc);
-    default:
-      return false;
-    }
-  }
-  return false;
-}
-
-static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysis &AA,
-                                                   const Instruction *I) {
-  // If the memory can't be changed, then loads of the memory can't be
-  // clobbered.
-  //
-  // FIXME: We should handle invariant groups, as well. It's a bit harder,
-  // because we need to pay close attention to invariant group barriers.
-  return isa<LoadInst>(I) && (I->getMetadata(LLVMContext::MD_invariant_load) ||
-                              AA.pointsToConstantMemory(I));
-}
-
 static bool instructionClobbersQuery(MemoryDef *MD, MemoryUse *MU,
                                      const MemoryLocOrCall &UseMLOC,
                                      AliasAnalysis &AA) {
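Editor's note (not part of the patch): on the GVNHoist side, this change replaces the alias-aware hasMemoryUse/defClobbersUseOrDef check with the older, purely positional hasMemoryUseOnPath. The sketch below condenses the restored rule; the helper name useBlocksHoist and its free-function form are invented for illustration, and the MemorySSA header path matches this revision of the tree (the header later moved under llvm/Analysis).

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/Utils/MemorySSA.h"

using namespace llvm;

// Invented helper: with no alias queries at all, any MemoryUse of Def that
// sits in block BB counts as a load the store must not be hoisted past.
// The real hasMemoryUseOnPath additionally tolerates uses in OldPt's own
// block that come after OldPt.
static bool useBlocksHoist(MemoryAccess *Def, const BasicBlock *BB) {
  for (User *U : Def->users())
    if (auto *MU = dyn_cast<MemoryUse>(U))
      if (MU->getMemoryInst()->getParent() == BB)
        return true; // conservatively treat the use as a may-alias load
  return false;
}

The trade-off is precision for independence from AliasAnalysis: every load in a block on the hoisting path is assumed to read the hoisted store's memory, which is what lets the MemorySSA-side helper defClobbersUseOrDef disappear in the second file.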

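A second note on MemorySSA.cpp: the paired hunks there are a pure relocation, moving UpwardsMemoryQuery, lifetimeEndsAt, and isUseTriviallyOptimizableToLiveOnEntry back above instructionClobbersQuery unchanged once defClobbersUseOrDef is deleted. As a reading aid for lifetimeEndsAt, here is a hedged sketch of the kind of upward walk that consumes it; climbDefs and its loop are invented, and since lifetimeEndsAt is file-static, such code would only compile inside MemorySSA.cpp itself.

// Invented walker fragment: climb the chain of defining accesses for Loc
// and stop at a lifetime marker that must-aliases the queried object,
// since nothing above a lifetime_start/lifetime_end of that object is an
// interesting clobber for a location known to be that object.
static MemoryAccess *climbDefs(MemoryDef *Start, const MemoryLocation &Loc,
                               AliasAnalysis &AA, MemorySSA &MSSA) {
  MemoryDef *MD = Start;
  while (!MSSA.isLiveOnEntryDef(MD)) {
    if (lifetimeEndsAt(MD, Loc, AA))
      return MD; // the object's lifetime bounds the search
    auto *Next = dyn_cast<MemoryDef>(MD->getDefiningAccess());
    if (!Next)
      break; // reached a MemoryPhi; a real walker would fan out per edge
    MD = Next;
  }
  return MD;
}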
