 llvm/include/llvm/Analysis/IVUsers.h | 6
 llvm/include/llvm/Analysis/LoopAccessAnalysis.h | 7
 llvm/include/llvm/Analysis/LoopInfo.h | 13
 llvm/include/llvm/Analysis/LoopPassManager.h | 408
 llvm/include/llvm/Transforms/Scalar/IndVarSimplify.h | 3
 llvm/include/llvm/Transforms/Scalar/LICM.h | 3
 llvm/include/llvm/Transforms/Scalar/LoopDeletion.h | 3
 llvm/include/llvm/Transforms/Scalar/LoopIdiomRecognize.h | 3
 llvm/include/llvm/Transforms/Scalar/LoopInstSimplify.h | 3
 llvm/include/llvm/Transforms/Scalar/LoopRotation.h | 3
 llvm/include/llvm/Transforms/Scalar/LoopSimplifyCFG.h | 3
 llvm/include/llvm/Transforms/Scalar/LoopStrengthReduce.h | 3
 llvm/include/llvm/Transforms/Scalar/LoopUnrollPass.h | 3
 llvm/lib/Analysis/IVUsers.cpp | 18
 llvm/lib/Analysis/LoopAccessAnalysis.cpp | 29
 llvm/lib/Analysis/LoopInfo.cpp | 7
 llvm/lib/Analysis/LoopPass.cpp | 10
 llvm/lib/Analysis/LoopPassManager.cpp | 188
 llvm/lib/Passes/PassBuilder.cpp | 12
 llvm/lib/Transforms/Scalar/IndVarSimplify.cpp | 18
 llvm/lib/Transforms/Scalar/LICM.cpp | 19
 llvm/lib/Transforms/Scalar/LoopDeletion.cpp | 13
 llvm/lib/Transforms/Scalar/LoopDistribute.cpp | 10
 llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp | 20
 llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp | 18
 llvm/lib/Transforms/Scalar/LoopRotation.cpp | 17
 llvm/lib/Transforms/Scalar/LoopSimplifyCFG.cpp | 14
 llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp | 20
 llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp | 36
 llvm/lib/Transforms/Vectorize/LoopVectorize.cpp | 7
 llvm/test/Other/loop-pass-ordering.ll | 11
 llvm/test/Other/new-pass-manager.ll | 27
 llvm/test/Other/pass-pipeline-parsing.ll | 8
 llvm/unittests/Analysis/LoopPassManagerTest.cpp | 1439
 34 files changed, 2015 insertions(+), 387 deletions(-)
diff --git a/llvm/include/llvm/Analysis/IVUsers.h b/llvm/include/llvm/Analysis/IVUsers.h
index e1a5467d8b6..223366d2af0 100644
--- a/llvm/include/llvm/Analysis/IVUsers.h
+++ b/llvm/include/llvm/Analysis/IVUsers.h
@@ -193,7 +193,8 @@ class IVUsersAnalysis : public AnalysisInfoMixin<IVUsersAnalysis> {
public:
typedef IVUsers Result;
- IVUsers run(Loop &L, LoopAnalysisManager &AM);
+ IVUsers run(Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR);
};
/// Printer pass for the \c IVUsers for a loop.
@@ -202,7 +203,8 @@ class IVUsersPrinterPass : public PassInfoMixin<IVUsersPrinterPass> {
public:
explicit IVUsersPrinterPass(raw_ostream &OS) : OS(OS) {}
- PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM);
+ PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR, LPMUpdater &U);
};
}
diff --git a/llvm/include/llvm/Analysis/LoopAccessAnalysis.h b/llvm/include/llvm/Analysis/LoopAccessAnalysis.h
index 76066f6003e..f545052ce8a 100644
--- a/llvm/include/llvm/Analysis/LoopAccessAnalysis.h
+++ b/llvm/include/llvm/Analysis/LoopAccessAnalysis.h
@@ -753,8 +753,8 @@ class LoopAccessAnalysis
public:
typedef LoopAccessInfo Result;
- Result run(Loop &, LoopAnalysisManager &);
- static StringRef name() { return "LoopAccessAnalysis"; }
+
+ Result run(Loop &L, LoopAnalysisManager &AM, LoopStandardAnalysisResults &AR);
};
/// \brief Printer pass for the \c LoopAccessInfo results.
@@ -764,7 +764,8 @@ class LoopAccessInfoPrinterPass
public:
explicit LoopAccessInfoPrinterPass(raw_ostream &OS) : OS(OS) {}
- PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM);
+ PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR, LPMUpdater &U);
};
inline Instruction *MemoryDepChecker::Dependence::getSource(
diff --git a/llvm/include/llvm/Analysis/LoopInfo.h b/llvm/include/llvm/Analysis/LoopInfo.h
index 0c99c6297c1..20e6af2727f 100644
--- a/llvm/include/llvm/Analysis/LoopInfo.h
+++ b/llvm/include/llvm/Analysis/LoopInfo.h
@@ -853,17 +853,8 @@ public:
void getAnalysisUsage(AnalysisUsage &AU) const override;
};
-/// \brief Pass for printing a loop's contents as LLVM's text IR assembly.
-class PrintLoopPass : public PassInfoMixin<PrintLoopPass> {
- raw_ostream &OS;
- std::string Banner;
-
-public:
- PrintLoopPass();
- PrintLoopPass(raw_ostream &OS, const std::string &Banner = "");
-
- PreservedAnalyses run(Loop &L, AnalysisManager<Loop> &);
-};
+/// Function to print a loop's contents as LLVM's text IR assembly.
+void printLoop(Loop &L, raw_ostream &OS, const std::string &Banner = "");
} // End llvm namespace
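
For reference, a one-line usage sketch of the replacement free function (assuming L is an llvm::Loop reference obtained from LoopInfo and that raw_ostream.h is available); callers no longer need a LoopAnalysisManager just to print a loop:

    // Equivalent of the old PrintLoopPass(OS, Banner).run(L, ...):
    llvm::printLoop(L, llvm::errs(), "; Loop before transformation:\n");
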
diff --git a/llvm/include/llvm/Analysis/LoopPassManager.h b/llvm/include/llvm/Analysis/LoopPassManager.h
index ae9c16502fe..dd2279fa655 100644
--- a/llvm/include/llvm/Analysis/LoopPassManager.h
+++ b/llvm/include/llvm/Analysis/LoopPassManager.h
@@ -8,63 +8,342 @@
//===----------------------------------------------------------------------===//
/// \file
///
-/// This header provides classes for managing passes over loops in LLVM IR.
+/// This header provides classes for managing a pipeline of passes over loops
+/// in LLVM IR.
+///
+/// The primary loop pass pipeline is managed in a very particular way to
+/// provide a set of core guarantees:
+/// 1) Loops are, where possible, in simplified form.
+/// 2) Loops are *always* in LCSSA form.
+/// 3) A collection of Loop-specific analysis results are available:
+/// - LoopInfo
+/// - DominatorTree
+/// - ScalarEvolution
+/// - AAManager
+/// 4) All loop passes preserve #1 (where possible), #2, and #3.
+/// 5) Loop passes run over each loop in the loop nest from the innermost to
+/// the outermost. Specifically, all inner loops are processed before
+/// passes run over outer loops. When running the pipeline across an inner
+/// loop creates new inner loops, those are added and processed in this
+/// order as well.
+///
+/// This process is designed to facilitate transformations which simplify,
+/// reduce, and remove loops. For passes which are more oriented towards
+/// optimizing loops, especially optimizing loop *nests* instead of single
+/// loops in isolation, this framework is less interesting.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_LOOPPASSMANAGER_H
#define LLVM_ANALYSIS_LOOPPASSMANAGER_H
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/PriorityWorklist.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/BasicAliasAnalysis.h"
+#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/PassManager.h"
namespace llvm {
-extern template class PassManager<Loop>;
-/// \brief The loop pass manager.
-///
-/// See the documentation for the PassManager template for details. It runs a
-/// sequency of loop passes over each loop that the manager is run over. This
-/// typedef serves as a convenient way to refer to this construct.
-typedef PassManager<Loop> LoopPassManager;
+// Forward declarations of the update-tracking and analysis-result-tracking
+// structures used in the API of loop passes that work within this
+// infrastructure.
+class LPMUpdater;
+struct LoopStandardAnalysisResults;
+
+/// Extern template declaration for the analysis set for this IR unit.
+extern template class AllAnalysesOn<Loop>;
-extern template class AnalysisManager<Loop>;
+extern template class AnalysisManager<Loop, LoopStandardAnalysisResults &>;
/// \brief The loop analysis manager.
///
/// See the documentation for the AnalysisManager template for detail
/// documentation. This typedef serves as a convenient way to refer to this
/// construct in the adaptors and proxies used to integrate this into the larger
/// pass manager infrastructure.
-typedef AnalysisManager<Loop> LoopAnalysisManager;
+typedef AnalysisManager<Loop, LoopStandardAnalysisResults &>
+ LoopAnalysisManager;
+
+// Explicit specialization and instantiation declarations for the pass manager.
+// See the comments on the definition of the specialization for details on how
+// it differs from the primary template.
+template <>
+PreservedAnalyses
+PassManager<Loop, LoopAnalysisManager, LoopStandardAnalysisResults &,
+ LPMUpdater &>::run(Loop &InitialL, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AnalysisResults,
+ LPMUpdater &U);
+extern template class PassManager<Loop, LoopAnalysisManager,
+ LoopStandardAnalysisResults &, LPMUpdater &>;
+
+/// \brief The Loop pass manager.
+///
+/// See the documentation for the PassManager template for details. It runs
+/// a sequence of Loop passes over each Loop that the manager is run over. This
+/// typedef serves as a convenient way to refer to this construct.
+typedef PassManager<Loop, LoopAnalysisManager, LoopStandardAnalysisResults &,
+ LPMUpdater &>
+ LoopPassManager;
+
+/// A partial specialization of the require analysis template pass to forward
+/// the extra parameters from a transformation's run method to the
+/// AnalysisManager's getResult.
+template <typename AnalysisT>
+struct RequireAnalysisPass<AnalysisT, Loop, LoopAnalysisManager,
+ LoopStandardAnalysisResults &, LPMUpdater &>
+ : PassInfoMixin<
+ RequireAnalysisPass<AnalysisT, Loop, LoopAnalysisManager,
+ LoopStandardAnalysisResults &, LPMUpdater &>> {
+ PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR, LPMUpdater &) {
+ (void)AM.template getResult<AnalysisT>(L, AR);
+ return PreservedAnalyses::all();
+ }
+};
+
+/// An alias template to easily name a require analysis loop pass.
+template <typename AnalysisT>
+using RequireAnalysisLoopPass =
+ RequireAnalysisPass<AnalysisT, Loop, LoopAnalysisManager,
+ LoopStandardAnalysisResults &, LPMUpdater &>;
/// A proxy from a \c LoopAnalysisManager to a \c Function.
typedef InnerAnalysisManagerProxy<LoopAnalysisManager, Function>
LoopAnalysisManagerFunctionProxy;
-/// Specialization of the invalidate method for the \c
-/// LoopAnalysisManagerFunctionProxy's result.
+/// A specialized result for the \c LoopAnalysisManagerFunctionProxy which
+/// retains a \c LoopInfo reference.
+///
+/// This allows it to collect loop objects for which analysis results may be
+/// cached in the \c LoopAnalysisManager.
+template <> class LoopAnalysisManagerFunctionProxy::Result {
+public:
+ explicit Result(LoopAnalysisManager &InnerAM, LoopInfo &LI)
+ : InnerAM(&InnerAM), LI(&LI) {}
+ Result(Result &&Arg) : InnerAM(std::move(Arg.InnerAM)), LI(Arg.LI) {
+ // We have to null out the analysis manager in the moved-from state
+ // because we are taking ownership of the responsibility to clear the
+ // analysis state.
+ Arg.InnerAM = nullptr;
+ }
+ Result &operator=(Result &&RHS) {
+ InnerAM = RHS.InnerAM;
+ LI = RHS.LI;
+ // We have to null out the analysis manager in the moved-from state
+ // because we are taking ownership of the responsibility to clear the
+ // analysis state.
+ RHS.InnerAM = nullptr;
+ return *this;
+ }
+ ~Result() {
+ // InnerAM is cleared in a moved from state where there is nothing to do.
+ if (!InnerAM)
+ return;
+
+ // Clear out the analysis manager if we're being destroyed -- it means we
+ // didn't even see an invalidate call when we got invalidated.
+ InnerAM->clear();
+ }
+
+ /// Accessor for the analysis manager.
+ LoopAnalysisManager &getManager() { return *InnerAM; }
+
+ /// Handler for invalidation of the proxy for a particular function.
+ ///
+ /// If the proxy, \c LoopInfo, and associated analyses are preserved, this
+ /// will merely forward the invalidation event to any cached loop analysis
+ /// results for loops within this function.
+ ///
+ /// If the necessary loop infrastructure is not preserved, this will forcibly
+ /// clear all of the cached analysis results that are keyed on the \c
+ /// LoopInfo for this function.
+ bool invalidate(Function &F, const PreservedAnalyses &PA,
+ FunctionAnalysisManager::Invalidator &Inv);
+
+private:
+ LoopAnalysisManager *InnerAM;
+ LoopInfo *LI;
+};
+
+/// Provide a specialized run method for the \c LoopAnalysisManagerFunctionProxy
+/// so it can pass the \c LoopInfo to the result.
template <>
-bool LoopAnalysisManagerFunctionProxy::Result::invalidate(
- Function &F, const PreservedAnalyses &PA,
- FunctionAnalysisManager::Invalidator &Inv);
+LoopAnalysisManagerFunctionProxy::Result
+LoopAnalysisManagerFunctionProxy::run(Function &F, FunctionAnalysisManager &AM);
// Ensure the \c LoopAnalysisManagerFunctionProxy is provided as an extern
// template.
extern template class InnerAnalysisManagerProxy<LoopAnalysisManager, Function>;
-extern template class OuterAnalysisManagerProxy<FunctionAnalysisManager, Loop>;
+extern template class OuterAnalysisManagerProxy<FunctionAnalysisManager, Loop,
+ LoopStandardAnalysisResults &>;
/// A proxy from a \c FunctionAnalysisManager to a \c Loop.
-typedef OuterAnalysisManagerProxy<FunctionAnalysisManager, Loop>
+typedef OuterAnalysisManagerProxy<FunctionAnalysisManager, Loop,
+ LoopStandardAnalysisResults &>
FunctionAnalysisManagerLoopProxy;
/// Returns the minimum set of Analyses that all loop passes must preserve.
PreservedAnalyses getLoopPassPreservedAnalyses();
+namespace internal {
+/// Helper to implement appending of loops onto a worklist.
+///
+/// We want to process loops in postorder, but the worklist is a LIFO data
+/// structure, so we append to it in *reverse* postorder.
+///
+/// For trees, a preorder traversal is a viable reverse postorder, so we
+/// actually append using a preorder walk algorithm.
+template <typename RangeT>
+inline void appendLoopsToWorklist(RangeT &&Loops,
+ SmallPriorityWorklist<Loop *, 4> &Worklist) {
+ // We use an internal worklist to build up the preorder traversal without
+ // recursion.
+ SmallVector<Loop *, 4> PreOrderLoops, PreOrderWorklist;
+
+ // We walk the initial sequence of loops in reverse because we generally want
+ // to visit defs before uses and the worklist is LIFO.
+ for (Loop *RootL : reverse(Loops)) {
+ assert(PreOrderLoops.empty() && "Must start with an empty preorder walk.");
+ assert(PreOrderWorklist.empty() &&
+ "Must start with an empty preorder walk worklist.");
+ PreOrderWorklist.push_back(RootL);
+ do {
+ Loop *L = PreOrderWorklist.pop_back_val();
+ PreOrderWorklist.append(L->begin(), L->end());
+ PreOrderLoops.push_back(L);
+ } while (!PreOrderWorklist.empty());
+
+ Worklist.insert(std::move(PreOrderLoops));
+ PreOrderLoops.clear();
+ }
+}
+}
+
+/// The adaptor from a function pass to a loop pass directly computes
+/// a standard set of analyses that are especially useful to loop passes and
+/// makes them available in the API. Loop passes are also expected to update
+/// all of these so that they remain correct across the entire loop pipeline.
+struct LoopStandardAnalysisResults {
+ AAResults &AA;
+ AssumptionCache &AC;
+ DominatorTree &DT;
+ LoopInfo &LI;
+ ScalarEvolution &SE;
+ TargetLibraryInfo &TLI;
+ TargetTransformInfo &TTI;
+};
+
+template <typename LoopPassT> class FunctionToLoopPassAdaptor;
+
+/// This class provides an interface for updating the loop pass manager based
+/// on mutations to the loop nest.
+///
+/// A reference to an instance of this class is passed as an argument to each
+/// Loop pass, and Loop passes should use it to update LPM infrastructure if
+/// they modify the loop nest structure.
+class LPMUpdater {
+public:
+ /// This can be queried by loop passes which run other loop passes (like pass
+ /// managers) to know whether the loop needs to be skipped due to updates to
+ /// the loop nest.
+ ///
+ /// If this returns true, the loop object may have been deleted, so passes
+ /// should take care not to touch the object.
+ bool skipCurrentLoop() const { return SkipCurrentLoop; }
+
+ /// Loop passes should use this method to indicate they have deleted a loop
+ /// from the nest.
+ ///
+ /// Note that this loop must either be the current loop or a subloop of the
+ /// current loop. This routine must be called prior to removing the loop from
+ /// the loop nest.
+ ///
+ /// If this is called for the current loop, in addition to clearing any
+ /// state, this routine will mark that the current loop should be skipped by
+ /// the rest of the pass management infrastructure.
+ void markLoopAsDeleted(Loop &L) {
+ LAM.clear(L);
+ assert(CurrentL->contains(&L) && "Cannot delete a loop outside of the "
+ "subloop tree currently being processed.");
+ if (&L == CurrentL)
+ SkipCurrentLoop = true;
+ }
+
+ /// Loop passes should use this method to indicate they have added new child
+ /// loops of the current loop.
+ ///
+ /// \p NewChildLoops must contain only the immediate children. Any nested
+ /// loops within them will be visited in postorder as usual for the loop pass
+ /// manager.
+ void addChildLoops(ArrayRef<Loop *> NewChildLoops) {
+ // Insert ourselves back into the worklist first, as this loop should be
+ // revisited after all the children have been processed.
+ Worklist.insert(CurrentL);
+
+#ifndef NDEBUG
+ for (Loop *NewL : NewChildLoops)
+ assert(NewL->getParentLoop() == CurrentL && "All of the new loops must "
+ "be immediate children of "
+ "the current loop!");
+#endif
+
+ internal::appendLoopsToWorklist(NewChildLoops, Worklist);
+
+ // Also skip further processing of the current loop--it will be revisited
+ // after all of its newly added children are accounted for.
+ SkipCurrentLoop = true;
+ }
+
+ /// Loop passes should use this method to indicate they have added new
+ /// sibling loops to the current loop.
+ ///
+ /// \p NewSibLoops must only contain the immediate sibling loops. Any nested
+ /// loops within them will be visited in postorder as usual for the loop pass
+ /// manager.
+ void addSiblingLoops(ArrayRef<Loop *> NewSibLoops) {
+#ifndef NDEBUG
+ for (Loop *NewL : NewSibLoops)
+ assert(NewL->getParentLoop() == ParentL &&
+ "All of the new loops must be siblings of the current loop!");
+#endif
+
+ internal::appendLoopsToWorklist(NewSibLoops, Worklist);
+
+ // No need to skip the current loop or revisit it, as sibling loops
+ // shouldn't impact anything.
+ }
+
+private:
+ template <typename LoopPassT> friend class llvm::FunctionToLoopPassAdaptor;
+
+ /// The \c FunctionToLoopPassAdaptor's worklist of loops to process.
+ SmallPriorityWorklist<Loop *, 4> &Worklist;
+
+ /// The analysis manager for use in the current loop nest.
+ LoopAnalysisManager &LAM;
+
+ Loop *CurrentL;
+ bool SkipCurrentLoop;
+
+#ifndef NDEBUG
+ // In debug builds we also track the parent loop to implement asserts even in
+ // the face of loop deletion.
+ Loop *ParentL;
+#endif
+
+ LPMUpdater(SmallPriorityWorklist<Loop *, 4> &Worklist,
+ LoopAnalysisManager &LAM)
+ : Worklist(Worklist), LAM(LAM) {}
+};
+
/// \brief Adaptor that maps from a function to its loops.
///
/// Designed to allow composition of a LoopPass(Manager) and a
@@ -87,42 +366,61 @@ public:
// Get the loop structure for this function
LoopInfo &LI = AM.getResult<LoopAnalysis>(F);
- // Also precompute all of the function analyses used by loop passes.
- // FIXME: These should be handed into the loop passes when the loop pass
- // management layer is reworked to follow the design of CGSCC.
- (void)AM.getResult<AAManager>(F);
- (void)AM.getResult<DominatorTreeAnalysis>(F);
- (void)AM.getResult<ScalarEvolutionAnalysis>(F);
- (void)AM.getResult<TargetLibraryAnalysis>(F);
+ // If there are no loops, there is nothing to do here.
+ if (LI.empty())
+ return PreservedAnalyses::all();
+
+ // Get the analysis results needed by loop passes.
+ LoopStandardAnalysisResults LAR = {AM.getResult<AAManager>(F),
+ AM.getResult<AssumptionAnalysis>(F),
+ AM.getResult<DominatorTreeAnalysis>(F),
+ AM.getResult<LoopAnalysis>(F),
+ AM.getResult<ScalarEvolutionAnalysis>(F),
+ AM.getResult<TargetLibraryAnalysis>(F),
+ AM.getResult<TargetIRAnalysis>(F)};
PreservedAnalyses PA = PreservedAnalyses::all();
- // We want to visit the loops in reverse post-order. We'll build the stack
- // of loops to visit in Loops by first walking the loops in pre-order.
- SmallVector<Loop *, 2> Loops;
- SmallVector<Loop *, 2> WorkList(LI.begin(), LI.end());
- while (!WorkList.empty()) {
- Loop *L = WorkList.pop_back_val();
- WorkList.insert(WorkList.end(), L->begin(), L->end());
- Loops.push_back(L);
- }
-
- // Now pop each element off of the stack to visit the loops in reverse
- // post-order.
- for (auto *L : reverse(Loops)) {
- PreservedAnalyses PassPA = Pass.run(*L, LAM);
+ // A postorder worklist of loops to process.
+ SmallPriorityWorklist<Loop *, 4> Worklist;
+
+ // Register the worklist and loop analysis manager so that loop passes can
+ // update them when they mutate the loop nest structure.
+ LPMUpdater Updater(Worklist, LAM);
+
+ // Add the loop nests in the reverse order of LoopInfo. For some reason,
+ // they are stored in RPO w.r.t. the control flow graph in LoopInfo. For
+ // the purpose of unrolling, loop deletion, and LICM, we largely want to
+ // work forward across the CFG so that we visit defs before uses and can
+ // propagate simplifications from one loop nest into the next.
+ // FIXME: Consider changing the order in LoopInfo.
+ internal::appendLoopsToWorklist(reverse(LI), Worklist);
+
+ do {
+ Loop *L = Worklist.pop_back_val();
+
+ // Reset the update structure for this loop.
+ Updater.CurrentL = L;
+ Updater.SkipCurrentLoop = false;
+#ifndef NDEBUG
+ Updater.ParentL = L->getParentLoop();
+#endif
+
+ PreservedAnalyses PassPA = Pass.run(*L, LAM, LAR, Updater);
// FIXME: We should verify the set of analyses relevant to Loop passes
// are preserved.
- // We know that the loop pass couldn't have invalidated any other loop's
- // analyses (that's the contract of a loop pass), so directly handle the
- // loop analysis manager's invalidation here.
- LAM.invalidate(*L, PassPA);
+ // If the loop hasn't been deleted, we need to handle invalidation here.
+ if (!Updater.skipCurrentLoop())
+ // We know that the loop pass couldn't have invalidated any other
+ // loop's analyses (that's the contract of a loop pass), so directly
+ // handle the loop analysis manager's invalidation here.
+ LAM.invalidate(*L, PassPA);
// Then intersect the preserved set so that invalidation of module
// analyses will eventually occur when the module pass completes.
PA.intersect(std::move(PassPA));
- }
+ } while (!Worklist.empty());
// By definition we preserve the proxy. We also preserve all analyses on
// Loops. This precludes *any* invalidation of loop analyses by the proxy,
@@ -130,6 +428,17 @@ public:
// loop analysis manager incrementally above.
PA.preserveSet<AllAnalysesOn<Loop>>();
PA.preserve<LoopAnalysisManagerFunctionProxy>();
+ // We also preserve the set of standard analyses.
+ PA.preserve<AssumptionAnalysis>();
+ PA.preserve<DominatorTreeAnalysis>();
+ PA.preserve<LoopAnalysis>();
+ PA.preserve<ScalarEvolutionAnalysis>();
+ // FIXME: What we really want to do here is preserve an AA category, but
+ // that concept doesn't exist yet.
+ PA.preserve<AAManager>();
+ PA.preserve<BasicAA>();
+ PA.preserve<GlobalsAA>();
+ PA.preserve<SCEVAA>();
return PA;
}
@@ -144,6 +453,19 @@ FunctionToLoopPassAdaptor<LoopPassT>
createFunctionToLoopPassAdaptor(LoopPassT Pass) {
return FunctionToLoopPassAdaptor<LoopPassT>(std::move(Pass));
}
+
+/// \brief Pass for printing a loop's contents as textual IR.
+class PrintLoopPass : public PassInfoMixin<PrintLoopPass> {
+ raw_ostream &OS;
+ std::string Banner;
+
+public:
+ PrintLoopPass();
+ PrintLoopPass(raw_ostream &OS, const std::string &Banner = "");
+
+ PreservedAnalyses run(Loop &L, LoopAnalysisManager &,
+ LoopStandardAnalysisResults &, LPMUpdater &);
+};
}
#endif // LLVM_ANALYSIS_LOOPPASSMANAGER_H
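
For orientation, a minimal sketch (not part of this patch) of a loop pass written against the interface declared above. The pass name and body are invented; only the run() signature, LoopStandardAnalysisResults, LPMUpdater, and getLoopPassPreservedAnalyses() come from this header.

    #include "llvm/Analysis/LoopPassManager.h"

    namespace {
    // Hypothetical example pass: does nothing except demonstrate the API shape.
    class ExampleLoopPass : public llvm::PassInfoMixin<ExampleLoopPass> {
    public:
      llvm::PreservedAnalyses run(llvm::Loop &L, llvm::LoopAnalysisManager &,
                                  llvm::LoopStandardAnalysisResults &AR,
                                  llvm::LPMUpdater &U) {
        // The standard analyses arrive by reference; there is no fishing for
        // cached function-level results anymore.
        if (!AR.SE.hasLoopInvariantBackedgeTakenCount(&L))
          return llvm::PreservedAnalyses::all();

        // A pass that deleted L or created loops would report that through U
        // (markLoopAsDeleted / addChildLoops / addSiblingLoops) before
        // returning.
        return llvm::getLoopPassPreservedAnalyses();
      }
    };
    } // namespace
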
diff --git a/llvm/include/llvm/Transforms/Scalar/IndVarSimplify.h b/llvm/include/llvm/Transforms/Scalar/IndVarSimplify.h
index 24a31594b15..231d7fd97f5 100644
--- a/llvm/include/llvm/Transforms/Scalar/IndVarSimplify.h
+++ b/llvm/include/llvm/Transforms/Scalar/IndVarSimplify.h
@@ -23,7 +23,8 @@ namespace llvm {
class IndVarSimplifyPass : public PassInfoMixin<IndVarSimplifyPass> {
public:
- PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM);
+ PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR, LPMUpdater &U);
};
}
diff --git a/llvm/include/llvm/Transforms/Scalar/LICM.h b/llvm/include/llvm/Transforms/Scalar/LICM.h
index 39bbc72f8cb..533c4e6adb5 100644
--- a/llvm/include/llvm/Transforms/Scalar/LICM.h
+++ b/llvm/include/llvm/Transforms/Scalar/LICM.h
@@ -42,7 +42,8 @@ namespace llvm {
/// Performs Loop Invariant Code Motion Pass.
class LICMPass : public PassInfoMixin<LICMPass> {
public:
- PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM);
+ PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR, LPMUpdater &U);
};
} // end namespace llvm
diff --git a/llvm/include/llvm/Transforms/Scalar/LoopDeletion.h b/llvm/include/llvm/Transforms/Scalar/LoopDeletion.h
index 891f08faa48..7265d3c6441 100644
--- a/llvm/include/llvm/Transforms/Scalar/LoopDeletion.h
+++ b/llvm/include/llvm/Transforms/Scalar/LoopDeletion.h
@@ -24,7 +24,8 @@ namespace llvm {
class LoopDeletionPass : public PassInfoMixin<LoopDeletionPass> {
public:
LoopDeletionPass() {}
- PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM);
+ PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR, LPMUpdater &U);
bool runImpl(Loop *L, DominatorTree &DT, ScalarEvolution &SE,
LoopInfo &loopInfo);
diff --git a/llvm/include/llvm/Transforms/Scalar/LoopIdiomRecognize.h b/llvm/include/llvm/Transforms/Scalar/LoopIdiomRecognize.h
index 0c052ddd2fe..e992efdb0d7 100644
--- a/llvm/include/llvm/Transforms/Scalar/LoopIdiomRecognize.h
+++ b/llvm/include/llvm/Transforms/Scalar/LoopIdiomRecognize.h
@@ -25,7 +25,8 @@ namespace llvm {
/// Performs Loop Idiom Recognize Pass.
class LoopIdiomRecognizePass : public PassInfoMixin<LoopIdiomRecognizePass> {
public:
- PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM);
+ PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR, LPMUpdater &U);
};
} // end namespace llvm
diff --git a/llvm/include/llvm/Transforms/Scalar/LoopInstSimplify.h b/llvm/include/llvm/Transforms/Scalar/LoopInstSimplify.h
index e30f4a97b78..64d17c5cc1b 100644
--- a/llvm/include/llvm/Transforms/Scalar/LoopInstSimplify.h
+++ b/llvm/include/llvm/Transforms/Scalar/LoopInstSimplify.h
@@ -23,7 +23,8 @@ namespace llvm {
/// Performs Loop Inst Simplify Pass.
class LoopInstSimplifyPass : public PassInfoMixin<LoopInstSimplifyPass> {
public:
- PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM);
+ PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR, LPMUpdater &U);
};
} // end namespace llvm
diff --git a/llvm/include/llvm/Transforms/Scalar/LoopRotation.h b/llvm/include/llvm/Transforms/Scalar/LoopRotation.h
index 54b8ec545ed..789beaf2233 100644
--- a/llvm/include/llvm/Transforms/Scalar/LoopRotation.h
+++ b/llvm/include/llvm/Transforms/Scalar/LoopRotation.h
@@ -24,7 +24,8 @@ namespace llvm {
class LoopRotatePass : public PassInfoMixin<LoopRotatePass> {
public:
LoopRotatePass(bool EnableHeaderDuplication = true);
- PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM);
+ PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR, LPMUpdater &U);
private:
const bool EnableHeaderDuplication;
diff --git a/llvm/include/llvm/Transforms/Scalar/LoopSimplifyCFG.h b/llvm/include/llvm/Transforms/Scalar/LoopSimplifyCFG.h
index 2f06782052c..91892c78df4 100644
--- a/llvm/include/llvm/Transforms/Scalar/LoopSimplifyCFG.h
+++ b/llvm/include/llvm/Transforms/Scalar/LoopSimplifyCFG.h
@@ -26,7 +26,8 @@ namespace llvm {
/// Performs basic CFG simplifications to assist other loop passes.
class LoopSimplifyCFGPass : public PassInfoMixin<LoopSimplifyCFGPass> {
public:
- PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM);
+ PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR, LPMUpdater &U);
};
} // end namespace llvm
diff --git a/llvm/include/llvm/Transforms/Scalar/LoopStrengthReduce.h b/llvm/include/llvm/Transforms/Scalar/LoopStrengthReduce.h
index 11c0d9bce85..05ecd5deaa0 100644
--- a/llvm/include/llvm/Transforms/Scalar/LoopStrengthReduce.h
+++ b/llvm/include/llvm/Transforms/Scalar/LoopStrengthReduce.h
@@ -31,7 +31,8 @@ namespace llvm {
/// Performs Loop Strength Reduce Pass.
class LoopStrengthReducePass : public PassInfoMixin<LoopStrengthReducePass> {
public:
- PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM);
+ PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR, LPMUpdater &U);
};
} // end namespace llvm
diff --git a/llvm/include/llvm/Transforms/Scalar/LoopUnrollPass.h b/llvm/include/llvm/Transforms/Scalar/LoopUnrollPass.h
index 74a7258df5f..4e259c7cc32 100644
--- a/llvm/include/llvm/Transforms/Scalar/LoopUnrollPass.h
+++ b/llvm/include/llvm/Transforms/Scalar/LoopUnrollPass.h
@@ -23,7 +23,8 @@ struct LoopUnrollPass : public PassInfoMixin<LoopUnrollPass> {
Optional<bool> ProvidedRuntime;
Optional<bool> ProvidedUpperBound;
- PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM);
+ PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR, LPMUpdater &U);
};
} // end namespace llvm
diff --git a/llvm/lib/Analysis/IVUsers.cpp b/llvm/lib/Analysis/IVUsers.cpp
index 76e2561b9da..eb3782c3631 100644
--- a/llvm/lib/Analysis/IVUsers.cpp
+++ b/llvm/lib/Analysis/IVUsers.cpp
@@ -36,19 +36,15 @@ using namespace llvm;
AnalysisKey IVUsersAnalysis::Key;
-IVUsers IVUsersAnalysis::run(Loop &L, LoopAnalysisManager &AM) {
- const auto &FAM =
- AM.getResult<FunctionAnalysisManagerLoopProxy>(L).getManager();
- Function *F = L.getHeader()->getParent();
-
- return IVUsers(&L, FAM.getCachedResult<AssumptionAnalysis>(*F),
- FAM.getCachedResult<LoopAnalysis>(*F),
- FAM.getCachedResult<DominatorTreeAnalysis>(*F),
- FAM.getCachedResult<ScalarEvolutionAnalysis>(*F));
+IVUsers IVUsersAnalysis::run(Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR) {
+ return IVUsers(&L, &AR.AC, &AR.LI, &AR.DT, &AR.SE);
}
-PreservedAnalyses IVUsersPrinterPass::run(Loop &L, LoopAnalysisManager &AM) {
- AM.getResult<IVUsersAnalysis>(L).print(OS);
+PreservedAnalyses IVUsersPrinterPass::run(Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR,
+ LPMUpdater &U) {
+ AM.getResult<IVUsersAnalysis>(L, AR).print(OS);
return PreservedAnalyses::all();
}
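
The analysis-side counterpart, sketched with a made-up analysis (LoopBlockCountAnalysis is not real): a loop analysis now receives the standard results as a third argument, and consumers thread them through getResult, exactly as IVUsersPrinterPass does above.

    #include "llvm/Analysis/LoopPassManager.h"

    namespace {
    // Hypothetical analysis; only the run() signature and the AnalysisInfoMixin
    // boilerplate reflect the new interface.
    class LoopBlockCountAnalysis
        : public llvm::AnalysisInfoMixin<LoopBlockCountAnalysis> {
      friend llvm::AnalysisInfoMixin<LoopBlockCountAnalysis>;
      static llvm::AnalysisKey Key;

    public:
      struct Result {
        unsigned NumBlocks;
      };

      Result run(llvm::Loop &L, llvm::LoopAnalysisManager &,
                 llvm::LoopStandardAnalysisResults &) {
        return Result{static_cast<unsigned>(L.getNumBlocks())};
      }
    };
    llvm::AnalysisKey LoopBlockCountAnalysis::Key;
    } // namespace

A loop pass or printer would then obtain it with AM.getResult<LoopBlockCountAnalysis>(L, AR).
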
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index 2f3dca3d23f..0de75ec2d17 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -2120,31 +2120,16 @@ INITIALIZE_PASS_END(LoopAccessLegacyAnalysis, LAA_NAME, laa_name, false, true)
AnalysisKey LoopAccessAnalysis::Key;
-LoopAccessInfo LoopAccessAnalysis::run(Loop &L, LoopAnalysisManager &AM) {
- const FunctionAnalysisManager &FAM =
- AM.getResult<FunctionAnalysisManagerLoopProxy>(L).getManager();
- Function &F = *L.getHeader()->getParent();
- auto *SE = FAM.getCachedResult<ScalarEvolutionAnalysis>(F);
- auto *TLI = FAM.getCachedResult<TargetLibraryAnalysis>(F);
- auto *AA = FAM.getCachedResult<AAManager>(F);
- auto *DT = FAM.getCachedResult<DominatorTreeAnalysis>(F);
- auto *LI = FAM.getCachedResult<LoopAnalysis>(F);
- if (!SE)
- report_fatal_error(
- "ScalarEvolution must have been cached at a higher level");
- if (!AA)
- report_fatal_error("AliasAnalysis must have been cached at a higher level");
- if (!DT)
- report_fatal_error("DominatorTree must have been cached at a higher level");
- if (!LI)
- report_fatal_error("LoopInfo must have been cached at a higher level");
- return LoopAccessInfo(&L, SE, TLI, AA, DT, LI);
+LoopAccessInfo LoopAccessAnalysis::run(Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR) {
+ return LoopAccessInfo(&L, &AR.SE, &AR.TLI, &AR.AA, &AR.DT, &AR.LI);
}
-PreservedAnalyses LoopAccessInfoPrinterPass::run(Loop &L,
- LoopAnalysisManager &AM) {
+PreservedAnalyses
+LoopAccessInfoPrinterPass::run(Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR, LPMUpdater &) {
Function &F = *L.getHeader()->getParent();
- auto &LAI = AM.getResult<LoopAccessAnalysis>(L);
+ auto &LAI = AM.getResult<LoopAccessAnalysis>(L, AR);
OS << "Loop access info in function '" << F.getName() << "':\n";
OS.indent(2) << L.getHeader()->getName() << ":\n";
LAI.print(OS, 4);
diff --git a/llvm/lib/Analysis/LoopInfo.cpp b/llvm/lib/Analysis/LoopInfo.cpp
index 3d85ef6988a..f449ce94d57 100644
--- a/llvm/lib/Analysis/LoopInfo.cpp
+++ b/llvm/lib/Analysis/LoopInfo.cpp
@@ -689,18 +689,13 @@ PreservedAnalyses LoopPrinterPass::run(Function &F,
return PreservedAnalyses::all();
}
-PrintLoopPass::PrintLoopPass() : OS(dbgs()) {}
-PrintLoopPass::PrintLoopPass(raw_ostream &OS, const std::string &Banner)
- : OS(OS), Banner(Banner) {}
-
-PreservedAnalyses PrintLoopPass::run(Loop &L, AnalysisManager<Loop> &) {
+void llvm::printLoop(Loop &L, raw_ostream &OS, const std::string &Banner) {
OS << Banner;
for (auto *Block : L.blocks())
if (Block)
Block->print(OS);
else
OS << "Printing <null> block";
- return PreservedAnalyses::all();
}
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Analysis/LoopPass.cpp b/llvm/lib/Analysis/LoopPass.cpp
index b5b8040984d..2686334045b 100644
--- a/llvm/lib/Analysis/LoopPass.cpp
+++ b/llvm/lib/Analysis/LoopPass.cpp
@@ -32,13 +32,14 @@ namespace {
/// PrintLoopPass - Print a Function corresponding to a Loop.
///
class PrintLoopPassWrapper : public LoopPass {
- PrintLoopPass P;
+ raw_ostream &OS;
+ std::string Banner;
public:
static char ID;
- PrintLoopPassWrapper() : LoopPass(ID) {}
+ PrintLoopPassWrapper() : LoopPass(ID), OS(dbgs()) {}
PrintLoopPassWrapper(raw_ostream &OS, const std::string &Banner)
- : LoopPass(ID), P(OS, Banner) {}
+ : LoopPass(ID), OS(OS), Banner(Banner) {}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
@@ -49,8 +50,7 @@ public:
[](BasicBlock *BB) { return BB; });
if (BBI != L->blocks().end() &&
isFunctionInPrintList((*BBI)->getParent()->getName())) {
- LoopAnalysisManager DummyLAM;
- P.run(*L, DummyLAM);
+ printLoop(*L, OS, Banner);
}
return false;
}
diff --git a/llvm/lib/Analysis/LoopPassManager.cpp b/llvm/lib/Analysis/LoopPassManager.cpp
index 044e5d55daf..75b5db55e54 100644
--- a/llvm/lib/Analysis/LoopPassManager.cpp
+++ b/llvm/lib/Analysis/LoopPassManager.cpp
@@ -20,34 +20,191 @@ using namespace llvm;
// Explicit template instantiations and specialization definitions for core
// template typedefs.
namespace llvm {
-template class PassManager<Loop>;
-template class AnalysisManager<Loop>;
+template class AllAnalysesOn<Loop>;
+template class AnalysisManager<Loop, LoopStandardAnalysisResults &>;
+template class PassManager<Loop, LoopAnalysisManager,
+ LoopStandardAnalysisResults &, LPMUpdater &>;
template class InnerAnalysisManagerProxy<LoopAnalysisManager, Function>;
-template class OuterAnalysisManagerProxy<FunctionAnalysisManager, Loop>;
+template class OuterAnalysisManagerProxy<FunctionAnalysisManager, Loop,
+ LoopStandardAnalysisResults &>;
+/// Explicitly specialize the pass manager's run method to handle loop nest
+/// structure updates.
template <>
+PreservedAnalyses
+PassManager<Loop, LoopAnalysisManager, LoopStandardAnalysisResults &,
+ LPMUpdater &>::run(Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR, LPMUpdater &U) {
+ PreservedAnalyses PA = PreservedAnalyses::all();
+
+ if (DebugLogging)
+ dbgs() << "Starting Loop pass manager run.\n";
+
+ for (auto &Pass : Passes) {
+ if (DebugLogging)
+ dbgs() << "Running pass: " << Pass->name() << " on " << L;
+
+ PreservedAnalyses PassPA = Pass->run(L, AM, AR, U);
+
+ // If the loop was deleted, abort the run and return to the outer walk.
+ if (U.skipCurrentLoop()) {
+ PA.intersect(std::move(PassPA));
+ break;
+ }
+
+ // Update the analysis manager as each pass runs and potentially
+ // invalidates analyses.
+ AM.invalidate(L, PassPA);
+
+ // Finally, we intersect the final preserved analyses to compute the
+ // aggregate preserved set for this pass manager.
+ PA.intersect(std::move(PassPA));
+
+ // FIXME: Historically, the pass managers all called the LLVM context's
+ // yield function here. We don't have a generic way to acquire the
+ // context and it isn't yet clear what the right pattern is for yielding
+ // in the new pass manager so it is currently omitted.
+ // ...getContext().yield();
+ }
+
+ // Invalidation for the current loop should be handled above, and other loop
+ // analysis results shouldn't be impacted by runs over this loop. Therefore,
+ // the remaining analysis results in the AnalysisManager are preserved. We
+ // mark this with a set so that we don't need to inspect each one
+ // individually.
+ // FIXME: This isn't correct! This loop and all nested loops' analyses should
+ // be preserved, but unrolling should invalidate the parent loop's analyses.
+ PA.preserveSet<AllAnalysesOn<Loop>>();
+
+ if (DebugLogging)
+ dbgs() << "Finished Loop pass manager run.\n";
+
+ return PA;
+}
+
bool LoopAnalysisManagerFunctionProxy::Result::invalidate(
Function &F, const PreservedAnalyses &PA,
FunctionAnalysisManager::Invalidator &Inv) {
- // If this proxy isn't marked as preserved, the set of Function objects in
- // the module may have changed. We therefore can't call
- // InnerAM->invalidate(), because any pointers to Functions it has may be
- // stale.
+ // First compute the sequence of IR units covered by this proxy. We will want
+ // to visit this in postorder, but because this is a tree structure we can do
+ // this by building a preorder sequence and walking it in reverse.
+ SmallVector<Loop *, 4> PreOrderLoops, PreOrderWorklist;
+ // Note that we want to walk the roots in reverse order because we will end
+ // up reversing the preorder sequence. However, it happens that the loop nest
+ // roots are in reverse order within the LoopInfo object. So we just walk
+ // forward here.
+ // FIXME: If we change the order of LoopInfo we will want to add a reverse
+ // here.
+ for (Loop *RootL : *LI) {
+ assert(PreOrderWorklist.empty() &&
+ "Must start with an empty preorder walk worklist.");
+ PreOrderWorklist.push_back(RootL);
+ do {
+ Loop *L = PreOrderWorklist.pop_back_val();
+ PreOrderWorklist.append(L->begin(), L->end());
+ PreOrderLoops.push_back(L);
+ } while (!PreOrderWorklist.empty());
+ }
+
+ // If this proxy or the loop info is going to be invalidated, we also need
+ // to clear all the keys coming from that analysis. We also completely blow
+ // away the loop analyses if any of the standard analyses provided by the
+ // loop pass manager go away so that loop analyses can freely use these
+ // without worrying about declaring dependencies on them etc.
+ // FIXME: It isn't clear if this is the right tradeoff. We could instead make
+ // loop analyses declare any dependencies on these and use the more general
+ // invalidation logic below to act on that.
auto PAC = PA.getChecker<LoopAnalysisManagerFunctionProxy>();
- if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Loop>>())
- InnerAM->clear();
+ if (!(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
+ Inv.invalidate<AAManager>(F, PA) ||
+ Inv.invalidate<AssumptionAnalysis>(F, PA) ||
+ Inv.invalidate<DominatorTreeAnalysis>(F, PA) ||
+ Inv.invalidate<LoopAnalysis>(F, PA) ||
+ Inv.invalidate<ScalarEvolutionAnalysis>(F, PA)) {
+ // Note that the LoopInfo may be stale at this point, however the loop
+ // objects themselves remain the only viable keys that could be in the
+ // analysis manager's cache. So we just walk the keys and forcibly clear
+ // those results. Note that the order doesn't matter here as this will just
+ // directly destroy the results without calling methods on them.
+ for (Loop *L : PreOrderLoops)
+ InnerAM->clear(*L);
+
+ // We also need to null out the inner AM so that when the object gets
+ // destroyed as invalid we don't try to clear the inner AM again. At that
+ // point we won't be able to reliably walk the loops for this function and
+ // only clear results associated with those loops the way we do here.
+ // FIXME: Making InnerAM null at this point isn't very nice. Most analyses
+ // try to remain valid during invalidation. Maybe we should add an
+ // `IsClean` flag?
+ InnerAM = nullptr;
+
+ // Now return true to indicate this *is* invalid and a fresh proxy result
+ // needs to be built. This is especially important given the null InnerAM.
+ return true;
+ }
+
+ // Directly check if the relevant set is preserved so we can short circuit
+ // invalidating loops.
+ bool AreLoopAnalysesPreserved =
+ PA.allAnalysesInSetPreserved<AllAnalysesOn<Loop>>();
+
+ // Since we have a valid LoopInfo we can actually leave the cached results in
+ // the analysis manager associated with the Loop keys, but we need to
+ // propagate any necessary invalidation logic into them. We'd like to
+ // invalidate things in roughly the same order as they were put into the
+ // cache and so we walk the preorder list in reverse to form a valid
+ // postorder.
+ for (Loop *L : reverse(PreOrderLoops)) {
+ Optional<PreservedAnalyses> InnerPA;
+
+ // Check to see whether the preserved set needs to be adjusted based on
+ // function-level analysis invalidation triggering deferred invalidation
+ // for this loop.
+ if (auto *OuterProxy =
+ InnerAM->getCachedResult<FunctionAnalysisManagerLoopProxy>(*L))
+ for (const auto &OuterInvalidationPair :
+ OuterProxy->getOuterInvalidations()) {
+ AnalysisKey *OuterAnalysisID = OuterInvalidationPair.first;
+ const auto &InnerAnalysisIDs = OuterInvalidationPair.second;
+ if (Inv.invalidate(OuterAnalysisID, F, PA)) {
+ if (!InnerPA)
+ InnerPA = PA;
+ for (AnalysisKey *InnerAnalysisID : InnerAnalysisIDs)
+ InnerPA->abandon(InnerAnalysisID);
+ }
+ }
+
+ // Check if we needed a custom PA set. If so we'll need to run the inner
+ // invalidation.
+ if (InnerPA) {
+ InnerAM->invalidate(*L, *InnerPA);
+ continue;
+ }
- // FIXME: Proper suppor for invalidation isn't yet implemented for the LPM.
+ // Otherwise we only need to do invalidation if the original PA set didn't
+ // preserve all Loop analyses.
+ if (!AreLoopAnalysesPreserved)
+ InnerAM->invalidate(*L, PA);
+ }
// Return false to indicate that this result is still a valid proxy.
return false;
}
+
+template <>
+LoopAnalysisManagerFunctionProxy::Result
+LoopAnalysisManagerFunctionProxy::run(Function &F,
+ FunctionAnalysisManager &AM) {
+ return Result(*InnerAM, AM.getResult<LoopAnalysis>(F));
+}
}
PreservedAnalyses llvm::getLoopPassPreservedAnalyses() {
PreservedAnalyses PA;
+ PA.preserve<AssumptionAnalysis>();
PA.preserve<DominatorTreeAnalysis>();
PA.preserve<LoopAnalysis>();
+ PA.preserve<LoopAnalysisManagerFunctionProxy>();
PA.preserve<ScalarEvolutionAnalysis>();
// TODO: What we really want to do here is preserve an AA category, but that
// concept doesn't exist yet.
@@ -57,3 +214,14 @@ PreservedAnalyses llvm::getLoopPassPreservedAnalyses() {
PA.preserve<SCEVAA>();
return PA;
}
+
+PrintLoopPass::PrintLoopPass() : OS(dbgs()) {}
+PrintLoopPass::PrintLoopPass(raw_ostream &OS, const std::string &Banner)
+ : OS(OS), Banner(Banner) {}
+
+PreservedAnalyses PrintLoopPass::run(Loop &L, LoopAnalysisManager &,
+ LoopStandardAnalysisResults &,
+ LPMUpdater &) {
+ printLoop(L, OS, Banner);
+ return PreservedAnalyses::all();
+}
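
To see where the adaptor and the relocated PrintLoopPass fit, a sketch (assuming the usual new-pass-manager headers) of wiring a loop pipeline into a function pipeline; the pass choices here are arbitrary:

    #include "llvm/Analysis/LoopPassManager.h"
    #include "llvm/Support/Debug.h"
    #include "llvm/Transforms/Scalar/LICM.h"
    #include "llvm/Transforms/Scalar/LoopRotation.h"

    void buildExamplePipeline(llvm::FunctionPassManager &FPM) {
      // A loop pipeline: each pass sees (Loop, LAM, LoopStandardAnalysisResults,
      // LPMUpdater) and the manager intersects their preserved sets.
      llvm::LoopPassManager LPM;
      LPM.addPass(llvm::LoopRotatePass());
      LPM.addPass(llvm::LICMPass());
      LPM.addPass(llvm::PrintLoopPass(llvm::dbgs(), "; After rotate+LICM:\n"));

      // The adaptor computes the standard analysis results once per function
      // and drives the postorder loop worklist described in the header.
      FPM.addPass(llvm::createFunctionToLoopPassAdaptor(std::move(LPM)));
    }
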
diff --git a/llvm/lib/Passes/PassBuilder.cpp b/llvm/lib/Passes/PassBuilder.cpp
index 6e0aae5fd85..dd9e41c44de 100644
--- a/llvm/lib/Passes/PassBuilder.cpp
+++ b/llvm/lib/Passes/PassBuilder.cpp
@@ -38,6 +38,7 @@
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/ModuleSummaryAnalysis.h"
#include "llvm/Analysis/OptimizationDiagnosticInfo.h"
@@ -220,7 +221,8 @@ public:
/// \brief No-op loop pass which does nothing.
struct NoOpLoopPass {
- PreservedAnalyses run(Loop &L, LoopAnalysisManager &) {
+ PreservedAnalyses run(Loop &L, LoopAnalysisManager &,
+ LoopStandardAnalysisResults &, LPMUpdater &) {
return PreservedAnalyses::all();
}
static StringRef name() { return "NoOpLoopPass"; }
@@ -233,7 +235,9 @@ class NoOpLoopAnalysis : public AnalysisInfoMixin<NoOpLoopAnalysis> {
public:
struct Result {};
- Result run(Loop &, LoopAnalysisManager &) { return Result(); }
+ Result run(Loop &, LoopAnalysisManager &, LoopStandardAnalysisResults &) {
+ return Result();
+ }
static StringRef name() { return "NoOpLoopAnalysis"; }
};
@@ -1019,7 +1023,9 @@ bool PassBuilder::parseLoopPass(LoopPassManager &LPM, const PipelineElement &E,
#define LOOP_ANALYSIS(NAME, CREATE_PASS) \
if (Name == "require<" NAME ">") { \
LPM.addPass(RequireAnalysisPass< \
- std::remove_reference<decltype(CREATE_PASS)>::type, Loop>()); \
+ std::remove_reference<decltype(CREATE_PASS)>::type, Loop, \
+ LoopAnalysisManager, LoopStandardAnalysisResults &, \
+ LPMUpdater &>()); \
return true; \
} \
if (Name == "invalidate<" NAME ">") { \
diff --git a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
index 3829aba9c86..10975cd7951 100644
--- a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -2482,23 +2482,13 @@ bool IndVarSimplify::run(Loop *L) {
return Changed;
}
-PreservedAnalyses IndVarSimplifyPass::run(Loop &L, LoopAnalysisManager &AM) {
- auto &FAM = AM.getResult<FunctionAnalysisManagerLoopProxy>(L).getManager();
+PreservedAnalyses IndVarSimplifyPass::run(Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR,
+ LPMUpdater &) {
Function *F = L.getHeader()->getParent();
const DataLayout &DL = F->getParent()->getDataLayout();
- auto *LI = FAM.getCachedResult<LoopAnalysis>(*F);
- auto *SE = FAM.getCachedResult<ScalarEvolutionAnalysis>(*F);
- auto *DT = FAM.getCachedResult<DominatorTreeAnalysis>(*F);
-
- assert((LI && SE && DT) &&
- "Analyses required for indvarsimplify not available!");
-
- // Optional analyses.
- auto *TTI = FAM.getCachedResult<TargetIRAnalysis>(*F);
- auto *TLI = FAM.getCachedResult<TargetLibraryAnalysis>(*F);
-
- IndVarSimplify IVS(LI, SE, DT, DL, TLI, TTI);
+ IndVarSimplify IVS(&AR.LI, &AR.SE, &AR.DT, DL, &AR.TLI, &AR.TTI);
if (!IVS.run(&L))
return PreservedAnalyses::all();
diff --git a/llvm/lib/Transforms/Scalar/LICM.cpp b/llvm/lib/Transforms/Scalar/LICM.cpp
index 26b3b14d186..24d1356ca5e 100644
--- a/llvm/lib/Transforms/Scalar/LICM.cpp
+++ b/llvm/lib/Transforms/Scalar/LICM.cpp
@@ -185,23 +185,20 @@ private:
};
}
-PreservedAnalyses LICMPass::run(Loop &L, LoopAnalysisManager &AM) {
+PreservedAnalyses LICMPass::run(Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR, LPMUpdater &) {
const auto &FAM =
- AM.getResult<FunctionAnalysisManagerLoopProxy>(L).getManager();
+ AM.getResult<FunctionAnalysisManagerLoopProxy>(L, AR).getManager();
Function *F = L.getHeader()->getParent();
- auto *AA = FAM.getCachedResult<AAManager>(*F);
- auto *LI = FAM.getCachedResult<LoopAnalysis>(*F);
- auto *DT = FAM.getCachedResult<DominatorTreeAnalysis>(*F);
- auto *TLI = FAM.getCachedResult<TargetLibraryAnalysis>(*F);
- auto *SE = FAM.getCachedResult<ScalarEvolutionAnalysis>(*F);
auto *ORE = FAM.getCachedResult<OptimizationRemarkEmitterAnalysis>(*F);
- assert((AA && LI && DT && TLI && SE && ORE) &&
- "Analyses for LICM not available");
+ // FIXME: This should probably be optional rather than required.
+ if (!ORE)
+ report_fatal_error("LICM: OptimizationRemarkEmitterAnalysis not "
+ "cached at a higher level");
LoopInvariantCodeMotion LICM;
-
- if (!LICM.runOnLoop(&L, AA, LI, DT, TLI, SE, ORE, true))
+ if (!LICM.runOnLoop(&L, &AR.AA, &AR.LI, &AR.DT, &AR.TLI, &AR.SE, ORE, true))
return PreservedAnalyses::all();
// FIXME: There is no setPreservesCFG in the new PM. When that becomes
diff --git a/llvm/lib/Transforms/Scalar/LoopDeletion.cpp b/llvm/lib/Transforms/Scalar/LoopDeletion.cpp
index 187e6e3073c..d79edd3f064 100644
--- a/llvm/lib/Transforms/Scalar/LoopDeletion.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopDeletion.cpp
@@ -215,15 +215,10 @@ bool LoopDeletionPass::runImpl(Loop *L, DominatorTree &DT, ScalarEvolution &SE,
return Changed;
}
-PreservedAnalyses LoopDeletionPass::run(Loop &L, LoopAnalysisManager &AM) {
- auto &FAM = AM.getResult<FunctionAnalysisManagerLoopProxy>(L).getManager();
- Function *F = L.getHeader()->getParent();
-
- auto &DT = *FAM.getCachedResult<DominatorTreeAnalysis>(*F);
- auto &SE = *FAM.getCachedResult<ScalarEvolutionAnalysis>(*F);
- auto &LI = *FAM.getCachedResult<LoopAnalysis>(*F);
-
- bool Changed = runImpl(&L, DT, SE, LI);
+PreservedAnalyses LoopDeletionPass::run(Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR,
+ LPMUpdater &) {
+ bool Changed = runImpl(&L, AR.DT, AR.SE, AR.LI);
if (!Changed)
return PreservedAnalyses::all();
diff --git a/llvm/lib/Transforms/Scalar/LoopDistribute.cpp b/llvm/lib/Transforms/Scalar/LoopDistribute.cpp
index b2b2f72aa83..1336b05239e 100644
--- a/llvm/lib/Transforms/Scalar/LoopDistribute.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopDistribute.cpp
@@ -946,10 +946,18 @@ PreservedAnalyses LoopDistributePass::run(Function &F,
auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
+ // We don't directly need these analyses but they're required for loop
+ // analyses so provide them below.
+ auto &AA = AM.getResult<AAManager>(F);
+ auto &AC = AM.getResult<AssumptionAnalysis>(F);
+ auto &TTI = AM.getResult<TargetIRAnalysis>(F);
+ auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
+
auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
std::function<const LoopAccessInfo &(Loop &)> GetLAA =
[&](Loop &L) -> const LoopAccessInfo & {
- return LAM.getResult<LoopAccessAnalysis>(L);
+ LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI};
+ return LAM.getResult<LoopAccessAnalysis>(L, AR);
};
bool Changed = runImpl(F, &LI, &DT, &SE, &ORE, GetLAA);
diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index 2743574ecca..89f5a45e696 100644
--- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -186,24 +186,12 @@ public:
};
} // End anonymous namespace.
-PreservedAnalyses LoopIdiomRecognizePass::run(Loop &L,
- LoopAnalysisManager &AM) {
- const auto &FAM =
- AM.getResult<FunctionAnalysisManagerLoopProxy>(L).getManager();
- Function *F = L.getHeader()->getParent();
-
- // Use getCachedResult because Loop pass cannot trigger a function analysis.
- auto *AA = FAM.getCachedResult<AAManager>(*F);
- auto *DT = FAM.getCachedResult<DominatorTreeAnalysis>(*F);
- auto *LI = FAM.getCachedResult<LoopAnalysis>(*F);
- auto *SE = FAM.getCachedResult<ScalarEvolutionAnalysis>(*F);
- auto *TLI = FAM.getCachedResult<TargetLibraryAnalysis>(*F);
- const auto *TTI = FAM.getCachedResult<TargetIRAnalysis>(*F);
+PreservedAnalyses LoopIdiomRecognizePass::run(Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR,
+ LPMUpdater &) {
const auto *DL = &L.getHeader()->getModule()->getDataLayout();
- assert((AA && DT && LI && SE && TLI && TTI && DL) &&
- "Analyses for Loop Idiom Recognition not available");
- LoopIdiomRecognize LIR(AA, DT, LI, SE, TLI, TTI, DL);
+ LoopIdiomRecognize LIR(&AR.AA, &AR.DT, &AR.LI, &AR.SE, &AR.TLI, &AR.TTI, DL);
if (!LIR.runOnLoop(&L))
return PreservedAnalyses::all();
diff --git a/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp b/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp
index f6620ad1ade..3e5640826b2 100644
--- a/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp
@@ -183,20 +183,10 @@ public:
};
}
-PreservedAnalyses LoopInstSimplifyPass::run(Loop &L,
- LoopAnalysisManager &AM) {
- const auto &FAM =
- AM.getResult<FunctionAnalysisManagerLoopProxy>(L).getManager();
- Function *F = L.getHeader()->getParent();
-
- // Use getCachedResult because Loop pass cannot trigger a function analysis.
- auto *DT = FAM.getCachedResult<DominatorTreeAnalysis>(*F);
- auto *LI = FAM.getCachedResult<LoopAnalysis>(*F);
- auto *AC = FAM.getCachedResult<AssumptionAnalysis>(*F);
- const auto *TLI = FAM.getCachedResult<TargetLibraryAnalysis>(*F);
- assert((LI && AC && TLI) && "Analyses for Loop Inst Simplify not available");
-
- if (!SimplifyLoopInst(&L, DT, LI, AC, TLI))
+PreservedAnalyses LoopInstSimplifyPass::run(Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR,
+ LPMUpdater &) {
+ if (!SimplifyLoopInst(&L, &AR.DT, &AR.LI, &AR.AC, &AR.TLI))
return PreservedAnalyses::all();
return getLoopPassPreservedAnalyses();
diff --git a/llvm/lib/Transforms/Scalar/LoopRotation.cpp b/llvm/lib/Transforms/Scalar/LoopRotation.cpp
index 0225cc32570..87f5f8b27a6 100644
--- a/llvm/lib/Transforms/Scalar/LoopRotation.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopRotation.cpp
@@ -625,20 +625,11 @@ bool LoopRotate::processLoop(Loop *L) {
LoopRotatePass::LoopRotatePass(bool EnableHeaderDuplication)
: EnableHeaderDuplication(EnableHeaderDuplication) {}
-PreservedAnalyses LoopRotatePass::run(Loop &L, LoopAnalysisManager &AM) {
- auto &FAM = AM.getResult<FunctionAnalysisManagerLoopProxy>(L).getManager();
- Function *F = L.getHeader()->getParent();
-
- auto *LI = FAM.getCachedResult<LoopAnalysis>(*F);
- const auto *TTI = FAM.getCachedResult<TargetIRAnalysis>(*F);
- auto *AC = FAM.getCachedResult<AssumptionAnalysis>(*F);
- assert((LI && TTI && AC) && "Analyses for loop rotation not available");
-
- // Optional analyses.
- auto *DT = FAM.getCachedResult<DominatorTreeAnalysis>(*F);
- auto *SE = FAM.getCachedResult<ScalarEvolutionAnalysis>(*F);
+PreservedAnalyses LoopRotatePass::run(Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR,
+ LPMUpdater &) {
int Threshold = EnableHeaderDuplication ? DefaultRotationThreshold : 0;
- LoopRotate LR(Threshold, LI, TTI, AC, DT, SE);
+ LoopRotate LR(Threshold, &AR.LI, &AR.TTI, &AR.AC, &AR.DT, &AR.SE);
bool Changed = LR.processLoop(&L);
if (!Changed)
diff --git a/llvm/lib/Transforms/Scalar/LoopSimplifyCFG.cpp b/llvm/lib/Transforms/Scalar/LoopSimplifyCFG.cpp
index d37339fc5fe..30d683611fc 100644
--- a/llvm/lib/Transforms/Scalar/LoopSimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopSimplifyCFG.cpp
@@ -64,16 +64,10 @@ static bool simplifyLoopCFG(Loop &L, DominatorTree &DT, LoopInfo &LI) {
return Changed;
}
-PreservedAnalyses LoopSimplifyCFGPass::run(Loop &L, LoopAnalysisManager &AM) {
- const auto &FAM =
- AM.getResult<FunctionAnalysisManagerLoopProxy>(L).getManager();
- Function *F = L.getHeader()->getParent();
-
- auto *LI = FAM.getCachedResult<LoopAnalysis>(*F);
- auto *DT = FAM.getCachedResult<DominatorTreeAnalysis>(*F);
- assert((LI && DT) && "Analyses for LoopSimplifyCFG not available");
-
- if (!simplifyLoopCFG(L, *DT, *LI))
+PreservedAnalyses LoopSimplifyCFGPass::run(Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR,
+ LPMUpdater &) {
+ if (!simplifyLoopCFG(L, AR.DT, AR.LI))
return PreservedAnalyses::all();
return getLoopPassPreservedAnalyses();
}
diff --git a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index a61f646042a..5356835ab74 100644
--- a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -5052,21 +5052,11 @@ bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) {
return ReduceLoopStrength(L, IU, SE, DT, LI, TTI);
}
-PreservedAnalyses LoopStrengthReducePass::run(Loop &L,
- LoopAnalysisManager &AM) {
- const auto &FAM =
- AM.getResult<FunctionAnalysisManagerLoopProxy>(L).getManager();
- Function *F = L.getHeader()->getParent();
-
- auto &IU = AM.getResult<IVUsersAnalysis>(L);
- auto *SE = FAM.getCachedResult<ScalarEvolutionAnalysis>(*F);
- auto *DT = FAM.getCachedResult<DominatorTreeAnalysis>(*F);
- auto *LI = FAM.getCachedResult<LoopAnalysis>(*F);
- auto *TTI = FAM.getCachedResult<TargetIRAnalysis>(*F);
- assert((SE && DT && LI && TTI) &&
- "Analyses for Loop Strength Reduce not available");
-
- if (!ReduceLoopStrength(&L, IU, *SE, *DT, *LI, *TTI))
+PreservedAnalyses LoopStrengthReducePass::run(Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR,
+ LPMUpdater &) {
+ if (!ReduceLoopStrength(&L, AM.getResult<IVUsersAnalysis>(L, AR), AR.SE,
+ AR.DT, AR.LI, AR.TTI))
return PreservedAnalyses::all();
return getLoopPassPreservedAnalyses();
diff --git a/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp b/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
index f66369b3036..a31514b1b77 100644
--- a/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
@@ -1111,41 +1111,23 @@ Pass *llvm::createSimpleLoopUnrollPass() {
return llvm::createLoopUnrollPass(-1, -1, 0, 0, 0);
}
-PreservedAnalyses LoopUnrollPass::run(Loop &L, LoopAnalysisManager &AM) {
+PreservedAnalyses LoopUnrollPass::run(Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR,
+ LPMUpdater &) {
const auto &FAM =
- AM.getResult<FunctionAnalysisManagerLoopProxy>(L).getManager();
+ AM.getResult<FunctionAnalysisManagerLoopProxy>(L, AR).getManager();
Function *F = L.getHeader()->getParent();
-
- DominatorTree *DT = FAM.getCachedResult<DominatorTreeAnalysis>(*F);
- LoopInfo *LI = FAM.getCachedResult<LoopAnalysis>(*F);
- ScalarEvolution *SE = FAM.getCachedResult<ScalarEvolutionAnalysis>(*F);
- auto *TTI = FAM.getCachedResult<TargetIRAnalysis>(*F);
- auto *AC = FAM.getCachedResult<AssumptionAnalysis>(*F);
auto *ORE = FAM.getCachedResult<OptimizationRemarkEmitterAnalysis>(*F);
- if (!DT)
- report_fatal_error(
- "LoopUnrollPass: DominatorTreeAnalysis not cached at a higher level");
- if (!LI)
- report_fatal_error(
- "LoopUnrollPass: LoopAnalysis not cached at a higher level");
- if (!SE)
- report_fatal_error(
- "LoopUnrollPass: ScalarEvolutionAnalysis not cached at a higher level");
- if (!TTI)
- report_fatal_error(
- "LoopUnrollPass: TargetIRAnalysis not cached at a higher level");
- if (!AC)
- report_fatal_error(
- "LoopUnrollPass: AssumptionAnalysis not cached at a higher level");
+ // FIXME: This should probably be optional rather than required.
if (!ORE)
report_fatal_error("LoopUnrollPass: OptimizationRemarkEmitterAnalysis not "
"cached at a higher level");
- bool Changed =
- tryToUnrollLoop(&L, *DT, LI, SE, *TTI, *AC, *ORE, /*PreserveLCSSA*/ true,
- ProvidedCount, ProvidedThreshold, ProvidedAllowPartial,
- ProvidedRuntime, ProvidedUpperBound);
+ bool Changed = tryToUnrollLoop(&L, AR.DT, &AR.LI, &AR.SE, AR.TTI, AR.AC, *ORE,
+ /*PreserveLCSSA*/ true, ProvidedCount,
+ ProvidedThreshold, ProvidedAllowPartial,
+ ProvidedRuntime, ProvidedUpperBound);
if (!Changed)
return PreservedAnalyses::all();
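
Analyses outside the standard bundle, such as the optimization remark emitter here, are still reached through the function analysis manager proxy, whose getResult() now also takes the bundle. The pattern distilled from the hunk above, as a sketch that assumes it sits inside a loop pass run() with parameters (L, AM, AR, U):

  // Fetch a cached function-level analysis from within a loop pass.
  const auto &FAM =
      AM.getResult<FunctionAnalysisManagerLoopProxy>(L, AR).getManager();
  Function &F = *L.getHeader()->getParent();
  // Loop passes may only consume cached function-level results; they cannot
  // trigger their computation.
  auto *ORE = FAM.getCachedResult<OptimizationRemarkEmitterAnalysis>(F);
  if (!ORE)
    report_fatal_error("OptimizationRemarkEmitterAnalysis not cached "
                       "at a higher level");
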
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 578c65daf7c..c9819a8e270 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -7641,7 +7641,7 @@ PreservedAnalyses LoopVectorizePass::run(Function &F,
auto &TTI = AM.getResult<TargetIRAnalysis>(F);
auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
- auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F);
+ auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
auto &AA = AM.getResult<AAManager>(F);
auto &AC = AM.getResult<AssumptionAnalysis>(F);
auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
@@ -7650,10 +7650,11 @@ PreservedAnalyses LoopVectorizePass::run(Function &F,
auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
std::function<const LoopAccessInfo &(Loop &)> GetLAA =
[&](Loop &L) -> const LoopAccessInfo & {
- return LAM.getResult<LoopAccessAnalysis>(L);
+ LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI};
+ return LAM.getResult<LoopAccessAnalysis>(L, AR);
};
bool Changed =
- runImpl(F, SE, LI, TTI, DT, BFI, TLI, DB, AA, AC, GetLAA, ORE);
+ runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE);
if (!Changed)
return PreservedAnalyses::all();
PreservedAnalyses PA;
diff --git a/llvm/test/Other/loop-pass-ordering.ll b/llvm/test/Other/loop-pass-ordering.ll
index ceda0d3869d..ab3839f5cc9 100644
--- a/llvm/test/Other/loop-pass-ordering.ll
+++ b/llvm/test/Other/loop-pass-ordering.ll
@@ -8,11 +8,12 @@
; / \ \
; loop.0.0 loop.0.1 loop.1.0
;
-; CHECK: Running pass: NoOpLoopPass on loop.1.0
-; CHECK: Running pass: NoOpLoopPass on loop.1
-; CHECK: Running pass: NoOpLoopPass on loop.0.0
-; CHECK: Running pass: NoOpLoopPass on loop.0.1
-; CHECK: Running pass: NoOpLoopPass on loop.0
+; CHECK: Running pass: NoOpLoopPass on Loop at depth 2 containing: %loop.0.0
+; CHECK: Running pass: NoOpLoopPass on Loop at depth 2 containing: %loop.0.1
+; CHECK: Running pass: NoOpLoopPass on Loop at depth 1 containing: %loop.0
+; CHECK: Running pass: NoOpLoopPass on Loop at depth 2 containing: %loop.1.0
+; CHECK: Running pass: NoOpLoopPass on Loop at depth 1 containing: %loop.1
+
define void @f() {
entry:
br label %loop.0
diff --git a/llvm/test/Other/new-pass-manager.ll b/llvm/test/Other/new-pass-manager.ll
index 6224af09a3f..eae2d855e92 100644
--- a/llvm/test/Other/new-pass-manager.ll
+++ b/llvm/test/Other/new-pass-manager.ll
@@ -433,12 +433,12 @@
; CHECK-O: Running pass: TailCallElimPass
; CHECK-O: Running pass: SimplifyCFGPass
; CHECK-O: Running pass: ReassociatePass
-; CHECK-O: Starting llvm::Loop pass manager run.
-; CHECK-O: Finished llvm::Loop pass manager run.
+; CHECK-O: Starting Loop pass manager run.
+; CHECK-O: Finished Loop pass manager run.
; CHECK-O: Running pass: SimplifyCFGPass
; CHECK-O: Running pass: InstCombinePass
-; CHECK-O: Starting llvm::Loop pass manager run.
-; CHECK-O: Finished llvm::Loop pass manager run.
+; CHECK-O: Starting Loop pass manager run.
+; CHECK-O: Finished Loop pass manager run.
; CHECK-O: Running pass: MemCpyOptPass
; CHECK-O: Running pass: SCCPPass
; CHECK-O: Running pass: BDCEPass
@@ -544,20 +544,21 @@
; CHECK-REPEAT-LOOP-PASS-NEXT: Running analysis: DominatorTreeAnalysis
; CHECK-REPEAT-LOOP-PASS-NEXT: Running analysis: AAManager
; CHECK-REPEAT-LOOP-PASS-NEXT: Running analysis: TargetLibraryAnalysis
-; CHECK-REPEAT-LOOP-PASS-NEXT: Running analysis: ScalarEvolutionAnalysis
; CHECK-REPEAT-LOOP-PASS-NEXT: Running analysis: AssumptionAnalysis
-; CHECK-REPEAT-LOOP-PASS-NEXT: Starting llvm::Loop pass manager run
+; CHECK-REPEAT-LOOP-PASS-NEXT: Running analysis: ScalarEvolutionAnalysis
+; CHECK-REPEAT-LOOP-PASS-NEXT: Running analysis: TargetIRAnalysis
+; CHECK-REPEAT-LOOP-PASS-NEXT: Starting Loop pass manager run
; CHECK-REPEAT-LOOP-PASS-NEXT: Running pass: RepeatedPass
-; CHECK-REPEAT-LOOP-PASS-NEXT: Starting llvm::Loop pass manager run
+; CHECK-REPEAT-LOOP-PASS-NEXT: Starting Loop pass manager run
; CHECK-REPEAT-LOOP-PASS-NEXT: Running pass: NoOpLoopPass
-; CHECK-REPEAT-LOOP-PASS-NEXT: Finished llvm::Loop pass manager run
-; CHECK-REPEAT-LOOP-PASS-NEXT: Starting llvm::Loop pass manager run
+; CHECK-REPEAT-LOOP-PASS-NEXT: Finished Loop pass manager run
+; CHECK-REPEAT-LOOP-PASS-NEXT: Starting Loop pass manager run
; CHECK-REPEAT-LOOP-PASS-NEXT: Running pass: NoOpLoopPass
-; CHECK-REPEAT-LOOP-PASS-NEXT: Finished llvm::Loop pass manager run
-; CHECK-REPEAT-LOOP-PASS-NEXT: Starting llvm::Loop pass manager run
+; CHECK-REPEAT-LOOP-PASS-NEXT: Finished Loop pass manager run
+; CHECK-REPEAT-LOOP-PASS-NEXT: Starting Loop pass manager run
; CHECK-REPEAT-LOOP-PASS-NEXT: Running pass: NoOpLoopPass
-; CHECK-REPEAT-LOOP-PASS-NEXT: Finished llvm::Loop pass manager run
-; CHECK-REPEAT-LOOP-PASS-NEXT: Finished llvm::Loop pass manager run
+; CHECK-REPEAT-LOOP-PASS-NEXT: Finished Loop pass manager run
+; CHECK-REPEAT-LOOP-PASS-NEXT: Finished Loop pass manager run
; CHECK-REPEAT-LOOP-PASS-NEXT: Finished llvm::Function pass manager run
; CHECK-REPEAT-LOOP-PASS-NEXT: Finished llvm::Module pass manager run
diff --git a/llvm/test/Other/pass-pipeline-parsing.ll b/llvm/test/Other/pass-pipeline-parsing.ll
index ad222dbef7a..b303318c796 100644
--- a/llvm/test/Other/pass-pipeline-parsing.ll
+++ b/llvm/test/Other/pass-pipeline-parsing.ll
@@ -144,10 +144,10 @@
; CHECK-TWO-NOOP-LOOP: Running pass: ModuleToFunctionPassAdaptor
; CHECK-TWO-NOOP-LOOP: Starting llvm::Function pass manager run
; CHECK-TWO-NOOP-LOOP: Running pass: FunctionToLoopPassAdaptor
-; CHECK-TWO-NOOP-LOOP: Starting llvm::Loop pass manager run
+; CHECK-TWO-NOOP-LOOP: Starting Loop pass manager run
; CHECK-TWO-NOOP-LOOP: Running pass: NoOpLoopPass
; CHECK-TWO-NOOP-LOOP: Running pass: NoOpLoopPass
-; CHECK-TWO-NOOP-LOOP: Finished llvm::Loop pass manager run
+; CHECK-TWO-NOOP-LOOP: Finished Loop pass manager run
; CHECK-TWO-NOOP-LOOP: Finished llvm::Function pass manager run
; CHECK-TWO-NOOP-LOOP: Finished llvm::Module pass manager run
@@ -167,9 +167,9 @@
; CHECK-NESTED-FP-LP: Running pass: ModuleToFunctionPassAdaptor
; CHECK-NESTED-FP-LP: Starting llvm::Function pass manager run
; CHECK-NESTED-FP-LP: Running pass: FunctionToLoopPassAdaptor
-; CHECK-NESTED-FP-LP: Starting llvm::Loop pass manager run
+; CHECK-NESTED-FP-LP: Starting Loop pass manager run
; CHECK-NESTED-FP-LP: Running pass: NoOpLoopPass
-; CHECK-NESTED-FP-LP: Finished llvm::Loop pass manager run
+; CHECK-NESTED-FP-LP: Finished Loop pass manager run
; CHECK-NESTED-FP-LP: Finished llvm::Function pass manager run
; CHECK-NESTED-FP-LP: Finished llvm::Module pass manager run
diff --git a/llvm/unittests/Analysis/LoopPassManagerTest.cpp b/llvm/unittests/Analysis/LoopPassManagerTest.cpp
index 092e4bf9113..1934899c6c2 100644
--- a/llvm/unittests/Analysis/LoopPassManagerTest.cpp
+++ b/llvm/unittests/Analysis/LoopPassManagerTest.cpp
@@ -12,6 +12,7 @@
#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
@@ -19,84 +20,198 @@
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/SourceMgr.h"
+#include "gmock/gmock.h"
#include "gtest/gtest.h"
using namespace llvm;
namespace {
-class TestLoopAnalysis : public AnalysisInfoMixin<TestLoopAnalysis> {
- friend AnalysisInfoMixin<TestLoopAnalysis>;
- static AnalysisKey Key;
-
- int &Runs;
+using testing::DoDefault;
+using testing::Return;
+using testing::Expectation;
+using testing::Invoke;
+using testing::InvokeWithoutArgs;
+using testing::_;
+template <typename DerivedT, typename IRUnitT,
+ typename AnalysisManagerT = AnalysisManager<IRUnitT>,
+ typename... ExtraArgTs>
+class MockAnalysisHandleBase {
public:
- struct Result {
- Result(int Count) : BlockCount(Count) {}
- int BlockCount;
- };
+ class Analysis : public AnalysisInfoMixin<Analysis> {
+ friend AnalysisInfoMixin<Analysis>;
+ friend MockAnalysisHandleBase;
+ static AnalysisKey Key;
+
+ DerivedT *Handle;
+
+ Analysis(DerivedT &Handle) : Handle(&Handle) {}
+
+ public:
+ class Result {
+ friend MockAnalysisHandleBase;
+
+ DerivedT *Handle;
- TestLoopAnalysis(int &Runs) : Runs(Runs) {}
+ Result(DerivedT &Handle) : Handle(&Handle) {}
- /// \brief Run the analysis pass over the loop and return a result.
- Result run(Loop &L, LoopAnalysisManager &AM) {
- ++Runs;
- int Count = 0;
+ public:
+ // Forward invalidation events to the mock handle.
+ bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA,
+ typename AnalysisManagerT::Invalidator &Inv) {
+ return Handle->invalidate(IR, PA, Inv);
+ }
+ };
- for (auto I = L.block_begin(), E = L.block_end(); I != E; ++I)
- ++Count;
- return Result(Count);
+ Result run(IRUnitT &IR, AnalysisManagerT &AM, ExtraArgTs... ExtraArgs) {
+ return Handle->run(IR, AM, ExtraArgs...);
+ }
+ };
+
+ Analysis getAnalysis() { return Analysis(static_cast<DerivedT &>(*this)); }
+ typename Analysis::Result getResult() {
+ return typename Analysis::Result(static_cast<DerivedT &>(*this));
+ }
+
+protected:
+ /// Derived classes should call this in their constructor to set up default
+ /// mock actions. (We can't do this in our constructor because this has to
+ /// run after the DerivedT is constructed.)
+ void setDefaults() {
+ ON_CALL(static_cast<DerivedT &>(*this),
+ run(_, _, testing::Matcher<ExtraArgTs>(_)...))
+ .WillByDefault(Return(this->getResult()));
+ ON_CALL(static_cast<DerivedT &>(*this), invalidate(_, _, _))
+ .WillByDefault(Invoke([](IRUnitT &, const PreservedAnalyses &PA,
+ typename AnalysisManagerT::Invalidator &Inv) {
+ auto PAC = PA.getChecker<Analysis>();
+ return !PAC.preserved() &&
+ !PAC.template preservedSet<AllAnalysesOn<IRUnitT>>();
+ }));
}
};
-AnalysisKey TestLoopAnalysis::Key;
+template <typename DerivedT, typename IRUnitT, typename AnalysisManagerT,
+ typename... ExtraArgTs>
+AnalysisKey MockAnalysisHandleBase<DerivedT, IRUnitT, AnalysisManagerT,
+ ExtraArgTs...>::Analysis::Key;
-class TestLoopPass {
- std::vector<StringRef> &VisitedLoops;
- int &AnalyzedBlockCount;
- bool OnlyUseCachedResults;
+/// Mock handle for loop analyses.
+///
+/// This is provided as a template accepting an (optional) integer. Because
+/// analyses are identified and queried by type, this allows constructing
+/// multiple handles with distinctly typed nested 'Analysis' types that can be
+/// registered and queried. If you want to register multiple loop analysis
+/// passes, you'll need to instantiate this type with different values for I.
+/// For example:
+///
+/// MockLoopAnalysisHandleTemplate<0> h0;
+/// MockLoopAnalysisHandleTemplate<1> h1;
+/// typedef decltype(h0)::Analysis Analysis0;
+/// typedef decltype(h1)::Analysis Analysis1;
+template <size_t I = static_cast<size_t>(-1)>
+struct MockLoopAnalysisHandleTemplate
+ : MockAnalysisHandleBase<MockLoopAnalysisHandleTemplate<I>, Loop,
+ LoopAnalysisManager,
+ LoopStandardAnalysisResults &> {
+ typedef typename MockLoopAnalysisHandleTemplate::Analysis Analysis;
+ MOCK_METHOD3_T(run, typename Analysis::Result(Loop &, LoopAnalysisManager &,
+ LoopStandardAnalysisResults &));
+
+ MOCK_METHOD3_T(invalidate, bool(Loop &, const PreservedAnalyses &,
+ LoopAnalysisManager::Invalidator &));
+
+ MockLoopAnalysisHandleTemplate() { this->setDefaults(); }
+};
+
+typedef MockLoopAnalysisHandleTemplate<> MockLoopAnalysisHandle;
+
+struct MockFunctionAnalysisHandle
+ : MockAnalysisHandleBase<MockFunctionAnalysisHandle, Function> {
+ MOCK_METHOD2(run, Analysis::Result(Function &, FunctionAnalysisManager &));
+
+ MOCK_METHOD3(invalidate, bool(Function &, const PreservedAnalyses &,
+ FunctionAnalysisManager::Invalidator &));
+
+ MockFunctionAnalysisHandle() { setDefaults(); }
+};
+
+template <typename DerivedT, typename IRUnitT,
+ typename AnalysisManagerT = AnalysisManager<IRUnitT>,
+ typename... ExtraArgTs>
+class MockPassHandleBase {
public:
- TestLoopPass(std::vector<StringRef> &VisitedLoops, int &AnalyzedBlockCount,
- bool OnlyUseCachedResults = false)
- : VisitedLoops(VisitedLoops), AnalyzedBlockCount(AnalyzedBlockCount),
- OnlyUseCachedResults(OnlyUseCachedResults) {}
-
- PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM) {
- VisitedLoops.push_back(L.getName());
-
- if (OnlyUseCachedResults) {
- // Hack to force the use of the cached interface.
- if (auto *AR = AM.getCachedResult<TestLoopAnalysis>(L))
- AnalyzedBlockCount += AR->BlockCount;
- } else {
- // Typical path just runs the analysis as needed.
- auto &AR = AM.getResult<TestLoopAnalysis>(L);
- AnalyzedBlockCount += AR.BlockCount;
+ class Pass : public PassInfoMixin<Pass> {
+ friend MockPassHandleBase;
+
+ DerivedT *Handle;
+
+ Pass(DerivedT &Handle) : Handle(&Handle) {}
+
+ public:
+ PreservedAnalyses run(IRUnitT &IR, AnalysisManagerT &AM,
+ ExtraArgTs... ExtraArgs) {
+ return Handle->run(IR, AM, ExtraArgs...);
}
+ };
- return PreservedAnalyses::all();
+ Pass getPass() { return Pass(static_cast<DerivedT &>(*this)); }
+
+protected:
+ /// Derived classes should call this in their constructor to set up default
+ /// mock actions. (We can't do this in our constructor because this has to
+ /// run after the DerivedT is constructed.)
+ void setDefaults() {
+ ON_CALL(static_cast<DerivedT &>(*this),
+ run(_, _, testing::Matcher<ExtraArgTs>(_)...))
+ .WillByDefault(Return(PreservedAnalyses::all()));
}
+};
- static StringRef name() { return "TestLoopPass"; }
+struct MockLoopPassHandle
+ : MockPassHandleBase<MockLoopPassHandle, Loop, LoopAnalysisManager,
+ LoopStandardAnalysisResults &, LPMUpdater &> {
+ MOCK_METHOD4(run,
+ PreservedAnalyses(Loop &, LoopAnalysisManager &,
+ LoopStandardAnalysisResults &, LPMUpdater &));
+ MockLoopPassHandle() { setDefaults(); }
};
-// A test loop pass that invalidates the analysis for loops with the given name.
-class TestLoopInvalidatingPass {
- StringRef Name;
+struct MockFunctionPassHandle
+ : MockPassHandleBase<MockFunctionPassHandle, Function> {
+ MOCK_METHOD2(run, PreservedAnalyses(Function &, FunctionAnalysisManager &));
-public:
- TestLoopInvalidatingPass(StringRef LoopName) : Name(LoopName) {}
+ MockFunctionPassHandle() { setDefaults(); }
+};
- PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM) {
- return L.getName() == Name ? getLoopPassPreservedAnalyses()
- : PreservedAnalyses::all();
- }
+struct MockModulePassHandle : MockPassHandleBase<MockModulePassHandle, Module> {
+ MOCK_METHOD2(run, PreservedAnalyses(Module &, ModuleAnalysisManager &));
- static StringRef name() { return "TestLoopInvalidatingPass"; }
+ MockModulePassHandle() { setDefaults(); }
};
+/// Define a custom matcher for objects which support a 'getName' method
+/// returning a StringRef.
+///
+/// LLVM often has IR objects or analysis objects which expose a StringRef name
+/// and in tests it is convenient to match these by name for readability. This
+/// matcher supports any type exposing a getName() method of this form.
+///
+/// It should be used as:
+///
+/// HasName("my_function")
+///
+/// No namespace or other qualification is required.
+MATCHER_P(HasName, Name, "") {
+ // The matcher's name and argument are printed in the case of failure, but we
+ // also want to print out the name of the argument. This uses an implicitly
+ // available std::ostream, so we have to construct a std::string.
+ *result_listener << "has name '" << arg.getName().str() << "'";
+ return Name == arg.getName();
+}
+
std::unique_ptr<Module> parseIR(LLVMContext &C, const char *IR) {
SMDiagnostic Err;
return parseAssemblyString(IR, Err, C);
@@ -107,6 +222,22 @@ protected:
LLVMContext Context;
std::unique_ptr<Module> M;
+ LoopAnalysisManager LAM;
+ FunctionAnalysisManager FAM;
+ ModuleAnalysisManager MAM;
+
+ MockLoopAnalysisHandle MLAHandle;
+ MockLoopPassHandle MLPHandle;
+ MockFunctionPassHandle MFPHandle;
+ MockModulePassHandle MMPHandle;
+
+ static PreservedAnalyses
+ getLoopAnalysisResult(Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR, LPMUpdater &) {
+ (void)AM.getResult<MockLoopAnalysisHandle::Analysis>(L, AR);
+ return PreservedAnalyses::all();
+ };
+
public:
LoopPassManagerTest()
: M(parseIR(Context, "define void @f() {\n"
@@ -129,81 +260,1175 @@ public:
" br i1 undef, label %loop.g.0, label %end\n"
"end:\n"
" ret void\n"
- "}\n")) {}
-};
+ "}\n")),
+ LAM(true), FAM(true), MAM(true) {
+ // Register our mock analysis.
+ LAM.registerPass([&] { return MLAHandle.getAnalysis(); });
-#define EXPECT_N_ELEMENTS_EQ(N, EXPECTED, ACTUAL) \
- do { \
- EXPECT_EQ(N##UL, ACTUAL.size()); \
- for (int I = 0; I < N; ++I) \
- EXPECT_TRUE(EXPECTED[I] == ACTUAL[I]) << "Element " << I << " is " \
- << ACTUAL[I] << ". Expected " \
- << EXPECTED[I] << "."; \
- } while (0)
+ // We need DominatorTreeAnalysis for LoopAnalysis.
+ FAM.registerPass([&] { return DominatorTreeAnalysis(); });
+ FAM.registerPass([&] { return LoopAnalysis(); });
+ // We also allow loop passes to assume a set of other analyses and so need
+ // those.
+ FAM.registerPass([&] { return AAManager(); });
+ FAM.registerPass([&] { return AssumptionAnalysis(); });
+ FAM.registerPass([&] { return ScalarEvolutionAnalysis(); });
+ FAM.registerPass([&] { return TargetLibraryAnalysis(); });
+ FAM.registerPass([&] { return TargetIRAnalysis(); });
-TEST_F(LoopPassManagerTest, Basic) {
- LoopAnalysisManager LAM(true);
- int LoopAnalysisRuns = 0;
- LAM.registerPass([&] { return TestLoopAnalysis(LoopAnalysisRuns); });
-
- FunctionAnalysisManager FAM(true);
- // We need DominatorTreeAnalysis for LoopAnalysis.
- FAM.registerPass([&] { return DominatorTreeAnalysis(); });
- FAM.registerPass([&] { return LoopAnalysis(); });
- // We also allow loop passes to assume a set of other analyses and so need
- // those.
- FAM.registerPass([&] { return AAManager(); });
- FAM.registerPass([&] { return TargetLibraryAnalysis(); });
- FAM.registerPass([&] { return ScalarEvolutionAnalysis(); });
- FAM.registerPass([&] { return AssumptionAnalysis(); });
- FAM.registerPass([&] { return LoopAnalysisManagerFunctionProxy(LAM); });
- LAM.registerPass([&] { return FunctionAnalysisManagerLoopProxy(FAM); });
-
- ModuleAnalysisManager MAM(true);
- MAM.registerPass([&] { return FunctionAnalysisManagerModuleProxy(FAM); });
- FAM.registerPass([&] { return ModuleAnalysisManagerFunctionProxy(MAM); });
+ // Cross-register proxies.
+ LAM.registerPass([&] { return FunctionAnalysisManagerLoopProxy(FAM); });
+ FAM.registerPass([&] { return LoopAnalysisManagerFunctionProxy(LAM); });
+ FAM.registerPass([&] { return ModuleAnalysisManagerFunctionProxy(MAM); });
+ MAM.registerPass([&] { return FunctionAnalysisManagerModuleProxy(FAM); });
+ }
+};
+TEST_F(LoopPassManagerTest, Basic) {
ModulePassManager MPM(true);
- FunctionPassManager FPM(true);
+ ::testing::InSequence MakeExpectationsSequenced;
- // Visit all of the loops.
- std::vector<StringRef> VisitedLoops1;
- int AnalyzedBlockCount1 = 0;
+ // First we just visit all the loops in all the functions and get their
+ // analysis results. This will run the analysis a total of four times,
+ // once for each loop.
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _))
+ .WillOnce(Invoke(getLoopAnalysisResult));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _))
+ .WillOnce(Invoke(getLoopAnalysisResult));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _))
+ .WillOnce(Invoke(getLoopAnalysisResult));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
+ EXPECT_CALL(MLPHandle, run(HasName("loop.g.0"), _, _, _))
+ .WillOnce(Invoke(getLoopAnalysisResult));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _));
+ // Wire the loop pass through pass managers into the module pipeline.
{
- LoopPassManager LPM;
- LPM.addPass(TestLoopPass(VisitedLoops1, AnalyzedBlockCount1));
-
+ LoopPassManager LPM(true);
+ LPM.addPass(MLPHandle.getPass());
+ FunctionPassManager FPM(true);
FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM)));
+ MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
}
- // Only use cached analyses.
- std::vector<StringRef> VisitedLoops2;
- int AnalyzedBlockCount2 = 0;
+ // Next we run two passes over the loops. The first one invalidates the
+ // analyses for one loop; the second one tries to get the analysis results.
+ // This should force only one analysis to re-run within the loop PM, but will
+ // also invalidate everything after the loop pass manager finishes.
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _))
+ .WillOnce(DoDefault())
+ .WillOnce(Invoke(getLoopAnalysisResult));
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _))
+ .WillOnce(InvokeWithoutArgs([] { return PreservedAnalyses::none(); }))
+ .WillOnce(Invoke(getLoopAnalysisResult));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _))
+ .WillOnce(DoDefault())
+ .WillOnce(Invoke(getLoopAnalysisResult));
+ EXPECT_CALL(MLPHandle, run(HasName("loop.g.0"), _, _, _))
+ .WillOnce(DoDefault())
+ .WillOnce(Invoke(getLoopAnalysisResult));
+ // Wire two loop pass runs into the module pipeline.
{
- LoopPassManager LPM;
- LPM.addPass(TestLoopInvalidatingPass("loop.g.0"));
- LPM.addPass(TestLoopPass(VisitedLoops2, AnalyzedBlockCount2,
- /*OnlyUseCachedResults=*/true));
-
+ LoopPassManager LPM(true);
+ LPM.addPass(MLPHandle.getPass());
+ LPM.addPass(MLPHandle.getPass());
+ FunctionPassManager FPM(true);
FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM)));
+ MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
}
+ // And now run the pipeline across the module.
+ MPM.run(*M, MAM);
+}
+
+TEST_F(LoopPassManagerTest, FunctionPassInvalidationOfLoopAnalyses) {
+ ModulePassManager MPM(true);
+ FunctionPassManager FPM(true);
+ // We process each function completely in sequence.
+ ::testing::Sequence FSequence, GSequence;
+
+ // First, force the analysis result to be computed for each loop.
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _))
+ .InSequence(FSequence)
+ .WillOnce(DoDefault());
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _))
+ .InSequence(FSequence)
+ .WillOnce(DoDefault());
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _))
+ .InSequence(FSequence)
+ .WillOnce(DoDefault());
+ EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _))
+ .InSequence(GSequence)
+ .WillOnce(DoDefault());
+ FPM.addPass(createFunctionToLoopPassAdaptor(
+ RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>()));
+
+ // No need to re-run if we require again from a fresh loop pass manager.
+ FPM.addPass(createFunctionToLoopPassAdaptor(
+ RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>()));
+
+ // For 'f', preserve most things but not the specific loop analyses.
+ EXPECT_CALL(MFPHandle, run(HasName("f"), _))
+ .InSequence(FSequence)
+ .WillOnce(Return(getLoopPassPreservedAnalyses()));
+ EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0.0"), _, _))
+ .InSequence(FSequence)
+ .WillOnce(DoDefault());
+ // On one loop, skip the invalidation (as though we did an internal update).
+ EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0.1"), _, _))
+ .InSequence(FSequence)
+ .WillOnce(Return(false));
+ EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0"), _, _))
+ .InSequence(FSequence)
+ .WillOnce(DoDefault());
+ // Now two loops still have to be recomputed.
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _))
+ .InSequence(FSequence)
+ .WillOnce(DoDefault());
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _))
+ .InSequence(FSequence)
+ .WillOnce(DoDefault());
+ // Preserve things in the second function to ensure invalidation remains
+ // isolated to one function.
+ EXPECT_CALL(MFPHandle, run(HasName("g"), _))
+ .InSequence(GSequence)
+ .WillOnce(DoDefault());
+ FPM.addPass(MFPHandle.getPass());
+ FPM.addPass(createFunctionToLoopPassAdaptor(
+ RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>()));
+
+ EXPECT_CALL(MFPHandle, run(HasName("f"), _))
+ .InSequence(FSequence)
+ .WillOnce(DoDefault());
+ // For 'g', fail to preserve anything, causing the loops themselves to be
+ // cleared. We don't get an invalidation event here as the loop is gone, but
+ // we should still have to recompute the analysis.
+ EXPECT_CALL(MFPHandle, run(HasName("g"), _))
+ .InSequence(GSequence)
+ .WillOnce(Return(PreservedAnalyses::none()));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _))
+ .InSequence(GSequence)
+ .WillOnce(DoDefault());
+ FPM.addPass(MFPHandle.getPass());
+ FPM.addPass(createFunctionToLoopPassAdaptor(
+ RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>()));
+
MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
+
+ // Verify with a separate function pass run that we didn't mess up 'f's
+ // cache. No analysis runs should be necessary here.
+ MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor(
+ RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>())));
+
MPM.run(*M, MAM);
+}
+
+TEST_F(LoopPassManagerTest, ModulePassInvalidationOfLoopAnalyses) {
+ ModulePassManager MPM(true);
+ ::testing::InSequence MakeExpectationsSequenced;
+
+ // First, force the analysis result to be computed for each loop.
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _));
+ MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor(
+ RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>())));
+
+ // Walking all the way out and all the way back in doesn't re-run the
+ // analysis.
+ MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor(
+ RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>())));
+
+ // But a module pass that doesn't preserve the actual mock loop analysis
+ // invalidates all the way down and forces recomputing.
+ EXPECT_CALL(MMPHandle, run(_, _)).WillOnce(InvokeWithoutArgs([] {
+ auto PA = getLoopPassPreservedAnalyses();
+ PA.preserve<FunctionAnalysisManagerModuleProxy>();
+ return PA;
+ }));
+ // All the loop analyses from both functions get invalidated before we
+ // recompute anything.
+ EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0.0"), _, _));
+ // On one loop, again skip the invalidation (as though we did an internal
+ // update).
+ EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0.1"), _, _))
+ .WillOnce(Return(false));
+ EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0"), _, _));
+ EXPECT_CALL(MLAHandle, invalidate(HasName("loop.g.0"), _, _));
+ // Now all but one of the loops gets re-analyzed.
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _));
+ MPM.addPass(MMPHandle.getPass());
+ MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor(
+ RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>())));
+
+ // Verify that the cached values persist.
+ MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor(
+ RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>())));
+
+ // Now we fail to preserve the loop analysis and observe that the loop
+ // analyses are cleared (so no invalidation event) as the loops themselves
+ // are no longer valid.
+ EXPECT_CALL(MMPHandle, run(_, _)).WillOnce(InvokeWithoutArgs([] {
+ auto PA = PreservedAnalyses::none();
+ PA.preserve<FunctionAnalysisManagerModuleProxy>();
+ return PA;
+ }));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _));
+ MPM.addPass(MMPHandle.getPass());
+ MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor(
+ RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>())));
+
+ // Verify that the cached values persist.
+ MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor(
+ RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>())));
+
+ // Next, check that even if we preserve everything within the function itself,
+ // if the function's module pass proxy isn't preserved and the potential set
+ // of functions changes, the clear reaches the loop analyses as well. This
+ // will again trigger re-runs but not invalidation events.
+ EXPECT_CALL(MMPHandle, run(_, _)).WillOnce(InvokeWithoutArgs([] {
+ auto PA = PreservedAnalyses::none();
+ PA.preserveSet<AllAnalysesOn<Function>>();
+ PA.preserveSet<AllAnalysesOn<Loop>>();
+ return PA;
+ }));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _));
+ MPM.addPass(MMPHandle.getPass());
+ MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor(
+ RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>())));
+
+ MPM.run(*M, MAM);
+}
+
+// Test that if any of the bundled analyses provided in the LPM's signature
+// become invalid, the analysis proxy itself becomes invalid and we clear all
+// loop analysis results.
+TEST_F(LoopPassManagerTest, InvalidationOfBundledAnalyses) {
+ ModulePassManager MPM(true);
+ FunctionPassManager FPM(true);
+ ::testing::InSequence MakeExpectationsSequenced;
+
+ // First, force the analysis result to be computed for each loop.
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
+ FPM.addPass(createFunctionToLoopPassAdaptor(
+ RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>()));
+
+ // No need to re-run if we require again from a fresh loop pass manager.
+ FPM.addPass(createFunctionToLoopPassAdaptor(
+ RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>()));
+
+ // Preserving everything but the loop analyses themselves results in
+ // invalidation and running.
+ EXPECT_CALL(MFPHandle, run(HasName("f"), _))
+ .WillOnce(Return(getLoopPassPreservedAnalyses()));
+ EXPECT_CALL(MLAHandle, invalidate(_, _, _)).Times(3);
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
+ FPM.addPass(MFPHandle.getPass());
+ FPM.addPass(createFunctionToLoopPassAdaptor(
+ RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>()));
+
+ // The rest don't invalidate analyses; they only trigger re-runs because we
+ // clear the cache completely.
+ EXPECT_CALL(MFPHandle, run(HasName("f"), _)).WillOnce(InvokeWithoutArgs([] {
+ auto PA = PreservedAnalyses::none();
+ // Not preserving `AAManager`.
+ PA.preserve<AssumptionAnalysis>();
+ PA.preserve<DominatorTreeAnalysis>();
+ PA.preserve<LoopAnalysis>();
+ PA.preserve<LoopAnalysisManagerFunctionProxy>();
+ PA.preserve<ScalarEvolutionAnalysis>();
+ return PA;
+ }));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
+ FPM.addPass(MFPHandle.getPass());
+ FPM.addPass(createFunctionToLoopPassAdaptor(
+ RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>()));
+
+ EXPECT_CALL(MFPHandle, run(HasName("f"), _)).WillOnce(InvokeWithoutArgs([] {
+ auto PA = PreservedAnalyses::none();
+ PA.preserve<AAManager>();
+ // Not preserving `AssumptionAnalysis`.
+ PA.preserve<DominatorTreeAnalysis>();
+ PA.preserve<LoopAnalysis>();
+ PA.preserve<LoopAnalysisManagerFunctionProxy>();
+ PA.preserve<ScalarEvolutionAnalysis>();
+ return PA;
+ }));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
+ FPM.addPass(MFPHandle.getPass());
+ FPM.addPass(createFunctionToLoopPassAdaptor(
+ RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>()));
+
+ EXPECT_CALL(MFPHandle, run(HasName("f"), _)).WillOnce(InvokeWithoutArgs([] {
+ auto PA = PreservedAnalyses::none();
+ PA.preserve<AAManager>();
+ PA.preserve<AssumptionAnalysis>();
+ // Not preserving `DominatorTreeAnalysis`.
+ PA.preserve<LoopAnalysis>();
+ PA.preserve<LoopAnalysisManagerFunctionProxy>();
+ PA.preserve<ScalarEvolutionAnalysis>();
+ return PA;
+ }));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
+ FPM.addPass(MFPHandle.getPass());
+ FPM.addPass(createFunctionToLoopPassAdaptor(
+ RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>()));
+
+ EXPECT_CALL(MFPHandle, run(HasName("f"), _)).WillOnce(InvokeWithoutArgs([] {
+ auto PA = PreservedAnalyses::none();
+ PA.preserve<AAManager>();
+ PA.preserve<AssumptionAnalysis>();
+ PA.preserve<DominatorTreeAnalysis>();
+ // Not preserving the `LoopAnalysis`.
+ PA.preserve<LoopAnalysisManagerFunctionProxy>();
+ PA.preserve<ScalarEvolutionAnalysis>();
+ return PA;
+ }));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
+ FPM.addPass(MFPHandle.getPass());
+ FPM.addPass(createFunctionToLoopPassAdaptor(
+ RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>()));
+
+ EXPECT_CALL(MFPHandle, run(HasName("f"), _)).WillOnce(InvokeWithoutArgs([] {
+ auto PA = PreservedAnalyses::none();
+ PA.preserve<AAManager>();
+ PA.preserve<AssumptionAnalysis>();
+ PA.preserve<DominatorTreeAnalysis>();
+ PA.preserve<LoopAnalysis>();
+ // Not preserving the `LoopAnalysisManagerFunctionProxy`.
+ PA.preserve<ScalarEvolutionAnalysis>();
+ return PA;
+ }));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
+ FPM.addPass(MFPHandle.getPass());
+ FPM.addPass(createFunctionToLoopPassAdaptor(
+ RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>()));
+
+ EXPECT_CALL(MFPHandle, run(HasName("f"), _)).WillOnce(InvokeWithoutArgs([] {
+ auto PA = PreservedAnalyses::none();
+ PA.preserve<AAManager>();
+ PA.preserve<AssumptionAnalysis>();
+ PA.preserve<DominatorTreeAnalysis>();
+ PA.preserve<LoopAnalysis>();
+ PA.preserve<LoopAnalysisManagerFunctionProxy>();
+ // Not preserving `ScalarEvolutionAnalysis`.
+ return PA;
+ }));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
+ FPM.addPass(MFPHandle.getPass());
+ FPM.addPass(createFunctionToLoopPassAdaptor(
+ RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>()));
+
+ // After all the churn on 'f', we'll compute the loop analysis results for
+ // 'g' once with a requires pass and then run our mock pass over g a bunch
+ // but just get cached results each time.
+ EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _));
+ EXPECT_CALL(MFPHandle, run(HasName("g"), _)).Times(7);
+
+ MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
+ MPM.run(*M, MAM);
+}
+
+TEST_F(LoopPassManagerTest, IndirectInvalidation) {
+ // We need two distinct analysis types and handles.
+ enum { A, B };
+ MockLoopAnalysisHandleTemplate<A> MLAHandleA;
+ MockLoopAnalysisHandleTemplate<B> MLAHandleB;
+ LAM.registerPass([&] { return MLAHandleA.getAnalysis(); });
+ LAM.registerPass([&] { return MLAHandleB.getAnalysis(); });
+ typedef decltype(MLAHandleA)::Analysis AnalysisA;
+ typedef decltype(MLAHandleB)::Analysis AnalysisB;
+
+ // Set up AnalysisA to depend on our AnalysisB. For testing purposes we just
+ // need to get the AnalysisB results in AnalysisA's run method and check if
+ // AnalysisB gets invalidated in AnalysisA's invalidate method.
+ ON_CALL(MLAHandleA, run(_, _, _))
+ .WillByDefault(Invoke([&](Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR) {
+ (void)AM.getResult<AnalysisB>(L, AR);
+ return MLAHandleA.getResult();
+ }));
+ ON_CALL(MLAHandleA, invalidate(_, _, _))
+ .WillByDefault(Invoke([](Loop &L, const PreservedAnalyses &PA,
+ LoopAnalysisManager::Invalidator &Inv) {
+ auto PAC = PA.getChecker<AnalysisA>();
+ return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Loop>>()) ||
+ Inv.invalidate<AnalysisB>(L, PA);
+ }));
- StringRef ExpectedLoops[] = {"loop.0.0", "loop.0.1", "loop.0", "loop.g.0"};
+ ::testing::InSequence MakeExpectationsSequenced;
- // Validate the counters and order of loops visited.
- // loop.0 has 3 blocks whereas loop.0.0, loop.0.1, and loop.g.0 each have 1.
- EXPECT_N_ELEMENTS_EQ(4, ExpectedLoops, VisitedLoops1);
- EXPECT_EQ(6, AnalyzedBlockCount1);
+ // Compute the analyses across all of 'f' first.
+ EXPECT_CALL(MLAHandleA, run(HasName("loop.0.0"), _, _));
+ EXPECT_CALL(MLAHandleB, run(HasName("loop.0.0"), _, _));
+ EXPECT_CALL(MLAHandleA, run(HasName("loop.0.1"), _, _));
+ EXPECT_CALL(MLAHandleB, run(HasName("loop.0.1"), _, _));
+ EXPECT_CALL(MLAHandleA, run(HasName("loop.0"), _, _));
+ EXPECT_CALL(MLAHandleB, run(HasName("loop.0"), _, _));
- EXPECT_N_ELEMENTS_EQ(4, ExpectedLoops, VisitedLoops2);
- // The block from loop.g.0 won't be counted, since it wasn't cached.
- EXPECT_EQ(5, AnalyzedBlockCount2);
+ // Now we invalidate AnalysisB (but not AnalysisA) for one of the loops and
+ // preserve everything for the rest. This in turn triggers that one loop to
+ // recompute both AnalysisB *and* AnalysisA if indirect invalidation is
+ // working.
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _))
+ .WillOnce(InvokeWithoutArgs([] {
+ auto PA = getLoopPassPreservedAnalyses();
+ // Specifically preserve AnalysisA so that it would survive if it
+ // didn't depend on AnalysisB.
+ PA.preserve<AnalysisA>();
+ return PA;
+ }));
+ // It happens that AnalysisB is invalidated first. That shouldn't matter
+ // though, and we should still call AnalysisA's invalidation.
+ EXPECT_CALL(MLAHandleB, invalidate(HasName("loop.0.0"), _, _));
+ EXPECT_CALL(MLAHandleA, invalidate(HasName("loop.0.0"), _, _));
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _))
+ .WillOnce(Invoke([](Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR, LPMUpdater &) {
+ (void)AM.getResult<AnalysisA>(L, AR);
+ return PreservedAnalyses::all();
+ }));
+ EXPECT_CALL(MLAHandleA, run(HasName("loop.0.0"), _, _));
+ EXPECT_CALL(MLAHandleB, run(HasName("loop.0.0"), _, _));
+ // The rest of the loops should run and get cached results.
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _))
+ .Times(2)
+ .WillRepeatedly(Invoke([](Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR, LPMUpdater &) {
+ (void)AM.getResult<AnalysisA>(L, AR);
+ return PreservedAnalyses::all();
+ }));
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _))
+ .Times(2)
+ .WillRepeatedly(Invoke([](Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR, LPMUpdater &) {
+ (void)AM.getResult<AnalysisA>(L, AR);
+ return PreservedAnalyses::all();
+ }));
- // The first LPM runs the loop analysis for all four loops, the second uses
- // cached results for everything.
- EXPECT_EQ(4, LoopAnalysisRuns);
+ // The run over 'g' should be boring, with us just computing the analyses once
+ // up front and then running loop passes and getting cached results.
+ EXPECT_CALL(MLAHandleA, run(HasName("loop.g.0"), _, _));
+ EXPECT_CALL(MLAHandleB, run(HasName("loop.g.0"), _, _));
+ EXPECT_CALL(MLPHandle, run(HasName("loop.g.0"), _, _, _))
+ .Times(2)
+ .WillRepeatedly(Invoke([](Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR, LPMUpdater &) {
+ (void)AM.getResult<AnalysisA>(L, AR);
+ return PreservedAnalyses::all();
+ }));
+
+ // Build the pipeline and run it.
+ ModulePassManager MPM(true);
+ FunctionPassManager FPM(true);
+ FPM.addPass(
+ createFunctionToLoopPassAdaptor(RequireAnalysisLoopPass<AnalysisA>()));
+ LoopPassManager LPM(true);
+ LPM.addPass(MLPHandle.getPass());
+ LPM.addPass(MLPHandle.getPass());
+ FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM)));
+ MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
+ MPM.run(*M, MAM);
+}
+
+TEST_F(LoopPassManagerTest, IndirectOuterPassInvalidation) {
+ typedef decltype(MLAHandle)::Analysis LoopAnalysis;
+
+ MockFunctionAnalysisHandle MFAHandle;
+ FAM.registerPass([&] { return MFAHandle.getAnalysis(); });
+ typedef decltype(MFAHandle)::Analysis FunctionAnalysis;
+
+ // Set up the loop analysis to depend on the function analysis registered
+ // above.
+ ON_CALL(MLAHandle, run(_, _, _))
+ .WillByDefault(Invoke([&](Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR) {
+ auto &FAMP = AM.getResult<FunctionAnalysisManagerLoopProxy>(L, AR);
+ auto &FAM = FAMP.getManager();
+ Function &F = *L.getHeader()->getParent();
+ if (auto *FA = FAM.getCachedResult<FunctionAnalysis>(F))
+ FAMP.registerOuterAnalysisInvalidation<FunctionAnalysis,
+ LoopAnalysis>();
+ return MLAHandle.getResult();
+ }));
+
+ ::testing::InSequence MakeExpectationsSequenced;
+
+ // Compute the analyses across all of 'f' first.
+ EXPECT_CALL(MFPHandle, run(HasName("f"), _))
+ .WillOnce(Invoke([](Function &F, FunctionAnalysisManager &AM) {
+ // Force the computing of the function analysis so it is available in
+ // this function.
+ (void)AM.getResult<FunctionAnalysis>(F);
+ return PreservedAnalyses::all();
+ }));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
+
+ // Now invalidate the function analysis but preserve the loop analyses.
+ // This should trigger immediate invalidation of the loop analyses, despite
+ // the fact that they were preserved.
+ EXPECT_CALL(MFPHandle, run(HasName("f"), _)).WillOnce(InvokeWithoutArgs([] {
+ auto PA = getLoopPassPreservedAnalyses();
+ PA.preserveSet<AllAnalysesOn<Loop>>();
+ return PA;
+ }));
+ EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0.0"), _, _));
+ EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0.1"), _, _));
+ EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0"), _, _));
+
+ // And re-running a requires pass recomputes them.
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
+
+ // When we run over 'g' we don't populate the cache with the function
+ // analysis.
+ EXPECT_CALL(MFPHandle, run(HasName("g"), _))
+ .WillOnce(Return(PreservedAnalyses::all()));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _));
+
+ // Which means that no extra invalidation occurs and cached values are used.
+ EXPECT_CALL(MFPHandle, run(HasName("g"), _)).WillOnce(InvokeWithoutArgs([] {
+ auto PA = getLoopPassPreservedAnalyses();
+ PA.preserveSet<AllAnalysesOn<Loop>>();
+ return PA;
+ }));
+
+ // Build the pipeline and run it.
+ ModulePassManager MPM(true);
+ FunctionPassManager FPM(true);
+ FPM.addPass(MFPHandle.getPass());
+ FPM.addPass(
+ createFunctionToLoopPassAdaptor(RequireAnalysisLoopPass<LoopAnalysis>()));
+ FPM.addPass(MFPHandle.getPass());
+ FPM.addPass(
+ createFunctionToLoopPassAdaptor(RequireAnalysisLoopPass<LoopAnalysis>()));
+ MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
+ MPM.run(*M, MAM);
+}
+
+TEST_F(LoopPassManagerTest, LoopChildInsertion) {
+ // Super boring module with three loops in a single loop nest.
+ M = parseIR(Context, "define void @f() {\n"
+ "entry:\n"
+ " br label %loop.0\n"
+ "loop.0:\n"
+ " br i1 undef, label %loop.0.0, label %end\n"
+ "loop.0.0:\n"
+ " br i1 undef, label %loop.0.0, label %loop.0.1\n"
+ "loop.0.1:\n"
+ " br i1 undef, label %loop.0.1, label %loop.0.2\n"
+ "loop.0.2:\n"
+ " br i1 undef, label %loop.0.2, label %loop.0\n"
+ "end:\n"
+ " ret void\n"
+ "}\n");
+
+ // Build up variables referring into the IR so we can rewrite it below
+ // easily.
+ Function &F = *M->begin();
+ ASSERT_THAT(F, HasName("f"));
+ auto BBI = F.begin();
+ BasicBlock &EntryBB = *BBI++;
+ ASSERT_THAT(EntryBB, HasName("entry"));
+ BasicBlock &Loop0BB = *BBI++;
+ ASSERT_THAT(Loop0BB, HasName("loop.0"));
+ BasicBlock &Loop00BB = *BBI++;
+ ASSERT_THAT(Loop00BB, HasName("loop.0.0"));
+ BasicBlock &Loop01BB = *BBI++;
+ ASSERT_THAT(Loop01BB, HasName("loop.0.1"));
+ BasicBlock &Loop02BB = *BBI++;
+ ASSERT_THAT(Loop02BB, HasName("loop.0.2"));
+ BasicBlock &EndBB = *BBI++;
+ ASSERT_THAT(EndBB, HasName("end"));
+ ASSERT_THAT(BBI, F.end());
+
+ // Build the pass managers and register our pipeline. We build a single loop
+ // pass pipeline consisting of three mock pass runs over each loop. After
+ // this we run both domtree and loop verification passes to make sure that
+ // the IR remained valid during our mutations.
+ ModulePassManager MPM(true);
+ FunctionPassManager FPM(true);
+ LoopPassManager LPM(true);
+ LPM.addPass(MLPHandle.getPass());
+ LPM.addPass(MLPHandle.getPass());
+ LPM.addPass(MLPHandle.getPass());
+ FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM)));
+ FPM.addPass(DominatorTreeVerifierPass());
+ FPM.addPass(LoopVerifierPass());
+ MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
+
+ // All the visit orders are deterministic, so we use simple, fully ordered
+ // expectations.
+ ::testing::InSequence MakeExpectationsSequenced;
+
+ // We run loop passes three times over each of the loops.
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _))
+ .WillOnce(Invoke(getLoopAnalysisResult));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _))
+ .Times(2)
+ .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _))
+ .WillOnce(Invoke(getLoopAnalysisResult));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
+
+ // When running over the middle loop, the second run inserts a new child
+ // loop, adding both it and the current loop to the worklist.
+ BasicBlock *NewLoop010BB;
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _))
+ .WillOnce(Invoke([&](Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR,
+ LPMUpdater &Updater) {
+ auto *NewLoop = new Loop();
+ L.addChildLoop(NewLoop);
+ NewLoop010BB = BasicBlock::Create(Context, "loop.0.1.0", &F, &Loop02BB);
+ BranchInst::Create(&Loop01BB, NewLoop010BB,
+ UndefValue::get(Type::getInt1Ty(Context)),
+ NewLoop010BB);
+ Loop01BB.getTerminator()->replaceUsesOfWith(&Loop01BB, NewLoop010BB);
+ AR.DT.addNewBlock(NewLoop010BB, &Loop01BB);
+ NewLoop->addBasicBlockToLoop(NewLoop010BB, AR.LI);
+ Updater.addChildLoops({NewLoop});
+ return PreservedAnalyses::all();
+ }));
+
+ // We should immediately drop down to fully visit the new inner loop.
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.1.0"), _, _, _))
+ .WillOnce(Invoke(getLoopAnalysisResult));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.1.0"), _, _));
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.1.0"), _, _, _))
+ .Times(2)
+ .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+ // After visiting the inner loop, we should re-visit the second loop
+ // reflecting its new loop nest structure.
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _))
+ .WillOnce(Invoke(getLoopAnalysisResult));
+
+ // In the second run over the middle loop after we've visited the new child,
+ // we add another child to check that we can repeatedly add children, and add
+ // children to a loop that already has children.
+ BasicBlock *NewLoop011BB;
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _))
+ .WillOnce(Invoke([&](Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR,
+ LPMUpdater &Updater) {
+ auto *NewLoop = new Loop();
+ L.addChildLoop(NewLoop);
+ NewLoop011BB = BasicBlock::Create(Context, "loop.0.1.1", &F, &Loop02BB);
+ BranchInst::Create(&Loop01BB, NewLoop011BB,
+ UndefValue::get(Type::getInt1Ty(Context)),
+ NewLoop011BB);
+ NewLoop010BB->getTerminator()->replaceUsesOfWith(&Loop01BB,
+ NewLoop011BB);
+ AR.DT.addNewBlock(NewLoop011BB, NewLoop010BB);
+ NewLoop->addBasicBlockToLoop(NewLoop011BB, AR.LI);
+ Updater.addChildLoops({NewLoop});
+ return PreservedAnalyses::all();
+ }));
+
+ // Again, we should immediately drop down to visit the new, unvisited child
+ // loop. We don't need to revisit the other child though.
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.1.1"), _, _, _))
+ .WillOnce(Invoke(getLoopAnalysisResult));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.1.1"), _, _));
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.1.1"), _, _, _))
+ .Times(2)
+ .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+ // And now we should pop back up to the second loop and do a full pipeline of
+ // three passes on its current form.
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _))
+ .Times(3)
+ .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _))
+ .WillOnce(Invoke(getLoopAnalysisResult));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.2"), _, _));
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _))
+ .Times(2)
+ .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _))
+ .WillOnce(Invoke(getLoopAnalysisResult));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _))
+ .Times(2)
+ .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+ // Now that all the expected actions are registered, run the pipeline over
+ // our module. All of our expectations are verified when the test finishes.
+ MPM.run(*M, MAM);
+}
+
+TEST_F(LoopPassManagerTest, LoopPeerInsertion) {
+ // Super boring module with two loop nests, one of which has two child
+ // loops.
+ M = parseIR(Context, "define void @f() {\n"
+ "entry:\n"
+ " br label %loop.0\n"
+ "loop.0:\n"
+ " br i1 undef, label %loop.0.0, label %loop.2\n"
+ "loop.0.0:\n"
+ " br i1 undef, label %loop.0.0, label %loop.0.2\n"
+ "loop.0.2:\n"
+ " br i1 undef, label %loop.0.2, label %loop.0\n"
+ "loop.2:\n"
+ " br i1 undef, label %loop.2, label %end\n"
+ "end:\n"
+ " ret void\n"
+ "}\n");
+
+ // Build up variables referring into the IR so we can rewrite it below
+ // easily.
+ Function &F = *M->begin();
+ ASSERT_THAT(F, HasName("f"));
+ auto BBI = F.begin();
+ BasicBlock &EntryBB = *BBI++;
+ ASSERT_THAT(EntryBB, HasName("entry"));
+ BasicBlock &Loop0BB = *BBI++;
+ ASSERT_THAT(Loop0BB, HasName("loop.0"));
+ BasicBlock &Loop00BB = *BBI++;
+ ASSERT_THAT(Loop00BB, HasName("loop.0.0"));
+ BasicBlock &Loop02BB = *BBI++;
+ ASSERT_THAT(Loop02BB, HasName("loop.0.2"));
+ BasicBlock &Loop2BB = *BBI++;
+ ASSERT_THAT(Loop2BB, HasName("loop.2"));
+ BasicBlock &EndBB = *BBI++;
+ ASSERT_THAT(EndBB, HasName("end"));
+ ASSERT_THAT(BBI, F.end());
+ Constant *Undefi1 = UndefValue::get(Type::getInt1Ty(Context));
+
+ // Build the pass managers and register our pipeline. We build a single loop
+ // pass pipeline consisting of three mock pass runs over each loop. After
+ // this we run both domtree and loop verification passes to make sure that
+ // the IR remained valid during our mutations.
+ ModulePassManager MPM(true);
+ FunctionPassManager FPM(true);
+ LoopPassManager LPM(true);
+ LPM.addPass(MLPHandle.getPass());
+ LPM.addPass(MLPHandle.getPass());
+ LPM.addPass(MLPHandle.getPass());
+ FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM)));
+ FPM.addPass(DominatorTreeVerifierPass());
+ FPM.addPass(LoopVerifierPass());
+ MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
+
+ // All the visit orders are deterministic, so we use simple, fully ordered
+ // expectations.
+ ::testing::InSequence MakeExpectationsSequenced;
+
+ // We run loop passes three times over each of the loops.
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _))
+ .WillOnce(Invoke(getLoopAnalysisResult));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
+
+ // On the second run, we insert a sibling loop.
+ BasicBlock *NewLoop01BB;
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _))
+ .WillOnce(Invoke([&](Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR,
+ LPMUpdater &Updater) {
+ auto *NewLoop = new Loop();
+ L.getParentLoop()->addChildLoop(NewLoop);
+ NewLoop01BB = BasicBlock::Create(Context, "loop.0.1", &F, &Loop02BB);
+ BranchInst::Create(&Loop02BB, NewLoop01BB, Undefi1, NewLoop01BB);
+ Loop00BB.getTerminator()->replaceUsesOfWith(&Loop02BB, NewLoop01BB);
+ auto *NewDTNode = AR.DT.addNewBlock(NewLoop01BB, &Loop00BB);
+ AR.DT.changeImmediateDominator(AR.DT[&Loop02BB], NewDTNode);
+ NewLoop->addBasicBlockToLoop(NewLoop01BB, AR.LI);
+ Updater.addSiblingLoops({NewLoop});
+ return PreservedAnalyses::all();
+ }));
+ // We finish processing this loop as sibling loops don't perturb the
+ // postorder walk.
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _))
+ .WillOnce(Invoke(getLoopAnalysisResult));
+
+ // We visit the inserted sibling next.
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _))
+ .WillOnce(Invoke(getLoopAnalysisResult));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _))
+ .Times(2)
+ .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _))
+ .WillOnce(Invoke(getLoopAnalysisResult));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.2"), _, _));
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _))
+ .WillOnce(Invoke(getLoopAnalysisResult));
+ // Next, on the third pass run on the last inner loop we add more than one
+ // new sibling, one of which has a nested child loop. By doing this at the
+ // end we make sure that edge case works well.
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _))
+ .WillOnce(Invoke([&](Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR,
+ LPMUpdater &Updater) {
+ Loop *NewLoops[] = {new Loop(), new Loop(), new Loop()};
+ L.getParentLoop()->addChildLoop(NewLoops[0]);
+ L.getParentLoop()->addChildLoop(NewLoops[1]);
+ NewLoops[1]->addChildLoop(NewLoops[2]);
+ auto *NewLoop03BB =
+ BasicBlock::Create(Context, "loop.0.3", &F, &Loop2BB);
+ auto *NewLoop04BB =
+ BasicBlock::Create(Context, "loop.0.4", &F, &Loop2BB);
+ auto *NewLoop040BB =
+ BasicBlock::Create(Context, "loop.0.4.0", &F, &Loop2BB);
+ Loop02BB.getTerminator()->replaceUsesOfWith(&Loop0BB, NewLoop03BB);
+ BranchInst::Create(NewLoop04BB, NewLoop03BB, Undefi1, NewLoop03BB);
+ BranchInst::Create(&Loop0BB, NewLoop040BB, Undefi1, NewLoop04BB);
+ BranchInst::Create(NewLoop04BB, NewLoop040BB, Undefi1, NewLoop040BB);
+ AR.DT.addNewBlock(NewLoop03BB, &Loop02BB);
+ AR.DT.addNewBlock(NewLoop04BB, NewLoop03BB);
+ AR.DT.addNewBlock(NewLoop040BB, NewLoop04BB);
+ NewLoops[0]->addBasicBlockToLoop(NewLoop03BB, AR.LI);
+ NewLoops[1]->addBasicBlockToLoop(NewLoop04BB, AR.LI);
+ NewLoops[2]->addBasicBlockToLoop(NewLoop040BB, AR.LI);
+ Updater.addSiblingLoops({NewLoops[0], NewLoops[1]});
+ return PreservedAnalyses::all();
+ }));
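+ // Note that only the two new sibling loops are reported to the updater;
+ // NewLoops[2] is a child of NewLoops[1] rather than a sibling of the
+ // current loop. The expectations below verify that it is nonetheless
+ // visited, and visited before its parent.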
+
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.3"), _, _, _))
+ .WillOnce(Invoke(getLoopAnalysisResult));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.3"), _, _));
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.3"), _, _, _))
+ .Times(2)
+ .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+ // Note that we need to visit the inner loop of this added sibling before the
+ // sibling itself!
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.4.0"), _, _, _))
+ .WillOnce(Invoke(getLoopAnalysisResult));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.4.0"), _, _));
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.4.0"), _, _, _))
+ .Times(2)
+ .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.4"), _, _, _))
+ .WillOnce(Invoke(getLoopAnalysisResult));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.4"), _, _));
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.4"), _, _, _))
+ .Times(2)
+ .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+ // And only now do we visit the outermost loop of the nest.
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _))
+ .WillOnce(Invoke(getLoopAnalysisResult));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
+ // On the second pass, we add a sibling loop which becomes a new top-level
+ // loop.
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _))
+ .WillOnce(Invoke([&](Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR,
+ LPMUpdater &Updater) {
+ auto *NewLoop = new Loop();
+ AR.LI.addTopLevelLoop(NewLoop);
+ auto *NewLoop1BB = BasicBlock::Create(Context, "loop.1", &F, &Loop2BB);
+ BranchInst::Create(&Loop2BB, NewLoop1BB, Undefi1, NewLoop1BB);
+ Loop0BB.getTerminator()->replaceUsesOfWith(&Loop2BB, NewLoop1BB);
+ auto *NewDTNode = AR.DT.addNewBlock(NewLoop1BB, &Loop0BB);
+ AR.DT.changeImmediateDominator(AR.DT[&Loop2BB], NewDTNode);
+ NewLoop->addBasicBlockToLoop(NewLoop1BB, AR.LI);
+ Updater.addSiblingLoops({NewLoop});
+ return PreservedAnalyses::all();
+ }));
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _))
+ .WillOnce(Invoke(getLoopAnalysisResult));
+
+ EXPECT_CALL(MLPHandle, run(HasName("loop.1"), _, _, _))
+ .WillOnce(Invoke(getLoopAnalysisResult));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.1"), _, _));
+ EXPECT_CALL(MLPHandle, run(HasName("loop.1"), _, _, _))
+ .Times(2)
+ .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+ EXPECT_CALL(MLPHandle, run(HasName("loop.2"), _, _, _))
+ .WillOnce(Invoke(getLoopAnalysisResult));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.2"), _, _));
+ EXPECT_CALL(MLPHandle, run(HasName("loop.2"), _, _, _))
+ .Times(2)
+ .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+ // Now that all the expected actions are registered, run the pipeline over
+ // our module. All of our expectations are verified when the test finishes.
+ MPM.run(*M, MAM);
+}
+
+TEST_F(LoopPassManagerTest, LoopDeletion) {
+ // Build a module with a single loop nest that contains one outer loop with
+ // three subloops, one of which has its own subloop. We will incrementally
+ // delete all of these to test different deletion scenarios.
+ M = parseIR(Context, "define void @f() {\n"
+ "entry:\n"
+ " br label %loop.0\n"
+ "loop.0:\n"
+ " br i1 undef, label %loop.0.0, label %end\n"
+ "loop.0.0:\n"
+ " br i1 undef, label %loop.0.0, label %loop.0.1\n"
+ "loop.0.1:\n"
+ " br i1 undef, label %loop.0.1, label %loop.0.2\n"
+ "loop.0.2:\n"
+ " br i1 undef, label %loop.0.2.0, label %loop.0\n"
+ "loop.0.2.0:\n"
+ " br i1 undef, label %loop.0.2.0, label %loop.0.2\n"
+ "end:\n"
+ " ret void\n"
+ "}\n");
+
+ // Build up variables referring into the IR so we can easily rewrite it
+ // below.
+ Function &F = *M->begin();
+ ASSERT_THAT(F, HasName("f"));
+ auto BBI = F.begin();
+ BasicBlock &EntryBB = *BBI++;
+ ASSERT_THAT(EntryBB, HasName("entry"));
+ BasicBlock &Loop0BB = *BBI++;
+ ASSERT_THAT(Loop0BB, HasName("loop.0"));
+ BasicBlock &Loop00BB = *BBI++;
+ ASSERT_THAT(Loop00BB, HasName("loop.0.0"));
+ BasicBlock &Loop01BB = *BBI++;
+ ASSERT_THAT(Loop01BB, HasName("loop.0.1"));
+ BasicBlock &Loop02BB = *BBI++;
+ ASSERT_THAT(Loop02BB, HasName("loop.0.2"));
+ BasicBlock &Loop020BB = *BBI++;
+ ASSERT_THAT(Loop020BB, HasName("loop.0.2.0"));
+ BasicBlock &EndBB = *BBI++;
+ ASSERT_THAT(EndBB, HasName("end"));
+ ASSERT_THAT(BBI, F.end());
+ Constant *Undefi1 = UndefValue::get(Type::getInt1Ty(Context));
+
+ // Helper to do the actual deletion of a loop. We directly encode this here
+ // to isolate ourselves from the rest of LLVM and for simplicity. Here we can
+ // egregiously cheat based on knowledge of the test case. For example, we
+ // have no PHI nodes and there is always a single immediate dominator.
+ auto DeleteLoopBlocks = [](Loop &L, BasicBlock &IDomBB,
+ LoopStandardAnalysisResults &AR,
+ LPMUpdater &Updater) {
+ for (BasicBlock *LoopBB : L.blocks()) {
+ SmallVector<DomTreeNode *, 4> ChildNodes(AR.DT[LoopBB]->begin(),
+ AR.DT[LoopBB]->end());
+ for (DomTreeNode *ChildNode : ChildNodes)
+ AR.DT.changeImmediateDominator(ChildNode, AR.DT[&IDomBB]);
+ AR.DT.eraseNode(LoopBB);
+ LoopBB->dropAllReferences();
+ }
+ SmallVector<BasicBlock *, 4> LoopBBs(L.block_begin(), L.block_end());
+ Updater.markLoopAsDeleted(L);
+ AR.LI.markAsRemoved(&L);
+ for (BasicBlock *LoopBB : LoopBBs)
+ LoopBB->eraseFromParent();
+ };
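+ // Callers of DeleteLoopBlocks are expected to have already redirected any
+ // branches into the loop and told ScalarEvolution to forget it; the helper
+ // itself only rewires the dominator tree, informs the updater and LoopInfo,
+ // and erases the blocks.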
+
+ // Build up the pass managers.
+ ModulePassManager MPM(true);
+ FunctionPassManager FPM(true);
+ // We run several loop pass pipelines across the loop nest, but they all take
+ // the same form of three mock pass runs in a loop pipeline followed by
+ // domtree and loop verification. We use a lambda to stamp this out each
+ // time.
+ auto AddLoopPipelineAndVerificationPasses = [&] {
+ LoopPassManager LPM(true);
+ LPM.addPass(MLPHandle.getPass());
+ LPM.addPass(MLPHandle.getPass());
+ LPM.addPass(MLPHandle.getPass());
+ FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM)));
+ FPM.addPass(DominatorTreeVerifierPass());
+ FPM.addPass(LoopVerifierPass());
+ };
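+ // Each call to this lambda appends another three-pass loop pipeline plus
+ // the two verification passes to FPM, so the single function pipeline runs
+ // the loop pipeline several times back to back.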
+
+ // All the visit orders are deterministic, so we use simple, fully ordered
+ // expectations.
+ ::testing::InSequence MakeExpectationsSequenced;
+
+ // We run the loop pipeline with three passes over each of the loops. When
+ // running over the middle loop, the second pass in the pipeline deletes it.
+ // This should prevent the third pass from visiting it but otherwise leave
+ // the process unimpacted.
+ AddLoopPipelineAndVerificationPasses();
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _))
+ .WillOnce(Invoke(getLoopAnalysisResult));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _))
+ .Times(2)
+ .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _))
+ .WillOnce(Invoke(getLoopAnalysisResult));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _))
+ .WillOnce(
+ Invoke([&](Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR, LPMUpdater &Updater) {
+ AR.SE.forgetLoop(&L);
+ Loop00BB.getTerminator()->replaceUsesOfWith(&Loop01BB, &Loop02BB);
+ DeleteLoopBlocks(L, Loop00BB, AR, Updater);
+ return PreservedAnalyses::all();
+ }));
+
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.2.0"), _, _, _))
+ .WillOnce(Invoke(getLoopAnalysisResult));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.2.0"), _, _));
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.2.0"), _, _, _))
+ .Times(2)
+ .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _))
+ .WillOnce(Invoke(getLoopAnalysisResult));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.2"), _, _));
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _))
+ .Times(2)
+ .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _))
+ .WillOnce(Invoke(getLoopAnalysisResult));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _))
+ .Times(2)
+ .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+ // Run the loop pipeline again. This time we delete the last loop, which
+ // contains a nested loop within it, and we reuse its inner loop object to
+ // insert a new loop into the nest. This makes sure that we don't reuse
+ // cached analysis results for a removed loop object just because its
+ // pointer matches, and that we can handle nested loop deletion.
+ AddLoopPipelineAndVerificationPasses();
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _))
+ .Times(3)
+ .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.2.0"), _, _, _))
+ .Times(3)
+ .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _))
+ .WillOnce(Invoke(getLoopAnalysisResult));
+ BasicBlock *NewLoop03BB;
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _))
+ .WillOnce(
+ Invoke([&](Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR, LPMUpdater &Updater) {
+ // Delete the inner loop first. We also do this manually because we
+ // want to preserve the loop object and reuse it.
+ AR.SE.forgetLoop(*L.begin());
+ Loop02BB.getTerminator()->replaceUsesOfWith(&Loop020BB, &Loop02BB);
+ assert(std::next((*L.begin())->block_begin()) ==
+ (*L.begin())->block_end() &&
+ "There should only be one block.");
+ assert(AR.DT[&Loop020BB]->getNumChildren() == 0 &&
+ "Cannot have children in the domtree!");
+ AR.DT.eraseNode(&Loop020BB);
+ Updater.markLoopAsDeleted(**L.begin());
+ AR.LI.removeBlock(&Loop020BB);
+ auto *OldL = L.removeChildLoop(L.begin());
+ Loop020BB.eraseFromParent();
+
+ auto *ParentL = L.getParentLoop();
+ AR.SE.forgetLoop(&L);
+ Loop00BB.getTerminator()->replaceUsesOfWith(&Loop02BB, &Loop0BB);
+ DeleteLoopBlocks(L, Loop00BB, AR, Updater);
+
+ // Now insert a new sibling loop, reusing a loop pointer.
+ ParentL->addChildLoop(OldL);
+ NewLoop03BB = BasicBlock::Create(Context, "loop.0.3", &F, &EndBB);
+ BranchInst::Create(&Loop0BB, NewLoop03BB, Undefi1, NewLoop03BB);
+ Loop00BB.getTerminator()->replaceUsesOfWith(&Loop0BB, NewLoop03BB);
+ AR.DT.addNewBlock(NewLoop03BB, &Loop00BB);
+ OldL->addBasicBlockToLoop(NewLoop03BB, AR.LI);
+ Updater.addSiblingLoops({OldL});
+ return PreservedAnalyses::all();
+ }));
+
+ // To respect our inner-to-outer traversal order, we must visit the
+ // newly-inserted sibling of the loop we just deleted before we visit the
+ // outer loop. When we do so, this must compute a fresh analysis result, even
+ // though our new loop has the same pointer value as the loop we deleted.
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.3"), _, _, _))
+ .WillOnce(Invoke(getLoopAnalysisResult));
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.3"), _, _));
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.3"), _, _, _))
+ .Times(2)
+ .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _))
+ .Times(3)
+ .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+ // In the final loop pipeline run we delete every loop, including the last
+ // loop of the nest. Each deletion again happens in the second pass of the
+ // pipeline, so no loop ever reaches its third pass run. We also cover
+ // deleting multiple loops in a single pipeline, deleting the first loop, and
+ // deleting the (last) top-level loop.
+ AddLoopPipelineAndVerificationPasses();
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _))
+ .WillOnce(Invoke(getLoopAnalysisResult));
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _))
+ .WillOnce(
+ Invoke([&](Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR, LPMUpdater &Updater) {
+ AR.SE.forgetLoop(&L);
+ Loop0BB.getTerminator()->replaceUsesOfWith(&Loop00BB, NewLoop03BB);
+ DeleteLoopBlocks(L, Loop0BB, AR, Updater);
+ return PreservedAnalyses::all();
+ }));
+
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.3"), _, _, _))
+ .WillOnce(Invoke(getLoopAnalysisResult));
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.3"), _, _, _))
+ .WillOnce(
+ Invoke([&](Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR, LPMUpdater &Updater) {
+ AR.SE.forgetLoop(&L);
+ Loop0BB.getTerminator()->replaceUsesOfWith(NewLoop03BB, &Loop0BB);
+ DeleteLoopBlocks(L, Loop0BB, AR, Updater);
+ return PreservedAnalyses::all();
+ }));
+
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _))
+ .WillOnce(Invoke(getLoopAnalysisResult));
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _))
+ .WillOnce(
+ Invoke([&](Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR, LPMUpdater &Updater) {
+ AR.SE.forgetLoop(&L);
+ EntryBB.getTerminator()->replaceUsesOfWith(&Loop0BB, &EndBB);
+ DeleteLoopBlocks(L, EntryBB, AR, Updater);
+ return PreservedAnalyses::all();
+ }));
+
+ // Add the function pass pipeline now that it is fully built up and run it
+ // over the module's one function.
+ MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
+ MPM.run(*M, MAM);
}
}