Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Analysis/Analysis.cpp | 2
-rw-r--r--  llvm/lib/Analysis/Delinearization.cpp | 4
-rw-r--r--  llvm/lib/Analysis/DependenceAnalysis.cpp | 6
-rw-r--r--  llvm/lib/Analysis/IVUsers.cpp | 6
-rw-r--r--  llvm/lib/Analysis/LoopAccessAnalysis.cpp | 6
-rw-r--r--  llvm/lib/Analysis/ScalarEvolution.cpp | 254
-rw-r--r--  llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp | 6
-rw-r--r--  llvm/lib/Analysis/ScalarEvolutionExpander.cpp | 67
-rw-r--r--  llvm/lib/CodeGen/MachineFunctionPass.cpp | 2
-rw-r--r--  llvm/lib/Passes/PassBuilder.cpp | 1
-rw-r--r--  llvm/lib/Passes/PassRegistry.def | 2
-rw-r--r--  llvm/lib/Target/PowerPC/PPCCTRLoops.cpp | 6
-rw-r--r--  llvm/lib/Target/PowerPC/PPCLoopDataPrefetch.cpp | 8
-rw-r--r--  llvm/lib/Target/PowerPC/PPCLoopPreIncPrep.cpp | 6
-rw-r--r--  llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp | 8
-rw-r--r--  llvm/lib/Transforms/Scalar/IndVarSimplify.cpp | 8
-rw-r--r--  llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp | 4
-rw-r--r--  llvm/lib/Transforms/Scalar/LICM.cpp | 11
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopDeletion.cpp | 8
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp | 8
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopInterchange.cpp | 6
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopRerollPass.cpp | 6
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopRotation.cpp | 6
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp | 12
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp | 8
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopUnswitch.cpp | 6
-rw-r--r--  llvm/lib/Transforms/Scalar/NaryReassociate.cpp | 8
-rw-r--r--  llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp | 6
-rw-r--r--  llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp | 6
-rw-r--r--  llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp | 6
-rw-r--r--  llvm/lib/Transforms/Utils/LCSSA.cpp | 5
-rw-r--r--  llvm/lib/Transforms/Utils/LoopSimplify.cpp | 5
-rw-r--r--  llvm/lib/Transforms/Utils/LoopUnroll.cpp | 10
-rw-r--r--  llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp | 13
-rw-r--r--  llvm/lib/Transforms/Vectorize/BBVectorize.cpp | 10
-rw-r--r--  llvm/lib/Transforms/Vectorize/LoopVectorize.cpp | 6
-rw-r--r--  llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp | 6
38 files changed, 299 insertions, 251 deletions
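
Most of the per-file churn below follows one mechanical pattern: passes that used to require the ScalarEvolution pass directly now require ScalarEvolutionWrapperPass and pull the ScalarEvolution object out of it via getSE(). A minimal sketch of that consumer-side pattern, written against a hypothetical legacy pass named MyLoopPass that is not part of this patch:

    #include "llvm/Analysis/ScalarEvolution.h"
    #include "llvm/IR/Function.h"
    #include "llvm/Pass.h"

    using namespace llvm;

    namespace {
    // Hypothetical consumer showing the wrapper-pass pattern applied
    // throughout this diff; it is illustrative only.
    struct MyLoopPass : public FunctionPass {
      static char ID;
      MyLoopPass() : FunctionPass(ID) {}

      void getAnalysisUsage(AnalysisUsage &AU) const override {
        // Previously: AU.addRequired<ScalarEvolution>();
        AU.addRequired<ScalarEvolutionWrapperPass>();
        AU.addPreserved<ScalarEvolutionWrapperPass>();
      }

      bool runOnFunction(Function &F) override {
        // Previously: SE = &getAnalysis<ScalarEvolution>();
        ScalarEvolution &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
        (void)SE; // SCEV queries (e.g. SE.getSCEV(...)) would go here.
        return false;
      }
    };
    char MyLoopPass::ID = 0;
    }
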
diff --git a/llvm/lib/Analysis/Analysis.cpp b/llvm/lib/Analysis/Analysis.cpp
index 1ca151da94f..91e7339337a 100644
--- a/llvm/lib/Analysis/Analysis.cpp
+++ b/llvm/lib/Analysis/Analysis.cpp
@@ -63,7 +63,7 @@ void llvm::initializeAnalysis(PassRegistry &Registry) {
initializeRegionPrinterPass(Registry);
initializeRegionOnlyViewerPass(Registry);
initializeRegionOnlyPrinterPass(Registry);
- initializeScalarEvolutionPass(Registry);
+ initializeScalarEvolutionWrapperPassPass(Registry);
initializeScalarEvolutionAliasAnalysisPass(Registry);
initializeTargetTransformInfoWrapperPassPass(Registry);
initializeTypeBasedAliasAnalysisPass(Registry);
diff --git a/llvm/lib/Analysis/Delinearization.cpp b/llvm/lib/Analysis/Delinearization.cpp
index 9d157860326..8e65c4c469a 100644
--- a/llvm/lib/Analysis/Delinearization.cpp
+++ b/llvm/lib/Analysis/Delinearization.cpp
@@ -60,12 +60,12 @@ public:
void Delinearization::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
AU.addRequired<LoopInfoWrapperPass>();
- AU.addRequired<ScalarEvolution>();
+ AU.addRequired<ScalarEvolutionWrapperPass>();
}
bool Delinearization::runOnFunction(Function &F) {
this->F = &F;
- SE = &getAnalysis<ScalarEvolution>();
+ SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
return false;
}
diff --git a/llvm/lib/Analysis/DependenceAnalysis.cpp b/llvm/lib/Analysis/DependenceAnalysis.cpp
index 4ee82f0f134..0e6f222e52e 100644
--- a/llvm/lib/Analysis/DependenceAnalysis.cpp
+++ b/llvm/lib/Analysis/DependenceAnalysis.cpp
@@ -117,7 +117,7 @@ Delinearize("da-delinearize", cl::init(false), cl::Hidden, cl::ZeroOrMore,
INITIALIZE_PASS_BEGIN(DependenceAnalysis, "da",
"Dependence Analysis", true, true)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(DependenceAnalysis, "da",
"Dependence Analysis", true, true)
@@ -133,7 +133,7 @@ FunctionPass *llvm::createDependenceAnalysisPass() {
bool DependenceAnalysis::runOnFunction(Function &F) {
this->F = &F;
AA = &getAnalysis<AliasAnalysis>();
- SE = &getAnalysis<ScalarEvolution>();
+ SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
return false;
}
@@ -146,7 +146,7 @@ void DependenceAnalysis::releaseMemory() {
void DependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
AU.addRequiredTransitive<AliasAnalysis>();
- AU.addRequiredTransitive<ScalarEvolution>();
+ AU.addRequiredTransitive<ScalarEvolutionWrapperPass>();
AU.addRequiredTransitive<LoopInfoWrapperPass>();
}
diff --git a/llvm/lib/Analysis/IVUsers.cpp b/llvm/lib/Analysis/IVUsers.cpp
index 926787d3be9..a1c6b72f9f0 100644
--- a/llvm/lib/Analysis/IVUsers.cpp
+++ b/llvm/lib/Analysis/IVUsers.cpp
@@ -39,7 +39,7 @@ INITIALIZE_PASS_BEGIN(IVUsers, "iv-users",
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_END(IVUsers, "iv-users",
"Induction Variable Users", false, true)
@@ -255,7 +255,7 @@ void IVUsers::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<AssumptionCacheTracker>();
AU.addRequired<LoopInfoWrapperPass>();
AU.addRequired<DominatorTreeWrapperPass>();
- AU.addRequired<ScalarEvolution>();
+ AU.addRequired<ScalarEvolutionWrapperPass>();
AU.setPreservesAll();
}
@@ -266,7 +266,7 @@ bool IVUsers::runOnLoop(Loop *l, LPPassManager &LPM) {
*L->getHeader()->getParent());
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- SE = &getAnalysis<ScalarEvolution>();
+ SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
// Collect ephemeral values so that AddUsersIfInteresting skips them.
EphValues.clear();
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index 13d1235710e..b931589ed04 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -1794,7 +1794,7 @@ void LoopAccessAnalysis::print(raw_ostream &OS, const Module *M) const {
}
bool LoopAccessAnalysis::runOnFunction(Function &F) {
- SE = &getAnalysis<ScalarEvolution>();
+ SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
TLI = TLIP ? &TLIP->getTLI() : nullptr;
AA = &getAnalysis<AliasAnalysis>();
@@ -1805,7 +1805,7 @@ bool LoopAccessAnalysis::runOnFunction(Function &F) {
}
void LoopAccessAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequired<ScalarEvolution>();
+ AU.addRequired<ScalarEvolutionWrapperPass>();
AU.addRequired<AliasAnalysis>();
AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequired<LoopInfoWrapperPass>();
@@ -1819,7 +1819,7 @@ static const char laa_name[] = "Loop Access Analysis";
INITIALIZE_PASS_BEGIN(LoopAccessAnalysis, LAA_NAME, laa_name, false, true)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
-INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(LoopAccessAnalysis, LAA_NAME, laa_name, false, true)
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 10d3acb46af..7cf08bd7156 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -114,16 +114,6 @@ static cl::opt<bool>
VerifySCEV("verify-scev",
cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
-INITIALIZE_PASS_BEGIN(ScalarEvolution, "scalar-evolution",
- "Scalar Evolution Analysis", false, true)
-INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
-INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
-INITIALIZE_PASS_END(ScalarEvolution, "scalar-evolution",
- "Scalar Evolution Analysis", false, true)
-char ScalarEvolution::ID = 0;
-
//===----------------------------------------------------------------------===//
// SCEV class definitions
//===----------------------------------------------------------------------===//
@@ -1983,7 +1973,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
Flags = StrengthenNoWrapFlags(this, scAddExpr, Ops, Flags);
// Sort by complexity, this groups all similar expression types together.
- GroupByComplexity(Ops, LI);
+ GroupByComplexity(Ops, &LI);
// If there are any constants, fold them together.
unsigned Idx = 0;
@@ -2391,7 +2381,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags);
// Sort by complexity, this groups all similar expression types together.
- GroupByComplexity(Ops, LI);
+ GroupByComplexity(Ops, &LI);
// If there are any constants, fold them together.
unsigned Idx = 0;
@@ -2859,10 +2849,10 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
// Canonicalize nested AddRecs in by nesting them in order of loop depth.
if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
const Loop *NestedLoop = NestedAR->getLoop();
- if (L->contains(NestedLoop) ?
- (L->getLoopDepth() < NestedLoop->getLoopDepth()) :
- (!NestedLoop->contains(L) &&
- DT->dominates(L->getHeader(), NestedLoop->getHeader()))) {
+ if (L->contains(NestedLoop)
+ ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
+ : (!NestedLoop->contains(L) &&
+ DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
NestedAR->op_end());
Operands[0] = NestedAR->getStart();
@@ -2997,7 +2987,7 @@ ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
#endif
// Sort by complexity, this groups all similar expression types together.
- GroupByComplexity(Ops, LI);
+ GroupByComplexity(Ops, &LI);
// If there are any constants, fold them together.
unsigned Idx = 0;
@@ -3101,7 +3091,7 @@ ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
#endif
// Sort by complexity, this groups all similar expression types together.
- GroupByComplexity(Ops, LI);
+ GroupByComplexity(Ops, &LI);
// If there are any constants, fold them together.
unsigned Idx = 0;
@@ -3202,7 +3192,7 @@ const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
// constant expression and then folding it back into a ConstantInt.
// This is just a compile-time optimization.
return getConstant(IntTy,
- F->getParent()->getDataLayout().getTypeAllocSize(AllocTy));
+ F.getParent()->getDataLayout().getTypeAllocSize(AllocTy));
}
const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
@@ -3213,7 +3203,7 @@ const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
// This is just a compile-time optimization.
return getConstant(
IntTy,
- F->getParent()->getDataLayout().getStructLayout(STy)->getElementOffset(
+ F.getParent()->getDataLayout().getStructLayout(STy)->getElementOffset(
FieldNo));
}
@@ -3256,7 +3246,7 @@ bool ScalarEvolution::isSCEVable(Type *Ty) const {
/// for which isSCEVable must return true.
uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
assert(isSCEVable(Ty) && "Type is not SCEVable!");
- return F->getParent()->getDataLayout().getTypeSizeInBits(Ty);
+ return F.getParent()->getDataLayout().getTypeSizeInBits(Ty);
}
/// getEffectiveSCEVType - Return a type with the same bitwidth as
@@ -3272,11 +3262,11 @@ Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
// The only other support type is pointer.
assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
- return F->getParent()->getDataLayout().getIntPtrType(Ty);
+ return F.getParent()->getDataLayout().getIntPtrType(Ty);
}
const SCEV *ScalarEvolution::getCouldNotCompute() {
- return &CouldNotCompute;
+ return CouldNotCompute.get();
}
namespace {
@@ -3621,7 +3611,7 @@ ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) {
/// a loop header, making it a potential recurrence, or it doesn't.
///
const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
- if (const Loop *L = LI->getLoopFor(PN->getParent()))
+ if (const Loop *L = LI.getLoopFor(PN->getParent()))
if (L->getHeader() == PN->getParent()) {
// The loop may have multiple entrances or multiple exits; we can analyze
// this phi as an addrec if it has a unique entry value and a unique
@@ -3767,9 +3757,9 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
// PHI's incoming blocks are in a different loop, in which case doing so
// risks breaking LCSSA form. Instcombine would normally zap these, but
// it doesn't have DominatorTree information, so it may miss cases.
- if (Value *V =
- SimplifyInstruction(PN, F->getParent()->getDataLayout(), TLI, DT, AC))
- if (LI->replacementPreservesLCSSAForm(PN, V))
+ if (Value *V = SimplifyInstruction(PN, F.getParent()->getDataLayout(), &TLI,
+ &DT, &AC))
+ if (LI.replacementPreservesLCSSAForm(PN, V))
return getSCEV(V);
// If it's not a loop phi, we can't handle it yet.
@@ -3864,8 +3854,8 @@ ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
// For a SCEVUnknown, ask ValueTracking.
unsigned BitWidth = getTypeSizeInBits(U->getType());
APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
- computeKnownBits(U->getValue(), Zeros, Ones,
- F->getParent()->getDataLayout(), 0, AC, nullptr, DT);
+ computeKnownBits(U->getValue(), Zeros, Ones, F.getParent()->getDataLayout(),
+ 0, &AC, nullptr, &DT);
return Zeros.countTrailingOnes();
}
@@ -4095,18 +4085,18 @@ ScalarEvolution::getRange(const SCEV *S,
// Split here to avoid paying the compile-time cost of calling both
// computeKnownBits and ComputeNumSignBits. This restriction can be lifted
// if needed.
- const DataLayout &DL = F->getParent()->getDataLayout();
+ const DataLayout &DL = F.getParent()->getDataLayout();
if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
// For a SCEVUnknown, ask ValueTracking.
APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
- computeKnownBits(U->getValue(), Zeros, Ones, DL, 0, AC, nullptr, DT);
+ computeKnownBits(U->getValue(), Zeros, Ones, DL, 0, &AC, nullptr, &DT);
if (Ones != ~Zeros + 1)
ConservativeResult =
ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1));
} else {
assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED &&
"generalize as needed!");
- unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, AC, nullptr, DT);
+ unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
if (NS > 1)
ConservativeResult = ConservativeResult.intersectWith(
ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
@@ -4139,7 +4129,7 @@ SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) {
// recurrence, but getting that requires computing the SCEV of the operands,
// which can be expensive. This check we can do cheaply to rule out some
// cases early.
- Loop *innermostContainingLoop = LI->getLoopFor(BinOp->getParent());
+ Loop *innermostContainingLoop = LI.getLoopFor(BinOp->getParent());
if (innermostContainingLoop == nullptr ||
innermostContainingLoop->getHeader() != BinOp->getParent())
return SCEV::FlagAnyWrap;
@@ -4190,7 +4180,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
// reachable. Such instructions don't matter, and they aren't required
// to obey basic rules for definitions dominating uses which this
// analysis depends on.
- if (!DT->isReachableFromEntry(I->getParent()))
+ if (!DT.isReachableFromEntry(I->getParent()))
return getUnknown(V);
} else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
Opcode = CE->getOpcode();
@@ -4304,7 +4294,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
unsigned BitWidth = A.getBitWidth();
APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
computeKnownBits(U->getOperand(0), KnownZero, KnownOne,
- F->getParent()->getDataLayout(), 0, AC, nullptr, DT);
+ F.getParent()->getDataLayout(), 0, &AC, nullptr, &DT);
APInt EffectiveMask =
APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ);
@@ -5012,7 +5002,7 @@ ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
// MaxBECount is conservatively the maximum EL.Max, where CouldNotCompute is
// considered greater than any computable EL.Max.
if (EL.Max != getCouldNotCompute() && Latch &&
- DT->dominates(ExitBB, Latch)) {
+ DT.dominates(ExitBB, Latch)) {
if (!MustExitMaxBECount)
MustExitMaxBECount = EL.Max;
else {
@@ -5625,7 +5615,7 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
unsigned NumIterations = BEs.getZExtValue(); // must be in range
unsigned IterationNum = 0;
- const DataLayout &DL = F->getParent()->getDataLayout();
+ const DataLayout &DL = F.getParent()->getDataLayout();
for (; ; ++IterationNum) {
if (IterationNum == NumIterations)
return RetVal = CurrentIterVals[PN]; // Got exit value!
@@ -5634,7 +5624,7 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
// EvaluateExpression adds non-phi values to the CurrentIterVals map.
DenseMap<Instruction *, Constant *> NextIterVals;
Constant *NextPHI =
- EvaluateExpression(BEValue, L, CurrentIterVals, DL, TLI);
+ EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
if (!NextPHI)
return nullptr; // Couldn't evaluate!
NextIterVals[PN] = NextPHI;
@@ -5659,7 +5649,7 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
Constant *&NextPHI = NextIterVals[PHI];
if (!NextPHI) { // Not already computed.
Value *BEValue = PHI->getIncomingValue(SecondIsBackedge);
- NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, TLI);
+ NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
}
if (NextPHI != I->second)
StoppedEvolving = false;
@@ -5711,10 +5701,10 @@ const SCEV *ScalarEvolution::ComputeExitCountExhaustively(const Loop *L,
// the loop symbolically to determine when the condition gets a value of
// "ExitWhen".
unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
- const DataLayout &DL = F->getParent()->getDataLayout();
+ const DataLayout &DL = F.getParent()->getDataLayout();
for (unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){
ConstantInt *CondVal = dyn_cast_or_null<ConstantInt>(
- EvaluateExpression(Cond, L, CurrentIterVals, DL, TLI));
+ EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI));
// Couldn't symbolically evaluate.
if (!CondVal) return getCouldNotCompute();
@@ -5744,7 +5734,7 @@ const SCEV *ScalarEvolution::ComputeExitCountExhaustively(const Loop *L,
if (NextPHI) continue; // Already computed!
Value *BEValue = PHI->getIncomingValue(SecondIsBackedge);
- NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, TLI);
+ NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
}
CurrentIterVals.swap(NextIterVals);
}
@@ -5889,7 +5879,7 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
// exit value from the loop without using SCEVs.
if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
- const Loop *LI = (*this->LI)[I->getParent()];
+ const Loop *LI = this->LI[I->getParent()];
if (LI && LI->getParentLoop() == L) // Looking for loop exit value.
if (PHINode *PN = dyn_cast<PHINode>(I))
if (PN->getParent() == LI->getHeader()) {
@@ -5947,16 +5937,16 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
// Check to see if getSCEVAtScope actually made an improvement.
if (MadeImprovement) {
Constant *C = nullptr;
- const DataLayout &DL = F->getParent()->getDataLayout();
+ const DataLayout &DL = F.getParent()->getDataLayout();
if (const CmpInst *CI = dyn_cast<CmpInst>(I))
C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
- Operands[1], DL, TLI);
+ Operands[1], DL, &TLI);
else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
if (!LI->isVolatile())
C = ConstantFoldLoadFromConstPtr(Operands[0], DL);
} else
C = ConstantFoldInstOperands(I->getOpcode(), I->getType(), Operands,
- DL, TLI);
+ DL, &TLI);
if (!C) return V;
return getSCEV(C);
}
@@ -6377,7 +6367,7 @@ ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
// A loop's header is defined to be a block that dominates the loop.
// If the header has a unique predecessor outside the loop, it must be
// a block that has exactly one successor that can reach the loop.
- if (Loop *L = LI->getLoopFor(BB))
+ if (Loop *L = LI.getLoopFor(BB))
return std::make_pair(L->getLoopPredecessor(), L->getHeader());
return std::pair<BasicBlock *, BasicBlock *>();
@@ -6969,11 +6959,11 @@ ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
return true;
// Check conditions due to any @llvm.assume intrinsics.
- for (auto &AssumeVH : AC->assumptions()) {
+ for (auto &AssumeVH : AC.assumptions()) {
if (!AssumeVH)
continue;
auto *CI = cast<CallInst>(AssumeVH);
- if (!DT->dominates(CI, Latch->getTerminator()))
+ if (!DT.dominates(CI, Latch->getTerminator()))
continue;
if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
@@ -7002,12 +6992,11 @@ ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
// If the loop is not reachable from the entry block, we risk running into an
// infinite loop as we walk up into the dom tree. These loops do not matter
// anyway, so we just return a conservative answer when we see them.
- if (!DT->isReachableFromEntry(L->getHeader()))
+ if (!DT.isReachableFromEntry(L->getHeader()))
return false;
- for (DomTreeNode *DTN = (*DT)[Latch], *HeaderDTN = (*DT)[L->getHeader()];
- DTN != HeaderDTN;
- DTN = DTN->getIDom()) {
+ for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
+ DTN != HeaderDTN; DTN = DTN->getIDom()) {
assert(DTN && "should reach the loop header before reaching the root!");
@@ -7031,7 +7020,7 @@ ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
// We're constructively (and conservatively) enumerating edges within the
// loop body that dominate the latch. The dominator tree better agree
// with us on this:
- assert(DT->dominates(DominatingEdge, Latch) && "should be!");
+ assert(DT.dominates(DominatingEdge, Latch) && "should be!");
if (isImpliedCond(Pred, LHS, RHS, Condition,
BB != ContinuePredicate->getSuccessor(0)))
@@ -7076,11 +7065,11 @@ ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
}
// Check conditions due to any @llvm.assume intrinsics.
- for (auto &AssumeVH : AC->assumptions()) {
+ for (auto &AssumeVH : AC.assumptions()) {
if (!AssumeVH)
continue;
auto *CI = cast<CallInst>(AssumeVH);
- if (!DT->dominates(CI, L->getHeader()))
+ if (!DT.dominates(CI, L->getHeader()))
continue;
if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
@@ -8312,22 +8301,34 @@ ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
// ScalarEvolution Class Implementation
//===----------------------------------------------------------------------===//
-ScalarEvolution::ScalarEvolution()
- : FunctionPass(ID), WalkingBEDominatingConds(false), ValuesAtScopes(64),
- LoopDispositions(64), BlockDispositions(64), FirstUnknown(nullptr) {
- initializeScalarEvolutionPass(*PassRegistry::getPassRegistry());
-}
-
-bool ScalarEvolution::runOnFunction(Function &F) {
- this->F = &F;
- AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
- LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
- TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
- DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- return false;
-}
-
-void ScalarEvolution::releaseMemory() {
+ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI,
+ AssumptionCache &AC, DominatorTree &DT,
+ LoopInfo &LI)
+ : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI),
+ CouldNotCompute(new SCEVCouldNotCompute()),
+ WalkingBEDominatingConds(false), ValuesAtScopes(64), LoopDispositions(64),
+ BlockDispositions(64), FirstUnknown(nullptr) {}
+
+ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg)
+ : F(Arg.F), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT), LI(Arg.LI),
+ CouldNotCompute(std::move(Arg.CouldNotCompute)),
+ ValueExprMap(std::move(Arg.ValueExprMap)),
+ WalkingBEDominatingConds(false),
+ BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)),
+ ConstantEvolutionLoopExitValue(
+ std::move(Arg.ConstantEvolutionLoopExitValue)),
+ ValuesAtScopes(std::move(Arg.ValuesAtScopes)),
+ LoopDispositions(std::move(Arg.LoopDispositions)),
+ BlockDispositions(std::move(Arg.BlockDispositions)),
+ UnsignedRanges(std::move(Arg.UnsignedRanges)),
+ SignedRanges(std::move(Arg.SignedRanges)),
+ UniqueSCEVs(std::move(Arg.UniqueSCEVs)),
+ SCEVAllocator(std::move(Arg.SCEVAllocator)),
+ FirstUnknown(Arg.FirstUnknown) {
+ Arg.FirstUnknown = nullptr;
+}
+
+ScalarEvolution::~ScalarEvolution() {
// Iterate through all the SCEVUnknown instances and call their
// destructors, so that they release their references to their values.
for (SCEVUnknown *U = FirstUnknown; U; U = U->Next)
@@ -8346,24 +8347,6 @@ void ScalarEvolution::releaseMemory() {
assert(PendingLoopPredicates.empty() && "isImpliedCond garbage");
assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!");
-
- BackedgeTakenCounts.clear();
- ConstantEvolutionLoopExitValue.clear();
- ValuesAtScopes.clear();
- LoopDispositions.clear();
- BlockDispositions.clear();
- UnsignedRanges.clear();
- SignedRanges.clear();
- UniqueSCEVs.clear();
- SCEVAllocator.Reset();
-}
-
-void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesAll();
- AU.addRequiredTransitive<AssumptionCacheTracker>();
- AU.addRequiredTransitive<LoopInfoWrapperPass>();
- AU.addRequiredTransitive<DominatorTreeWrapperPass>();
- AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
}
bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
@@ -8405,7 +8388,7 @@ static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
OS << "\n";
}
-void ScalarEvolution::print(raw_ostream &OS, const Module *) const {
+void ScalarEvolution::print(raw_ostream &OS) const {
// ScalarEvolution's implementation of the print method is to print
// out SCEV values of all instructions that are interesting. Doing
// this potentially causes it to create new SCEV objects though,
@@ -8415,7 +8398,7 @@ void ScalarEvolution::print(raw_ostream &OS, const Module *) const {
ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
OS << "Classifying expressions for: ";
- F->printAsOperand(OS, /*PrintType=*/false);
+ F.printAsOperand(OS, /*PrintType=*/false);
OS << "\n";
for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
if (isSCEVable(I->getType()) && !isa<CmpInst>(*I)) {
@@ -8430,7 +8413,7 @@ void ScalarEvolution::print(raw_ostream &OS, const Module *) const {
SE.getSignedRange(SV).print(OS);
}
- const Loop *L = LI->getLoopFor((*I).getParent());
+ const Loop *L = LI.getLoopFor((*I).getParent());
const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
if (AtUse != SV) {
@@ -8458,9 +8441,9 @@ void ScalarEvolution::print(raw_ostream &OS, const Module *) const {
}
OS << "Determining loop execution counts for: ";
- F->printAsOperand(OS, /*PrintType=*/false);
+ F.printAsOperand(OS, /*PrintType=*/false);
OS << "\n";
- for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
+ for (LoopInfo::iterator I = LI.begin(), E = LI.end(); I != E; ++I)
PrintLoopInfo(OS, &SE, *I);
}
@@ -8604,7 +8587,7 @@ ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
// produces the addrec's value is a PHI, and a PHI effectively properly
// dominates its entire containing block.
const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
- if (!DT->dominates(AR->getLoop()->getHeader(), BB))
+ if (!DT.dominates(AR->getLoop()->getHeader(), BB))
return DoesNotDominateBlock;
}
// FALL THROUGH into SCEVNAryExpr handling.
@@ -8641,7 +8624,7 @@ ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
if (I->getParent() == BB)
return DominatesBlock;
- if (DT->properlyDominates(I->getParent(), BB))
+ if (DT.properlyDominates(I->getParent(), BB))
return ProperlyDominatesBlock;
return DoesNotDominateBlock;
}
@@ -8735,24 +8718,21 @@ getLoopBackedgeTakenCounts(Loop *L, VerifyMap &Map, ScalarEvolution &SE) {
}
}
-void ScalarEvolution::verifyAnalysis() const {
- if (!VerifySCEV)
- return;
-
+void ScalarEvolution::verify() const {
ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
// Gather stringified backedge taken counts for all loops using SCEV's caches.
// FIXME: It would be much better to store actual values instead of strings,
// but SCEV pointers will change if we drop the caches.
VerifyMap BackedgeDumpsOld, BackedgeDumpsNew;
- for (LoopInfo::reverse_iterator I = LI->rbegin(), E = LI->rend(); I != E; ++I)
+ for (LoopInfo::reverse_iterator I = LI.rbegin(), E = LI.rend(); I != E; ++I)
getLoopBackedgeTakenCounts(*I, BackedgeDumpsOld, SE);
- // Gather stringified backedge taken counts for all loops without using
- // SCEV's caches.
- SE.releaseMemory();
- for (LoopInfo::reverse_iterator I = LI->rbegin(), E = LI->rend(); I != E; ++I)
- getLoopBackedgeTakenCounts(*I, BackedgeDumpsNew, SE);
+ // Gather stringified backedge taken counts for all loops using a fresh
+ // ScalarEvolution object.
+ ScalarEvolution SE2(F, TLI, AC, DT, LI);
+ for (LoopInfo::reverse_iterator I = LI.rbegin(), E = LI.rend(); I != E; ++I)
+ getLoopBackedgeTakenCounts(*I, BackedgeDumpsNew, SE2);
// Now compare whether they're the same with and without caches. This allows
// verifying that no pass changed the cache.
@@ -8785,3 +8765,63 @@ void ScalarEvolution::verifyAnalysis() const {
// TODO: Verify more things.
}
+
+char ScalarEvolutionAnalysis::PassID;
+
+ScalarEvolution ScalarEvolutionAnalysis::run(Function &F,
+ AnalysisManager<Function> *AM) {
+ return ScalarEvolution(F, AM->getResult<TargetLibraryAnalysis>(F),
+ AM->getResult<AssumptionAnalysis>(F),
+ AM->getResult<DominatorTreeAnalysis>(F),
+ AM->getResult<LoopAnalysis>(F));
+}
+
+PreservedAnalyses
+ScalarEvolutionPrinterPass::run(Function &F, AnalysisManager<Function> *AM) {
+ AM->getResult<ScalarEvolutionAnalysis>(F).print(OS);
+ return PreservedAnalyses::all();
+}
+
+INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution",
+ "Scalar Evolution Analysis", false, true)
+INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
+INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
+INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution",
+ "Scalar Evolution Analysis", false, true)
+char ScalarEvolutionWrapperPass::ID = 0;
+
+ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) {
+ initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry());
+}
+
+bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) {
+ SE.reset(new ScalarEvolution(
+ F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
+ getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
+ getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
+ getAnalysis<LoopInfoWrapperPass>().getLoopInfo()));
+ return false;
+}
+
+void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); }
+
+void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const {
+ SE->print(OS);
+}
+
+void ScalarEvolutionWrapperPass::verifyAnalysis() const {
+ if (!VerifySCEV)
+ return;
+
+ SE->verify();
+}
+
+void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ AU.addRequiredTransitive<AssumptionCacheTracker>();
+ AU.addRequiredTransitive<LoopInfoWrapperPass>();
+ AU.addRequiredTransitive<DominatorTreeWrapperPass>();
+ AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
+}
diff --git a/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp b/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
index 58d3322683a..6fcf27ea4ab 100644
--- a/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
@@ -27,7 +27,7 @@ char ScalarEvolutionAliasAnalysis::ID = 0;
INITIALIZE_AG_PASS_BEGIN(ScalarEvolutionAliasAnalysis, AliasAnalysis, "scev-aa",
"ScalarEvolution-based Alias Analysis", false, true,
false)
-INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_AG_PASS_END(ScalarEvolutionAliasAnalysis, AliasAnalysis, "scev-aa",
"ScalarEvolution-based Alias Analysis", false, true,
false)
@@ -37,14 +37,14 @@ FunctionPass *llvm::createScalarEvolutionAliasAnalysisPass() {
}
void ScalarEvolutionAliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequiredTransitive<ScalarEvolution>();
+ AU.addRequiredTransitive<ScalarEvolutionWrapperPass>();
AU.setPreservesAll();
AliasAnalysis::getAnalysisUsage(AU);
}
bool ScalarEvolutionAliasAnalysis::runOnFunction(Function &F) {
InitializeAliasAnalysis(this, &F.getParent()->getDataLayout());
- SE = &getAnalysis<ScalarEvolution>();
+ SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
return false;
}
diff --git a/llvm/lib/Analysis/ScalarEvolutionExpander.cpp b/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
index 7789423cf6c..d0b36431130 100644
--- a/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -80,7 +80,7 @@ Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
// We assert at the end of the function since IP might point to an
// instruction with different dominance properties than a cast
// (an invoke for example) and not dominate BIP (but the cast does).
- assert(SE.DT->dominates(Ret, BIP));
+ assert(SE.DT.dominates(Ret, BIP));
rememberInstruction(Ret);
return Ret;
@@ -186,7 +186,7 @@ Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
BuilderType::InsertPointGuard Guard(Builder);
// Move the insertion point out of as many loops as we can.
- while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
+ while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
BasicBlock *Preheader = L->getLoopPreheader();
if (!Preheader) break;
@@ -485,7 +485,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));
assert(!isa<Instruction>(V) ||
- SE.DT->dominates(cast<Instruction>(V), Builder.GetInsertPoint()));
+ SE.DT.dominates(cast<Instruction>(V), Builder.GetInsertPoint()));
// Expand the operands for a plain byte offset.
Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);
@@ -519,7 +519,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
BuilderType::InsertPointGuard Guard(Builder);
// Move the insertion point out of as many loops as we can.
- while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
+ while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
BasicBlock *Preheader = L->getLoopPreheader();
if (!Preheader) break;
@@ -539,7 +539,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
BuilderType::InsertPoint SaveInsertPt = Builder.saveIP();
// Move the insertion point out of as many loops as we can.
- while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
+ while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
if (!L->isLoopInvariant(V)) break;
bool AnyIndexNotLoopInvariant = false;
@@ -605,7 +605,7 @@ const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
return nullptr;
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
- return Pair.first->second = SE.LI->getLoopFor(I->getParent());
+ return Pair.first->second = SE.LI.getLoopFor(I->getParent());
// A non-instruction has no relevant loops.
return nullptr;
}
@@ -615,7 +615,7 @@ const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
L = AR->getLoop();
for (SCEVNAryExpr::op_iterator I = N->op_begin(), E = N->op_end();
I != E; ++I)
- L = PickMostRelevantLoop(L, getRelevantLoop(*I), *SE.DT);
+ L = PickMostRelevantLoop(L, getRelevantLoop(*I), SE.DT);
return RelevantLoops[N] = L;
}
if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
@@ -623,10 +623,8 @@ const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
return RelevantLoops[C] = Result;
}
if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
- const Loop *Result =
- PickMostRelevantLoop(getRelevantLoop(D->getLHS()),
- getRelevantLoop(D->getRHS()),
- *SE.DT);
+ const Loop *Result = PickMostRelevantLoop(
+ getRelevantLoop(D->getLHS()), getRelevantLoop(D->getRHS()), SE.DT);
return RelevantLoops[D] = Result;
}
llvm_unreachable("Unexpected SCEV type!");
@@ -681,7 +679,7 @@ Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
// Sort by loop. Use a stable sort so that constants follow non-constants and
// pointer operands precede non-pointer operands.
- std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));
+ std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(SE.DT));
// Emit instructions to add all the operands. Hoist as much as possible
// out of loops, and form meaningful getelementptrs where possible.
@@ -749,7 +747,7 @@ Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));
// Sort by loop. Use a stable sort so that constants follow non-constants.
- std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));
+ std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(SE.DT));
// Emit instructions to mul all the operands. Hoist as much as possible
// out of loops.
@@ -836,7 +834,7 @@ bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
for (User::op_iterator OI = IncV->op_begin()+1,
OE = IncV->op_end(); OI != OE; ++OI)
if (Instruction *OInst = dyn_cast<Instruction>(OI))
- if (!SE.DT->dominates(OInst, IVIncInsertPos))
+ if (!SE.DT.dominates(OInst, IVIncInsertPos))
return false;
}
// Advance to the next instruction.
@@ -875,7 +873,7 @@ Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
case Instruction::Add:
case Instruction::Sub: {
Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
- if (!OInst || SE.DT->dominates(OInst, InsertPos))
+ if (!OInst || SE.DT.dominates(OInst, InsertPos))
return dyn_cast<Instruction>(IncV->getOperand(0));
return nullptr;
}
@@ -887,7 +885,7 @@ Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
if (isa<Constant>(*I))
continue;
if (Instruction *OInst = dyn_cast<Instruction>(*I)) {
- if (!SE.DT->dominates(OInst, InsertPos))
+ if (!SE.DT.dominates(OInst, InsertPos))
return nullptr;
}
if (allowScale) {
@@ -914,13 +912,13 @@ Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
/// it available to other uses in this loop. Recursively hoist any operands,
/// until we reach a value that dominates InsertPos.
bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
- if (SE.DT->dominates(IncV, InsertPos))
+ if (SE.DT.dominates(IncV, InsertPos))
return true;
// InsertPos must itself dominate IncV so that IncV's new position satisfies
// its existing users.
- if (isa<PHINode>(InsertPos)
- || !SE.DT->dominates(InsertPos->getParent(), IncV->getParent()))
+ if (isa<PHINode>(InsertPos) ||
+ !SE.DT.dominates(InsertPos->getParent(), IncV->getParent()))
return false;
// Check that the chain of IV operands leading back to Phi can be hoisted.
@@ -932,7 +930,7 @@ bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
// IncV is safe to hoist.
IVIncs.push_back(IncV);
IncV = Oper;
- if (SE.DT->dominates(IncV, InsertPos))
+ if (SE.DT.dominates(IncV, InsertPos))
break;
}
for (SmallVectorImpl<Instruction*>::reverse_iterator I = IVIncs.rbegin(),
@@ -1086,8 +1084,9 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
// Only try partially matching scevs that need truncation and/or
// step-inversion if we know this loop is outside the current loop.
- bool TryNonMatchingSCEV = IVIncInsertLoop &&
- SE.DT->properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());
+ bool TryNonMatchingSCEV =
+ IVIncInsertLoop &&
+ SE.DT.properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());
for (BasicBlock::iterator I = L->getHeader()->begin();
PHINode *PN = dyn_cast<PHINode>(I); ++I) {
@@ -1144,7 +1143,7 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
// Potentially, move the increment. We have made sure in
// isExpandedAddRecExprPHI or hoistIVInc that this is possible.
if (L == IVIncInsertLoop)
- hoistBeforePos(SE.DT, IncV, IVIncInsertPos, AddRecPhiMatch);
+ hoistBeforePos(&SE.DT, IncV, IVIncInsertPos, AddRecPhiMatch);
// Ok, the add recurrence looks usable.
// Remember this PHI, even in post-inc mode.
@@ -1174,8 +1173,8 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
// StartV must be hoisted into L's preheader to dominate the new phi.
assert(!isa<Instruction>(StartV) ||
- SE.DT->properlyDominates(cast<Instruction>(StartV)->getParent(),
- L->getHeader()));
+ SE.DT.properlyDominates(cast<Instruction>(StartV)->getParent(),
+ L->getHeader()));
// Expand code for the step value. Do this before creating the PHI so that PHI
// reuse code doesn't see an incomplete PHI.
@@ -1251,9 +1250,8 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
if (PostIncLoops.count(L)) {
PostIncLoopSet Loops;
Loops.insert(L);
- Normalized =
- cast<SCEVAddRecExpr>(TransformForPostIncUse(Normalize, S, nullptr,
- nullptr, Loops, SE, *SE.DT));
+ Normalized = cast<SCEVAddRecExpr>(TransformForPostIncUse(
+ Normalize, S, nullptr, nullptr, Loops, SE, SE.DT));
}
// Strip off any non-loop-dominating component from the addrec start.
@@ -1303,9 +1301,8 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
// For an expansion to use the postinc form, the client must call
// expandCodeFor with an InsertPoint that is either outside the PostIncLoop
// or dominated by IVIncInsertPos.
- if (isa<Instruction>(Result)
- && !SE.DT->dominates(cast<Instruction>(Result),
- Builder.GetInsertPoint())) {
+ if (isa<Instruction>(Result) &&
+ !SE.DT.dominates(cast<Instruction>(Result), Builder.GetInsertPoint())) {
// The induction variable's postinc expansion does not dominate this use.
// IVUsers tries to prevent this case, so it is rare. However, it can
// happen when an IVUser outside the loop is not dominated by the latch
@@ -1608,7 +1605,7 @@ Value *SCEVExpander::expand(const SCEV *S) {
// Compute an insertion point for this SCEV object. Hoist the instructions
// as far out in the loop nest as possible.
Instruction *InsertPt = Builder.GetInsertPoint();
- for (Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock()); ;
+ for (Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock());;
L = L->getParentLoop())
if (SE.isLoopInvariant(S, L)) {
if (!L) break;
@@ -1719,7 +1716,7 @@ unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
// Fold constant phis. They may be congruent to other constant phis and
// would confuse the logic below that expects proper IVs.
- if (Value *V = SimplifyInstruction(Phi, DL, SE.TLI, SE.DT, SE.AC)) {
+ if (Value *V = SimplifyInstruction(Phi, DL, &SE.TLI, &SE.DT, &SE.AC)) {
Phi->replaceAllUsesWith(V);
DeadInsts.emplace_back(Phi);
++NumElim;
@@ -1832,10 +1829,10 @@ Value *SCEVExpander::findExistingExpansion(const SCEV *S,
TrueBB, FalseBB)))
continue;
- if (SE.getSCEV(LHS) == S && SE.DT->dominates(LHS, At))
+ if (SE.getSCEV(LHS) == S && SE.DT.dominates(LHS, At))
return LHS;
- if (SE.getSCEV(RHS) == S && SE.DT->dominates(RHS, At))
+ if (SE.getSCEV(RHS) == S && SE.DT.dominates(RHS, At))
return RHS;
}
diff --git a/llvm/lib/CodeGen/MachineFunctionPass.cpp b/llvm/lib/CodeGen/MachineFunctionPass.cpp
index aaf06a70da7..cc965947bbd 100644
--- a/llvm/lib/CodeGen/MachineFunctionPass.cpp
+++ b/llvm/lib/CodeGen/MachineFunctionPass.cpp
@@ -55,7 +55,7 @@ void MachineFunctionPass::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addPreserved<IVUsers>();
AU.addPreserved<LoopInfoWrapperPass>();
AU.addPreserved<MemoryDependenceAnalysis>();
- AU.addPreserved<ScalarEvolution>();
+ AU.addPreserved<ScalarEvolutionWrapperPass>();
AU.addPreserved<StackProtector>();
FunctionPass::getAnalysisUsage(AU);
diff --git a/llvm/lib/Passes/PassBuilder.cpp b/llvm/lib/Passes/PassBuilder.cpp
index ba7132050e9..7c683472cc2 100644
--- a/llvm/lib/Passes/PassBuilder.cpp
+++ b/llvm/lib/Passes/PassBuilder.cpp
@@ -20,6 +20,7 @@
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Dominators.h"
diff --git a/llvm/lib/Passes/PassRegistry.def b/llvm/lib/Passes/PassRegistry.def
index d768a3ad1d2..76b19df1679 100644
--- a/llvm/lib/Passes/PassRegistry.def
+++ b/llvm/lib/Passes/PassRegistry.def
@@ -54,6 +54,7 @@ FUNCTION_ANALYSIS("assumptions", AssumptionAnalysis())
FUNCTION_ANALYSIS("domtree", DominatorTreeAnalysis())
FUNCTION_ANALYSIS("loops", LoopAnalysis())
FUNCTION_ANALYSIS("no-op-function", NoOpFunctionAnalysis())
+FUNCTION_ANALYSIS("scalar-evolution", ScalarEvolutionAnalysis())
FUNCTION_ANALYSIS("targetlibinfo", TargetLibraryAnalysis())
FUNCTION_ANALYSIS("targetir",
TM ? TM->getTargetIRAnalysis() : TargetIRAnalysis())
@@ -71,6 +72,7 @@ FUNCTION_PASS("print", PrintFunctionPass(dbgs()))
FUNCTION_PASS("print<assumptions>", AssumptionPrinterPass(dbgs()))
FUNCTION_PASS("print<domtree>", DominatorTreePrinterPass(dbgs()))
FUNCTION_PASS("print<loops>", LoopPrinterPass(dbgs()))
+FUNCTION_PASS("print<scalar-evolution>", ScalarEvolutionPrinterPass(dbgs()))
FUNCTION_PASS("simplify-cfg", SimplifyCFGPass())
FUNCTION_PASS("verify", VerifierPass())
FUNCTION_PASS("verify<domtree>", DominatorTreeVerifierPass())
diff --git a/llvm/lib/Target/PowerPC/PPCCTRLoops.cpp b/llvm/lib/Target/PowerPC/PPCCTRLoops.cpp
index baadf081a64..376d5ab1087 100644
--- a/llvm/lib/Target/PowerPC/PPCCTRLoops.cpp
+++ b/llvm/lib/Target/PowerPC/PPCCTRLoops.cpp
@@ -98,7 +98,7 @@ namespace {
AU.addPreserved<LoopInfoWrapperPass>();
AU.addRequired<DominatorTreeWrapperPass>();
AU.addPreserved<DominatorTreeWrapperPass>();
- AU.addRequired<ScalarEvolution>();
+ AU.addRequired<ScalarEvolutionWrapperPass>();
}
private:
@@ -147,7 +147,7 @@ INITIALIZE_PASS_BEGIN(PPCCTRLoops, "ppc-ctr-loops", "PowerPC CTR Loops",
false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_END(PPCCTRLoops, "ppc-ctr-loops", "PowerPC CTR Loops",
false, false)
@@ -169,7 +169,7 @@ FunctionPass *llvm::createPPCCTRLoopsVerify() {
bool PPCCTRLoops::runOnFunction(Function &F) {
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
- SE = &getAnalysis<ScalarEvolution>();
+ SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
DL = &F.getParent()->getDataLayout();
auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
diff --git a/llvm/lib/Target/PowerPC/PPCLoopDataPrefetch.cpp b/llvm/lib/Target/PowerPC/PPCLoopDataPrefetch.cpp
index b4e1c099f19..e2cc7d569ba 100644
--- a/llvm/lib/Target/PowerPC/PPCLoopDataPrefetch.cpp
+++ b/llvm/lib/Target/PowerPC/PPCLoopDataPrefetch.cpp
@@ -71,10 +71,10 @@ namespace {
AU.addPreserved<DominatorTreeWrapperPass>();
AU.addRequired<LoopInfoWrapperPass>();
AU.addPreserved<LoopInfoWrapperPass>();
- AU.addRequired<ScalarEvolution>();
+ AU.addRequired<ScalarEvolutionWrapperPass>();
// FIXME: For some reason, preserving SE here breaks LSR (even if
// this pass changes nothing).
- // AU.addPreserved<ScalarEvolution>();
+ // AU.addPreserved<ScalarEvolutionWrapperPass>();
AU.addRequired<TargetTransformInfoWrapperPass>();
}
@@ -96,7 +96,7 @@ INITIALIZE_PASS_BEGIN(PPCLoopDataPrefetch, "ppc-loop-data-prefetch",
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_END(PPCLoopDataPrefetch, "ppc-loop-data-prefetch",
"PPC Loop Data Prefetch", false, false)
@@ -104,7 +104,7 @@ FunctionPass *llvm::createPPCLoopDataPrefetchPass() { return new PPCLoopDataPref
bool PPCLoopDataPrefetch::runOnFunction(Function &F) {
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
- SE = &getAnalysis<ScalarEvolution>();
+ SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
DL = &F.getParent()->getDataLayout();
AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
diff --git a/llvm/lib/Target/PowerPC/PPCLoopPreIncPrep.cpp b/llvm/lib/Target/PowerPC/PPCLoopPreIncPrep.cpp
index b6e7799402e..60c0afadf34 100644
--- a/llvm/lib/Target/PowerPC/PPCLoopPreIncPrep.cpp
+++ b/llvm/lib/Target/PowerPC/PPCLoopPreIncPrep.cpp
@@ -73,7 +73,7 @@ namespace {
AU.addPreserved<DominatorTreeWrapperPass>();
AU.addRequired<LoopInfoWrapperPass>();
AU.addPreserved<LoopInfoWrapperPass>();
- AU.addRequired<ScalarEvolution>();
+ AU.addRequired<ScalarEvolutionWrapperPass>();
}
bool runOnFunction(Function &F) override;
@@ -93,7 +93,7 @@ char PPCLoopPreIncPrep::ID = 0;
static const char *name = "Prepare loop for pre-inc. addressing modes";
INITIALIZE_PASS_BEGIN(PPCLoopPreIncPrep, DEBUG_TYPE, name, false, false)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_END(PPCLoopPreIncPrep, DEBUG_TYPE, name, false, false)
FunctionPass *llvm::createPPCLoopPreIncPrepPass(PPCTargetMachine &TM) {
@@ -140,7 +140,7 @@ static Value *GetPointerOperand(Value *MemI) {
bool PPCLoopPreIncPrep::runOnFunction(Function &F) {
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
- SE = &getAnalysis<ScalarEvolution>();
+ SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
bool MadeChange = false;
diff --git a/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp b/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
index 8918909f484..3c2c0cbc3d2 100644
--- a/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
+++ b/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
@@ -54,13 +54,13 @@ struct AlignmentFromAssumptions : public FunctionPass {
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<AssumptionCacheTracker>();
- AU.addRequired<ScalarEvolution>();
+ AU.addRequired<ScalarEvolutionWrapperPass>();
AU.addRequired<DominatorTreeWrapperPass>();
AU.setPreservesCFG();
AU.addPreserved<LoopInfoWrapperPass>();
AU.addPreserved<DominatorTreeWrapperPass>();
- AU.addPreserved<ScalarEvolution>();
+ AU.addPreserved<ScalarEvolutionWrapperPass>();
}
// For memory transfers, we need a common alignment for both the source and
@@ -84,7 +84,7 @@ INITIALIZE_PASS_BEGIN(AlignmentFromAssumptions, AA_NAME,
aip_name, false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_END(AlignmentFromAssumptions, AA_NAME,
aip_name, false, false)
@@ -410,7 +410,7 @@ bool AlignmentFromAssumptions::processAssumption(CallInst *ACall) {
bool AlignmentFromAssumptions::runOnFunction(Function &F) {
bool Changed = false;
auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
- SE = &getAnalysis<ScalarEvolution>();
+ SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
NewDestAlignments.clear();
diff --git a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
index 9c2e11be3f9..66f8aef98d4 100644
--- a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -108,10 +108,10 @@ namespace {
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequired<LoopInfoWrapperPass>();
- AU.addRequired<ScalarEvolution>();
+ AU.addRequired<ScalarEvolutionWrapperPass>();
AU.addRequiredID(LoopSimplifyID);
AU.addRequiredID(LCSSAID);
- AU.addPreserved<ScalarEvolution>();
+ AU.addPreserved<ScalarEvolutionWrapperPass>();
AU.addPreservedID(LoopSimplifyID);
AU.addPreservedID(LCSSAID);
AU.setPreservesCFG();
@@ -147,7 +147,7 @@ INITIALIZE_PASS_BEGIN(IndVarSimplify, "indvars",
"Induction Variable Simplification", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(LCSSA)
INITIALIZE_PASS_END(IndVarSimplify, "indvars",
@@ -1959,7 +1959,7 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
return false;
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
- SE = &getAnalysis<ScalarEvolution>();
+ SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
TLI = TLIP ? &TLIP->getTLI() : nullptr;
diff --git a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
index 08fdcc38c04..736b656499d 100644
--- a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
@@ -214,7 +214,7 @@ public:
AU.addRequired<LoopInfoWrapperPass>();
AU.addRequiredID(LoopSimplifyID);
AU.addRequiredID(LCSSAID);
- AU.addRequired<ScalarEvolution>();
+ AU.addRequired<ScalarEvolutionWrapperPass>();
AU.addRequired<BranchProbabilityInfoWrapperPass>();
}
@@ -1399,7 +1399,7 @@ bool InductiveRangeCheckElimination::runOnLoop(Loop *L, LPPassManager &LPM) {
LLVMContext &Context = Preheader->getContext();
InductiveRangeCheck::AllocatorTy IRCAlloc;
SmallVector<InductiveRangeCheck *, 16> RangeChecks;
- ScalarEvolution &SE = getAnalysis<ScalarEvolution>();
+ ScalarEvolution &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
BranchProbabilityInfo &BPI =
getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
diff --git a/llvm/lib/Transforms/Scalar/LICM.cpp b/llvm/lib/Transforms/Scalar/LICM.cpp
index 97490f78cf7..f2e9e4e71ad 100644
--- a/llvm/lib/Transforms/Scalar/LICM.cpp
+++ b/llvm/lib/Transforms/Scalar/LICM.cpp
@@ -120,7 +120,7 @@ namespace {
AU.addPreservedID(LCSSAID);
AU.addRequired<AliasAnalysis>();
AU.addPreserved<AliasAnalysis>();
- AU.addPreserved<ScalarEvolution>();
+ AU.addPreserved<ScalarEvolutionWrapperPass>();
AU.addRequired<TargetLibraryInfoWrapperPass>();
}
@@ -164,7 +164,7 @@ INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(LCSSA)
-INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(LICM, "licm", "Loop Invariant Code Motion", false, false)
@@ -264,9 +264,10 @@ bool LICM::runOnLoop(Loop *L, LPPassManager &LPM) {
// FIXME: This is really heavy handed. It would be a bit better to use an
// SSAUpdater strategy during promotion that was LCSSA aware and reformed
// it as it went.
- if (Changed)
- formLCSSARecursively(*L, *DT, LI,
- getAnalysisIfAvailable<ScalarEvolution>());
+ if (Changed) {
+ auto *SEWP = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
+ formLCSSARecursively(*L, *DT, LI, SEWP ? &SEWP->getSE() : nullptr);
+ }
}
// Check that neither this loop nor its parent have had LCSSA broken. LICM is
diff --git a/llvm/lib/Transforms/Scalar/LoopDeletion.cpp b/llvm/lib/Transforms/Scalar/LoopDeletion.cpp
index 98b068edf58..d0e8b7c8fc4 100644
--- a/llvm/lib/Transforms/Scalar/LoopDeletion.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopDeletion.cpp
@@ -40,11 +40,11 @@ namespace {
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequired<LoopInfoWrapperPass>();
- AU.addRequired<ScalarEvolution>();
+ AU.addRequired<ScalarEvolutionWrapperPass>();
AU.addRequiredID(LoopSimplifyID);
AU.addRequiredID(LCSSAID);
- AU.addPreserved<ScalarEvolution>();
+ AU.addPreserved<ScalarEvolutionWrapperPass>();
AU.addPreserved<DominatorTreeWrapperPass>();
AU.addPreserved<LoopInfoWrapperPass>();
AU.addPreservedID(LoopSimplifyID);
@@ -64,7 +64,7 @@ INITIALIZE_PASS_BEGIN(LoopDeletion, "loop-deletion",
"Delete dead loops", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(LCSSA)
INITIALIZE_PASS_END(LoopDeletion, "loop-deletion",
@@ -169,7 +169,7 @@ bool LoopDeletion::runOnLoop(Loop *L, LPPassManager &LPM) {
// Don't remove loops for which we can't solve the trip count.
// They could be infinite, in which case we'd be changing program behavior.
- ScalarEvolution &SE = getAnalysis<ScalarEvolution>();
+ ScalarEvolution &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
const SCEV *S = SE.getMaxBackedgeTakenCount(L);
if (isa<SCEVCouldNotCompute>(S))
return Changed;
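For context on the check above: getMaxBackedgeTakenCount is the conservative upper bound SCEV can prove, so a SCEVCouldNotCompute result means the loop may run forever and deleting it would change program behavior. A linear restatement of the bail-out, under the new wrapper API:

```cpp
// Sketch of the LoopDeletion bail-out: keep loops whose trip count is unknown.
ScalarEvolution &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
const SCEV *MaxBTC = SE.getMaxBackedgeTakenCount(L);
if (isa<SCEVCouldNotCompute>(MaxBTC))
  return Changed; // possibly infinite loop; leave it alone
```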
diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index ee520310520..44e9afd5e12 100644
--- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -96,8 +96,8 @@ public:
AU.addPreservedID(LCSSAID);
AU.addRequired<AliasAnalysis>();
AU.addPreserved<AliasAnalysis>();
- AU.addRequired<ScalarEvolution>();
- AU.addPreserved<ScalarEvolution>();
+ AU.addRequired<ScalarEvolutionWrapperPass>();
+ AU.addPreserved<ScalarEvolutionWrapperPass>();
AU.addPreserved<DominatorTreeWrapperPass>();
AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequired<TargetLibraryInfoWrapperPass>();
@@ -146,7 +146,7 @@ INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(LCSSA)
-INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
@@ -192,7 +192,7 @@ bool LoopIdiomRecognize::runOnLoop(Loop *L, LPPassManager &LPM) {
AA = &getAnalysis<AliasAnalysis>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
- SE = &getAnalysis<ScalarEvolution>();
+ SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
*CurLoop->getHeader()->getParent());
diff --git a/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp b/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp
index e1250265475..39c7738ca9c 100644
--- a/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp
@@ -48,7 +48,7 @@ namespace {
AU.addRequiredID(LoopSimplifyID);
AU.addPreservedID(LoopSimplifyID);
AU.addPreservedID(LCSSAID);
- AU.addPreserved<ScalarEvolution>();
+ AU.addPreserved<ScalarEvolutionWrapperPass>();
AU.addRequired<TargetLibraryInfoWrapperPass>();
}
};
diff --git a/llvm/lib/Transforms/Scalar/LoopInterchange.cpp b/llvm/lib/Transforms/Scalar/LoopInterchange.cpp
index 2f3b5e97185..b99210a7a6b 100644
--- a/llvm/lib/Transforms/Scalar/LoopInterchange.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopInterchange.cpp
@@ -437,7 +437,7 @@ struct LoopInterchange : public FunctionPass {
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.addRequired<ScalarEvolution>();
+ AU.addRequired<ScalarEvolutionWrapperPass>();
AU.addRequired<AliasAnalysis>();
AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequired<LoopInfoWrapperPass>();
@@ -447,7 +447,7 @@ struct LoopInterchange : public FunctionPass {
}
bool runOnFunction(Function &F) override {
- SE = &getAnalysis<ScalarEvolution>();
+ SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
DA = &getAnalysis<DependenceAnalysis>();
auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
@@ -1289,7 +1289,7 @@ INITIALIZE_PASS_BEGIN(LoopInterchange, "loop-interchange",
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(DependenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(LCSSA)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
diff --git a/llvm/lib/Transforms/Scalar/LoopRerollPass.cpp b/llvm/lib/Transforms/Scalar/LoopRerollPass.cpp
index 05d8d20b1d2..50f95055756 100644
--- a/llvm/lib/Transforms/Scalar/LoopRerollPass.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopRerollPass.cpp
@@ -152,7 +152,7 @@ namespace {
AU.addPreserved<LoopInfoWrapperPass>();
AU.addRequired<DominatorTreeWrapperPass>();
AU.addPreserved<DominatorTreeWrapperPass>();
- AU.addRequired<ScalarEvolution>();
+ AU.addRequired<ScalarEvolutionWrapperPass>();
AU.addRequired<TargetLibraryInfoWrapperPass>();
}
@@ -452,7 +452,7 @@ INITIALIZE_PASS_BEGIN(LoopReroll, "loop-reroll", "Reroll loops", false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(LoopReroll, "loop-reroll", "Reroll loops", false, false)
@@ -1468,7 +1468,7 @@ bool LoopReroll::runOnLoop(Loop *L, LPPassManager &LPM) {
AA = &getAnalysis<AliasAnalysis>();
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
- SE = &getAnalysis<ScalarEvolution>();
+ SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
diff --git a/llvm/lib/Transforms/Scalar/LoopRotation.cpp b/llvm/lib/Transforms/Scalar/LoopRotation.cpp
index 770a549709f..1c108fbc87a 100644
--- a/llvm/lib/Transforms/Scalar/LoopRotation.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopRotation.cpp
@@ -66,7 +66,7 @@ namespace {
AU.addPreservedID(LoopSimplifyID);
AU.addRequiredID(LCSSAID);
AU.addPreservedID(LCSSAID);
- AU.addPreserved<ScalarEvolution>();
+ AU.addPreserved<ScalarEvolutionWrapperPass>();
AU.addRequired<TargetTransformInfoWrapperPass>();
}
@@ -384,8 +384,8 @@ bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) {
// Anything ScalarEvolution may know about this loop or the PHI nodes
// in its header will soon be invalidated.
- if (ScalarEvolution *SE = getAnalysisIfAvailable<ScalarEvolution>())
- SE->forgetLoop(L);
+ if (auto *SEWP = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>())
+ SEWP->getSE().forgetLoop(L);
DEBUG(dbgs() << "LoopRotation: rotating "; L->dump());
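LoopRotation (and LoopUnswitch below) keep the same invalidation step, just spelled through the wrapper: cached trip counts and recurrences for the loop become stale once its header and latch are rewritten, so they are dropped up front when SCEV happens to be available. A sketch of the step, assuming it sits inside a legacy pass method:

```cpp
// Drop cached SCEV facts for L before rewriting it (sketch).
if (auto *SEWP = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>())
  SEWP->getSE().forgetLoop(L); // harmless if nothing was cached
// ... now rotate / unswitch / otherwise mutate the loop ...
```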
diff --git a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index 4904426dfa8..e18d284367f 100644
--- a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -4785,7 +4785,8 @@ LSRInstance::ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
}
LSRInstance::LSRInstance(Loop *L, Pass *P)
- : IU(P->getAnalysis<IVUsers>()), SE(P->getAnalysis<ScalarEvolution>()),
+ : IU(P->getAnalysis<IVUsers>()),
+ SE(P->getAnalysis<ScalarEvolutionWrapperPass>().getSE()),
DT(P->getAnalysis<DominatorTreeWrapperPass>().getDomTree()),
LI(P->getAnalysis<LoopInfoWrapperPass>().getLoopInfo()),
TTI(P->getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
@@ -4958,7 +4959,7 @@ INITIALIZE_PASS_BEGIN(LoopStrengthReduce, "loop-reduce",
"Loop Strength Reduction", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(IVUsers)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
@@ -4984,8 +4985,8 @@ void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequiredID(LoopSimplifyID);
AU.addRequired<DominatorTreeWrapperPass>();
AU.addPreserved<DominatorTreeWrapperPass>();
- AU.addRequired<ScalarEvolution>();
- AU.addPreserved<ScalarEvolution>();
+ AU.addRequired<ScalarEvolutionWrapperPass>();
+ AU.addPreserved<ScalarEvolutionWrapperPass>();
// Requiring LoopSimplify a second time here prevents IVUsers from running
// twice, since LoopSimplify was invalidated by running ScalarEvolution.
AU.addRequiredID(LoopSimplifyID);
@@ -5008,7 +5009,8 @@ bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) {
if (EnablePhiElim && L->isLoopSimplifyForm()) {
SmallVector<WeakVH, 16> DeadInsts;
const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
- SCEVExpander Rewriter(getAnalysis<ScalarEvolution>(), DL, "lsr");
+ SCEVExpander Rewriter(getAnalysis<ScalarEvolutionWrapperPass>().getSE(), DL,
+ "lsr");
#ifndef NDEBUG
Rewriter.setDebugType(DEBUG_TYPE);
#endif
diff --git a/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp b/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
index 0c047e78eab..f38360e6218 100644
--- a/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
@@ -144,8 +144,8 @@ namespace {
AU.addPreservedID(LoopSimplifyID);
AU.addRequiredID(LCSSAID);
AU.addPreservedID(LCSSAID);
- AU.addRequired<ScalarEvolution>();
- AU.addPreserved<ScalarEvolution>();
+ AU.addRequired<ScalarEvolutionWrapperPass>();
+ AU.addPreserved<ScalarEvolutionWrapperPass>();
AU.addRequired<TargetTransformInfoWrapperPass>();
// FIXME: Loop unroll requires LCSSA. And LCSSA requires dom info.
// If loop unroll does not preserve dom info then LCSSA pass on next
@@ -241,7 +241,7 @@ INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(LCSSA)
-INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_END(LoopUnroll, "loop-unroll", "Unroll loops", false, false)
Pass *llvm::createLoopUnrollPass(int Threshold, int Count, int AllowPartial,
@@ -894,7 +894,7 @@ bool LoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) {
auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
- ScalarEvolution *SE = &getAnalysis<ScalarEvolution>();
+ ScalarEvolution *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
const TargetTransformInfo &TTI =
getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
diff --git a/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp b/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
index 62047b96fac..7475150b0c2 100644
--- a/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
@@ -193,7 +193,7 @@ namespace {
AU.addRequiredID(LCSSAID);
AU.addPreservedID(LCSSAID);
AU.addPreserved<DominatorTreeWrapperPass>();
- AU.addPreserved<ScalarEvolution>();
+ AU.addPreserved<ScalarEvolutionWrapperPass>();
AU.addRequired<TargetTransformInfoWrapperPass>();
}
@@ -915,8 +915,8 @@ void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val,
<< " blocks] in Function " << F->getName()
<< " when '" << *Val << "' == " << *LIC << "\n");
- if (ScalarEvolution *SE = getAnalysisIfAvailable<ScalarEvolution>())
- SE->forgetLoop(L);
+ if (auto *SEWP = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>())
+ SEWP->getSE().forgetLoop(L);
LoopBlocks.clear();
NewBlocks.clear();
diff --git a/llvm/lib/Transforms/Scalar/NaryReassociate.cpp b/llvm/lib/Transforms/Scalar/NaryReassociate.cpp
index 58b9c9d092d..aae1cce8064 100644
--- a/llvm/lib/Transforms/Scalar/NaryReassociate.cpp
+++ b/llvm/lib/Transforms/Scalar/NaryReassociate.cpp
@@ -110,11 +110,11 @@ public:
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addPreserved<DominatorTreeWrapperPass>();
- AU.addPreserved<ScalarEvolution>();
+ AU.addPreserved<ScalarEvolutionWrapperPass>();
AU.addPreserved<TargetLibraryInfoWrapperPass>();
AU.addRequired<AssumptionCacheTracker>();
AU.addRequired<DominatorTreeWrapperPass>();
- AU.addRequired<ScalarEvolution>();
+ AU.addRequired<ScalarEvolutionWrapperPass>();
AU.addRequired<TargetLibraryInfoWrapperPass>();
AU.addRequired<TargetTransformInfoWrapperPass>();
AU.setPreservesCFG();
@@ -191,7 +191,7 @@ INITIALIZE_PASS_BEGIN(NaryReassociate, "nary-reassociate", "Nary reassociation",
false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(NaryReassociate, "nary-reassociate", "Nary reassociation",
@@ -207,7 +207,7 @@ bool NaryReassociate::runOnFunction(Function &F) {
AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- SE = &getAnalysis<ScalarEvolution>();
+ SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
diff --git a/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp b/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp
index 79b4e1a83ee..eb1fe48f42d 100644
--- a/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp
+++ b/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp
@@ -142,7 +142,7 @@ struct PlaceBackedgeSafepointsImpl : public FunctionPass {
}
bool runOnFunction(Function &F) override {
- SE = &getAnalysis<ScalarEvolution>();
+ SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
for (auto I = LI->begin(), E = LI->end(); I != E; I++) {
@@ -153,7 +153,7 @@ struct PlaceBackedgeSafepointsImpl : public FunctionPass {
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<DominatorTreeWrapperPass>();
- AU.addRequired<ScalarEvolution>();
+ AU.addRequired<ScalarEvolutionWrapperPass>();
AU.addRequired<LoopInfoWrapperPass>();
// We no longer modify the IR at all in this pass. Thus all
// analysis are preserved.
@@ -747,7 +747,7 @@ FunctionPass *llvm::createPlaceSafepointsPass() {
INITIALIZE_PASS_BEGIN(PlaceBackedgeSafepointsImpl,
"place-backedge-safepoints-impl",
"Place Backedge Safepoints", false, false)
-INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(PlaceBackedgeSafepointsImpl,
diff --git a/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp b/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
index 97950caa3de..bccd8164ed4 100644
--- a/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
+++ b/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
@@ -322,7 +322,7 @@ public:
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<DominatorTreeWrapperPass>();
- AU.addRequired<ScalarEvolution>();
+ AU.addRequired<ScalarEvolutionWrapperPass>();
AU.addRequired<TargetTransformInfoWrapperPass>();
AU.setPreservesCFG();
}
@@ -412,7 +412,7 @@ INITIALIZE_PASS_BEGIN(
"Split GEPs to a variadic base and a constant offset for better CSE", false,
false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(
SeparateConstOffsetFromGEP, "separate-const-offset-from-gep",
@@ -1035,7 +1035,7 @@ bool SeparateConstOffsetFromGEP::runOnFunction(Function &F) {
return false;
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- SE = &getAnalysis<ScalarEvolution>();
+ SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
bool Changed = false;
for (Function::iterator B = F.begin(), BE = F.end(); B != BE; ++B) {
diff --git a/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
index b2b886c4f11..913454bdfe8 100644
--- a/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
+++ b/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
@@ -131,7 +131,7 @@ public:
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<DominatorTreeWrapperPass>();
- AU.addRequired<ScalarEvolution>();
+ AU.addRequired<ScalarEvolutionWrapperPass>();
AU.addRequired<TargetTransformInfoWrapperPass>();
// We do not modify the shape of the CFG.
AU.setPreservesCFG();
@@ -212,7 +212,7 @@ char StraightLineStrengthReduce::ID = 0;
INITIALIZE_PASS_BEGIN(StraightLineStrengthReduce, "slsr",
"Straight line strength reduction", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(StraightLineStrengthReduce, "slsr",
"Straight line strength reduction", false, false)
@@ -690,7 +690,7 @@ bool StraightLineStrengthReduce::runOnFunction(Function &F) {
TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- SE = &getAnalysis<ScalarEvolution>();
+ SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
// Traverse the dominator tree in the depth-first order. This order makes sure
// all bases of a candidate are in Candidates when we process it.
for (auto node = GraphTraits<DominatorTree *>::nodes_begin(DT);
diff --git a/llvm/lib/Transforms/Utils/LCSSA.cpp b/llvm/lib/Transforms/Utils/LCSSA.cpp
index d45944d47cf..4e8c1ad2f82 100644
--- a/llvm/lib/Transforms/Utils/LCSSA.cpp
+++ b/llvm/lib/Transforms/Utils/LCSSA.cpp
@@ -298,7 +298,7 @@ struct LCSSA : public FunctionPass {
AU.addRequired<LoopInfoWrapperPass>();
AU.addPreservedID(LoopSimplifyID);
AU.addPreserved<AliasAnalysis>();
- AU.addPreserved<ScalarEvolution>();
+ AU.addPreserved<ScalarEvolutionWrapperPass>();
}
};
}
@@ -318,7 +318,8 @@ bool LCSSA::runOnFunction(Function &F) {
bool Changed = false;
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- SE = getAnalysisIfAvailable<ScalarEvolution>();
+ auto *SEWP = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
+ SE = SEWP ? &SEWP->getSE() : nullptr;
// Simplify each loop nest in the function.
for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
diff --git a/llvm/lib/Transforms/Utils/LoopSimplify.cpp b/llvm/lib/Transforms/Utils/LoopSimplify.cpp
index ca188f470f5..49950165cc1 100644
--- a/llvm/lib/Transforms/Utils/LoopSimplify.cpp
+++ b/llvm/lib/Transforms/Utils/LoopSimplify.cpp
@@ -760,7 +760,7 @@ namespace {
AU.addPreserved<LoopInfoWrapperPass>();
AU.addPreserved<AliasAnalysis>();
- AU.addPreserved<ScalarEvolution>();
+ AU.addPreserved<ScalarEvolutionWrapperPass>();
AU.addPreserved<DependenceAnalysis>();
AU.addPreservedID(BreakCriticalEdgesID); // No critical edges added.
}
@@ -790,7 +790,8 @@ bool LoopSimplify::runOnFunction(Function &F) {
bool Changed = false;
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- SE = getAnalysisIfAvailable<ScalarEvolution>();
+ auto *SEWP = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
+ SE = SEWP ? &SEWP->getSE() : nullptr;
AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
// Simplify each loop nest in the function.
diff --git a/llvm/lib/Transforms/Utils/LoopUnroll.cpp b/llvm/lib/Transforms/Utils/LoopUnroll.cpp
index 6e7a571da51..971f01b72a1 100644
--- a/llvm/lib/Transforms/Utils/LoopUnroll.cpp
+++ b/llvm/lib/Transforms/Utils/LoopUnroll.cpp
@@ -110,10 +110,11 @@ FoldBlockIntoPredecessor(BasicBlock *BB, LoopInfo* LI, LPPassManager *LPM,
// ScalarEvolution holds references to loop exit blocks.
if (LPM) {
- if (ScalarEvolution *SE = LPM->getAnalysisIfAvailable<ScalarEvolution>()) {
+ if (auto *SEWP =
+ LPM->getAnalysisIfAvailable<ScalarEvolutionWrapperPass>()) {
if (Loop *L = LI->getLoopFor(BB)) {
if (ForgottenLoops.insert(L).second)
- SE->forgetLoop(L);
+ SEWP->getSE().forgetLoop(L);
}
}
}
@@ -232,8 +233,9 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount,
// Notify ScalarEvolution that the loop will be substantially changed,
// if not outright eliminated.
- ScalarEvolution *SE =
- PP ? PP->getAnalysisIfAvailable<ScalarEvolution>() : nullptr;
+ auto *SEWP =
+ PP ? PP->getAnalysisIfAvailable<ScalarEvolutionWrapperPass>() : nullptr;
+ ScalarEvolution *SE = SEWP ? &SEWP->getSE() : nullptr;
if (SE)
SE->forgetLoop(L);
diff --git a/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp b/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp
index 5658e9201c3..d803ddf040b 100644
--- a/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp
+++ b/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp
@@ -293,13 +293,14 @@ bool llvm::UnrollRuntimeLoopProlog(Loop *L, unsigned Count,
// loops to be unrolled than relying on induction var simplification
if (!LPM)
return false;
- ScalarEvolution *SE = LPM->getAnalysisIfAvailable<ScalarEvolution>();
- if (!SE)
+ auto *SEWP = LPM->getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
+ if (!SEWP)
return false;
+ ScalarEvolution &SE = SEWP->getSE();
// Only unroll loops with a computable trip count and the trip count needs
// to be an int value (allowing a pointer type is a TODO item)
- const SCEV *BECountSC = SE->getBackedgeTakenCount(L);
+ const SCEV *BECountSC = SE.getBackedgeTakenCount(L);
if (isa<SCEVCouldNotCompute>(BECountSC) ||
!BECountSC->getType()->isIntegerTy())
return false;
@@ -308,13 +309,13 @@ bool llvm::UnrollRuntimeLoopProlog(Loop *L, unsigned Count,
// Add 1 since the backedge count doesn't include the first loop iteration
const SCEV *TripCountSC =
- SE->getAddExpr(BECountSC, SE->getConstant(BECountSC->getType(), 1));
+ SE.getAddExpr(BECountSC, SE.getConstant(BECountSC->getType(), 1));
if (isa<SCEVCouldNotCompute>(TripCountSC))
return false;
BasicBlock *Header = L->getHeader();
const DataLayout &DL = Header->getModule()->getDataLayout();
- SCEVExpander Expander(*SE, DL, "loop-unroll");
+ SCEVExpander Expander(SE, DL, "loop-unroll");
if (!AllowExpensiveTripCount && Expander.isHighCostExpansion(TripCountSC, L))
return false;
@@ -331,7 +332,7 @@ bool llvm::UnrollRuntimeLoopProlog(Loop *L, unsigned Count,
// If this loop is nested, then the loop unroller changes the code in
// parent loop, so the Scalar Evolution pass needs to be run again
if (Loop *ParentLoop = L->getParentLoop())
- SE->forgetLoop(ParentLoop);
+ SE.forgetLoop(ParentLoop);
// Grab analyses that we preserve.
auto *DTWP = LPM->getAnalysisIfAvailable<DominatorTreeWrapperPass>();
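The runtime-unroll hunks above follow a short chain of reasoning: the SCEV backedge-taken count must be computable and integral, the trip count is that value plus one, the expansion must not be too expensive to materialize, and any enclosing loop must forget its cached facts because the prolog changes its body. A condensed sketch of those steps under the new API, with variable names chosen for illustration:

```cpp
// Condensed sketch of the prolog-generation checks (not the full function).
auto *SEWP = LPM->getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
if (!SEWP)
  return false;
ScalarEvolution &SE = SEWP->getSE();

const SCEV *BECount = SE.getBackedgeTakenCount(L);
if (isa<SCEVCouldNotCompute>(BECount) || !BECount->getType()->isIntegerTy())
  return false; // need a computable, integer backedge count

// Trip count = backedge-taken count + 1 (the first iteration is not counted).
const SCEV *TripCount =
    SE.getAddExpr(BECount, SE.getConstant(BECount->getType(), 1));

const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
SCEVExpander Expander(SE, DL, "loop-unroll");
if (Expander.isHighCostExpansion(TripCount, L))
  return false; // materializing the trip count would be too expensive

// The prolog lands in the parent loop's body, so its cached facts go stale.
if (Loop *ParentLoop = L->getParentLoop())
  SE.forgetLoop(ParentLoop);
```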
diff --git a/llvm/lib/Transforms/Vectorize/BBVectorize.cpp b/llvm/lib/Transforms/Vectorize/BBVectorize.cpp
index 8b238caf8c8..7ddf2b9505a 100644
--- a/llvm/lib/Transforms/Vectorize/BBVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/BBVectorize.cpp
@@ -207,7 +207,7 @@ namespace {
: BasicBlockPass(ID), Config(C) {
AA = &P->getAnalysis<AliasAnalysis>();
DT = &P->getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- SE = &P->getAnalysis<ScalarEvolution>();
+ SE = &P->getAnalysis<ScalarEvolutionWrapperPass>().getSE();
TLI = &P->getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
TTI = IgnoreTargetInfo
? nullptr
@@ -442,7 +442,7 @@ namespace {
AA = &getAnalysis<AliasAnalysis>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- SE = &getAnalysis<ScalarEvolution>();
+ SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
TTI = IgnoreTargetInfo
? nullptr
@@ -456,12 +456,12 @@ namespace {
BasicBlockPass::getAnalysisUsage(AU);
AU.addRequired<AliasAnalysis>();
AU.addRequired<DominatorTreeWrapperPass>();
- AU.addRequired<ScalarEvolution>();
+ AU.addRequired<ScalarEvolutionWrapperPass>();
AU.addRequired<TargetLibraryInfoWrapperPass>();
AU.addRequired<TargetTransformInfoWrapperPass>();
AU.addPreserved<AliasAnalysis>();
AU.addPreserved<DominatorTreeWrapperPass>();
- AU.addPreserved<ScalarEvolution>();
+ AU.addPreserved<ScalarEvolutionWrapperPass>();
AU.setPreservesCFG();
}
@@ -3200,7 +3200,7 @@ INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_END(BBVectorize, BBV_NAME, bb_vectorize_name, false, false)
BasicBlockPass *llvm::createBBVectorizePass(const VectorizeConfig &C) {
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index d40fe33e6c0..e131cc02e37 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -1569,7 +1569,7 @@ struct LoopVectorize : public FunctionPass {
BlockFrequency ColdEntryFreq;
bool runOnFunction(Function &F) override {
- SE = &getAnalysis<ScalarEvolution>();
+ SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
@@ -1863,7 +1863,7 @@ struct LoopVectorize : public FunctionPass {
AU.addRequired<BlockFrequencyInfoWrapperPass>();
AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequired<LoopInfoWrapperPass>();
- AU.addRequired<ScalarEvolution>();
+ AU.addRequired<ScalarEvolutionWrapperPass>();
AU.addRequired<TargetTransformInfoWrapperPass>();
AU.addRequired<AliasAnalysis>();
AU.addRequired<LoopAccessAnalysis>();
@@ -5399,7 +5399,7 @@ INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LCSSA)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 61773cd2013..df15a70753c 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -3076,7 +3076,7 @@ struct SLPVectorizer : public FunctionPass {
if (skipOptnoneFunction(F))
return false;
- SE = &getAnalysis<ScalarEvolution>();
+ SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
TLI = TLIP ? &TLIP->getTLI() : nullptr;
@@ -3141,7 +3141,7 @@ struct SLPVectorizer : public FunctionPass {
void getAnalysisUsage(AnalysisUsage &AU) const override {
FunctionPass::getAnalysisUsage(AU);
AU.addRequired<AssumptionCacheTracker>();
- AU.addRequired<ScalarEvolution>();
+ AU.addRequired<ScalarEvolutionWrapperPass>();
AU.addRequired<AliasAnalysis>();
AU.addRequired<TargetTransformInfoWrapperPass>();
AU.addRequired<LoopInfoWrapperPass>();
@@ -4053,7 +4053,7 @@ INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
-INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)