Diffstat (limited to 'llvm/lib/Analysis/ScalarEvolution.cpp')
-rw-r--r--  llvm/lib/Analysis/ScalarEvolution.cpp | 121
1 file changed, 36 insertions(+), 85 deletions(-)
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 3a99f2a2157..7962222a210 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -64,8 +64,8 @@
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
@@ -1212,7 +1212,6 @@ const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
Op, Ty);
UniqueSCEVs.InsertNode(S, IP);
- addAffectedFromOperands(S);
return S;
}
@@ -1599,7 +1598,7 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
// these to prove lack of overflow. Use this fact to avoid
// doing extra work that may not pay off.
if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
- !AffectedMap.empty()) {
+ !AC.assumptions().empty()) {
// If the backedge is guarded by a comparison with the pre-inc
// value the addrec is safe. Also, if the entry is guarded by
// a comparison with the start value and the backedge is
@@ -1665,7 +1664,6 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
Op, Ty);
UniqueSCEVs.InsertNode(S, IP);
- addAffectedFromOperands(S);
return S;
}
@@ -1835,7 +1833,7 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
// these to prove lack of overflow. Use this fact to avoid
// doing extra work that may not pay off.
if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
- !AffectedMap.empty()) {
+ !AC.assumptions().empty()) {
// If the backedge is guarded by a comparison with the pre-inc
// value the addrec is safe. Also, if the entry is guarded by
// a comparison with the start value and the backedge is
@@ -1893,7 +1891,6 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
Op, Ty);
UniqueSCEVs.InsertNode(S, IP);
- addAffectedFromOperands(S);
return S;
}
@@ -2447,7 +2444,6 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
S = new (SCEVAllocator) SCEVAddExpr(ID.Intern(SCEVAllocator),
O, Ops.size());
UniqueSCEVs.InsertNode(S, IP);
- addAffectedFromOperands(S);
}
S->setNoWrapFlags(Flags);
return S;
@@ -2740,7 +2736,6 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
O, Ops.size());
UniqueSCEVs.InsertNode(S, IP);
- addAffectedFromOperands(S);
}
S->setNoWrapFlags(Flags);
return S;
@@ -2861,7 +2856,6 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
LHS, RHS);
UniqueSCEVs.InsertNode(S, IP);
- addAffectedFromOperands(S);
return S;
}
@@ -3042,7 +3036,6 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator),
O, Operands.size(), L);
UniqueSCEVs.InsertNode(S, IP);
- addAffectedFromOperands(S);
}
S->setNoWrapFlags(Flags);
return S;
@@ -3198,7 +3191,6 @@ ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator),
O, Ops.size());
UniqueSCEVs.InsertNode(S, IP);
- addAffectedFromOperands(S);
return S;
}
@@ -3300,7 +3292,6 @@ ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator),
O, Ops.size());
UniqueSCEVs.InsertNode(S, IP);
- addAffectedFromOperands(S);
return S;
}
@@ -3501,40 +3492,9 @@ const SCEV *ScalarEvolution::getSCEV(Value *V) {
ExprValueMap[Stripped].insert({V, Offset});
}
}
-
- // If this value is an instruction or an argument, and might be affected by
- // an assumption, add its SCEV to the AffectedMap.
- if (isa<Instruction>(V) || isa<Argument>(V)) {
- for (auto *U : V->users()) {
- auto *II = dyn_cast<IntrinsicInst>(U);
- if (!II)
- continue;
- if (II->getIntrinsicID() != Intrinsic::assume)
- continue;
-
- AffectedMap[S].insert(II);
- }
- }
-
return S;
}
-// If one of this SCEV's operands is in the AffectedMap (meaning that it might
-// be affected by an assumption), then this SCEV might be affected by the same
-// assumption.
-void ScalarEvolution::addAffectedFromOperands(const SCEV *S) {
- if (auto *NS = dyn_cast<SCEVNAryExpr>(S))
- for (auto *Op : NS->operands()) {
- auto AMI = AffectedMap.find(Op);
- if (AMI == AffectedMap.end())
- continue;
-
- auto &ISet = AffectedMap[S];
- AMI = AffectedMap.find(Op);
- ISet.insert(AMI->second.begin(), AMI->second.end());
- }
-}
-
const SCEV *ScalarEvolution::getExistingSCEV(Value *V) {
assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
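
The hunk above deletes the core of the old machinery: getSCEV() no longer records which @llvm.assume calls mention a value, and addAffectedFromOperands() no longer propagates those sets through expression operands. AssumptionCache already keeps a per-function list of every assume intrinsic, so a client can simply walk that list on demand. A minimal consumer-side sketch, assuming the LLVM-4.0-era AssumptionCache interface (the helper name forEachLiveAssume is illustrative, not from this patch):

#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/IR/Instructions.h"

// Visit every still-live @llvm.assume in the cached function. Erased
// assumes leave null WeakVH entries behind, so each handle is checked
// before the cast; the loops later in this patch use the same guard.
static void forEachLiveAssume(llvm::AssumptionCache &AC,
                              llvm::function_ref<void(llvm::CallInst *)> Fn) {
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    Fn(llvm::cast<llvm::CallInst>(AssumeVH));
  }
}

The trade-off is simplicity over precision: the map filtered assumes by the SCEVs they could affect, while the cache-based loops re-test every assume in the function with a dominance check plus isImpliedCond.
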
@@ -4324,7 +4284,7 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
// PHI's incoming blocks are in a different loop, in which case doing so
// risks breaking LCSSA form. Instcombine would normally zap these, but
// it doesn't have DominatorTree information, so it may miss cases.
- if (Value *V = SimplifyInstruction(PN, getDataLayout(), &TLI, &DT))
+ if (Value *V = SimplifyInstruction(PN, getDataLayout(), &TLI, &DT, &AC))
if (LI.replacementPreservesLCSSAForm(PN, V))
return getSCEV(V);
@@ -4512,7 +4472,7 @@ ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
// For a SCEVUnknown, ask ValueTracking.
unsigned BitWidth = getTypeSizeInBits(U->getType());
APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
- computeKnownBits(U->getValue(), Zeros, Ones, getDataLayout(), 0,
+ computeKnownBits(U->getValue(), Zeros, Ones, getDataLayout(), 0, &AC,
nullptr, &DT);
return Zeros.countTrailingOnes();
}
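
Here and in the hunks that follow, the patch threads &AC into the ValueTracking entry points so known-bits and sign-bit queries can be refined by dominating assumes. For reference, a sketch of the declaration's shape in this era (parameter order as used in the call above; ValueTracking.h is authoritative):

// Sketch of the ValueTracking entry point as called above: the
// AssumptionCache parameter sits between the recursion depth and the
// context instruction, and may be nullptr to opt out of assume-driven
// refinement (as the computeShiftCompareExitLimit hunk below chooses to).
void computeKnownBits(const llvm::Value *V, llvm::APInt &KnownZero,
                      llvm::APInt &KnownOne, const llvm::DataLayout &DL,
                      unsigned Depth = 0, llvm::AssumptionCache *AC = nullptr,
                      const llvm::Instruction *CxtI = nullptr,
                      const llvm::DominatorTree *DT = nullptr);
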
@@ -4683,14 +4643,14 @@ ScalarEvolution::getRange(const SCEV *S,
if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
// For a SCEVUnknown, ask ValueTracking.
APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
- computeKnownBits(U->getValue(), Zeros, Ones, DL, 0, nullptr, &DT);
+ computeKnownBits(U->getValue(), Zeros, Ones, DL, 0, &AC, nullptr, &DT);
if (Ones != ~Zeros + 1)
ConservativeResult =
ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1));
} else {
assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED &&
"generalize as needed!");
- unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, nullptr, &DT);
+ unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
if (NS > 1)
ConservativeResult = ConservativeResult.intersectWith(
ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
@@ -5179,7 +5139,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
unsigned BitWidth = A.getBitWidth();
APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
computeKnownBits(BO->LHS, KnownZero, KnownOne, getDataLayout(),
- 0, nullptr, &DT);
+ 0, &AC, nullptr, &DT);
APInt EffectiveMask =
APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ);
@@ -6374,7 +6334,7 @@ ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
// bitwidth(K) iterations.
Value *FirstValue = PN->getIncomingValueForBlock(Predecessor);
bool KnownZero, KnownOne;
- ComputeSignBit(FirstValue, KnownZero, KnownOne, DL, 0,
+ ComputeSignBit(FirstValue, KnownZero, KnownOne, DL, 0, nullptr,
Predecessor->getTerminator(), &DT);
auto *Ty = cast<IntegerType>(RHS->getType());
if (KnownZero)
@@ -7966,23 +7926,16 @@ ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
}
// Check conditions due to any @llvm.assume intrinsics.
- auto CheckAssumptions = [&](const SCEV *S) {
- auto AMI = AffectedMap.find(S);
- if (AMI != AffectedMap.end())
- for (auto *Assume : AMI->second) {
- auto *CI = cast<CallInst>(Assume);
- if (!DT.dominates(CI, Latch->getTerminator()))
- continue;
-
- if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
- return true;
- }
-
- return false;
- };
+ for (auto &AssumeVH : AC.assumptions()) {
+ if (!AssumeVH)
+ continue;
+ auto *CI = cast<CallInst>(AssumeVH);
+ if (!DT.dominates(CI, Latch->getTerminator()))
+ continue;
- if (CheckAssumptions(LHS) || CheckAssumptions(RHS))
- return true;
+ if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
+ return true;
+ }
// If the loop is not reachable from the entry block, we risk running into an
// infinite loop as we walk up into the dom tree. These loops do not matter
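
With the lambda gone, the check above treats any dominating @llvm.assume whose condition implies Pred(LHS, RHS) as proof, exactly as if the condition guarded the backedge; the isLoopEntryGuardedByCond hunk below makes the same substitution against the loop header. The producer side is symmetric: a transform can hand ScalarEvolution a fact by materializing an assume. A hypothetical sketch (emitUpperBoundAssume and its parameters are illustrative, not part of this patch):

#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"

// Emit "call void @llvm.assume(i1 (icmp ult %N, Bound))" at the builder's
// insertion point, then register it so an already-populated
// AssumptionCache sees the new call without a rescan.
static void emitUpperBoundAssume(llvm::IRBuilder<> &Builder, llvm::Value *N,
                                 uint64_t Bound, llvm::AssumptionCache &AC) {
  llvm::Value *Cmp = Builder.CreateICmpULT(
      N, llvm::ConstantInt::get(N->getType(), Bound));
  llvm::CallInst *Assume = Builder.CreateAssumption(Cmp);
  AC.registerAssumption(Assume);
}
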
@@ -8067,23 +8020,16 @@ ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
}
// Check conditions due to any @llvm.assume intrinsics.
- auto CheckAssumptions = [&](const SCEV *S) {
- auto AMI = AffectedMap.find(S);
- if (AMI != AffectedMap.end())
- for (auto *Assume : AMI->second) {
- auto *CI = cast<CallInst>(Assume);
- if (!DT.dominates(CI, L->getHeader()))
- continue;
-
- if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
- return true;
- }
-
- return false;
- };
+ for (auto &AssumeVH : AC.assumptions()) {
+ if (!AssumeVH)
+ continue;
+ auto *CI = cast<CallInst>(AssumeVH);
+ if (!DT.dominates(CI, L->getHeader()))
+ continue;
- if (CheckAssumptions(LHS) || CheckAssumptions(RHS))
- return true;
+ if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
+ return true;
+ }
return false;
}
@@ -9536,8 +9482,9 @@ ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
//===----------------------------------------------------------------------===//
ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI,
- DominatorTree &DT, LoopInfo &LI)
- : F(F), TLI(TLI), DT(DT), LI(LI),
+ AssumptionCache &AC, DominatorTree &DT,
+ LoopInfo &LI)
+ : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI),
CouldNotCompute(new SCEVCouldNotCompute()),
WalkingBEDominatingConds(false), ProvingSplitPredicate(false),
ValuesAtScopes(64), LoopDispositions(64), BlockDispositions(64),
@@ -9559,7 +9506,7 @@ ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI,
}
ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg)
- : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), DT(Arg.DT),
+ : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT),
LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)),
ValueExprMap(std::move(Arg.ValueExprMap)),
PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)),
@@ -10030,7 +9977,7 @@ void ScalarEvolution::verify() const {
// Gather stringified backedge taken counts for all loops using a fresh
// ScalarEvolution object.
- ScalarEvolution SE2(F, TLI, DT, LI);
+ ScalarEvolution SE2(F, TLI, AC, DT, LI);
for (LoopInfo::reverse_iterator I = LI.rbegin(), E = LI.rend(); I != E; ++I)
getLoopBackedgeTakenCounts(*I, BackedgeDumpsNew, SE2);
@@ -10071,6 +10018,7 @@ AnalysisKey ScalarEvolutionAnalysis::Key;
ScalarEvolution ScalarEvolutionAnalysis::run(Function &F,
FunctionAnalysisManager &AM) {
return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F),
+ AM.getResult<AssumptionAnalysis>(F),
AM.getResult<DominatorTreeAnalysis>(F),
AM.getResult<LoopAnalysis>(F));
}
@@ -10083,6 +10031,7 @@ ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) {
INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution",
"Scalar Evolution Analysis", false, true)
+INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
@@ -10097,6 +10046,7 @@ ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) {
bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) {
SE.reset(new ScalarEvolution(
F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
+ getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
getAnalysis<LoopInfoWrapperPass>().getLoopInfo()));
return false;
@@ -10117,6 +10067,7 @@ void ScalarEvolutionWrapperPass::verifyAnalysis() const {
void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
+ AU.addRequiredTransitive<AssumptionCacheTracker>();
AU.addRequiredTransitive<LoopInfoWrapperPass>();
AU.addRequiredTransitive<DominatorTreeWrapperPass>();
AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
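
Because the dependency above is declared transitively, downstream passes get an assumption-aware ScalarEvolution without any extra wiring. A hypothetical legacy-pass-manager consumer, sketched on the assumption that it only reads SE (MyScevUserPass is an illustrative name, not from this patch):

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/Function.h"
#include "llvm/Pass.h"

namespace {
struct MyScevUserPass : public llvm::FunctionPass {
  static char ID;
  MyScevUserPass() : llvm::FunctionPass(ID) {}

  bool runOnFunction(llvm::Function &F) override {
    // The wrapper pass returns an SE already constructed with the
    // function's AssumptionCache, per the runOnFunction hunk above.
    auto &SE = getAnalysis<llvm::ScalarEvolutionWrapperPass>().getSE();
    (void)SE; // ...query backedge-taken counts, ranges, and so on...
    return false;
  }

  void getAnalysisUsage(llvm::AnalysisUsage &AU) const override {
    AU.setPreservesAll();
    AU.addRequired<llvm::ScalarEvolutionWrapperPass>();
  }
};
} // end anonymous namespace

char MyScevUserPass::ID = 0;
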