author    Hal Finkel <hfinkel@anl.gov>    2016-12-15 02:53:42 +0000
committer Hal Finkel <hfinkel@anl.gov>    2016-12-15 02:53:42 +0000
commit    cb9f78e1c3951337de4ba24902eced2c72184319 (patch)
tree      a2290a1daa25da9cc6b1d3a9d9c59564bef3792c /llvm/lib/Analysis/ScalarEvolution.cpp
parent    dfe85e2d88cdc7684abdeff6c91356d16a394849 (diff)
Make processing @llvm.assume more efficient by using operand bundles
There was an efficiency problem with how we processed @llvm.assume in ValueTracking (and other places). The AssumptionCache tracked all of the assumptions in a given function. In order to find assumptions relevant to computing known bits, etc. we searched every assumption in the function. For ValueTracking, that means that we did O(#assumes * #values) work in InstCombine and other passes (with a constant factor that can be quite large because we'd repeat this search at every level of recursion of the analysis).

Several of us discussed this situation at the last developers' meeting, and this implements the discussed solution: Make the values that an assume might affect operands of the assume itself. To avoid exposing this detail to frontends and passes that need not worry about it, I've used the new operand-bundle feature to add these extra call "operands" in a way that does not affect the intrinsic's signature. I think this solution is relatively clean. InstCombine adds these extra operands based on what ValueTracking, LVI, etc. will need and then those passes need only search the users of the values under consideration. This should fix the computational-complexity problem.

At this point, no passes depend on the AssumptionCache, and so I'll remove that as a follow-up change.

Differential Revision: https://reviews.llvm.org/D27259

llvm-svn: 289755
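To make the new access pattern concrete, here is a minimal C++ sketch of the lookup this change enables. The helper name collectAffectingAssumes is illustrative only and not part of this patch; its body mirrors the users() walk that the diff below adds to getSCEV().

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/IntrinsicInst.h"

using namespace llvm;

// Collect the @llvm.assume calls that directly use V. Because the
// affected values are now operands of the assume (attached via operand
// bundles), the relevant assumptions are found by walking V->users()
// instead of scanning every assumption in the function, avoiding the
// O(#assumes * #values) behavior described above.
static void collectAffectingAssumes(Value *V,
                                    SmallVectorImpl<IntrinsicInst *> &Assumes) {
  for (User *U : V->users()) {
    auto *II = dyn_cast<IntrinsicInst>(U);
    if (II && II->getIntrinsicID() == Intrinsic::assume)
      Assumes.push_back(II);
  }
}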
Diffstat (limited to 'llvm/lib/Analysis/ScalarEvolution.cpp')
-rw-r--r-- llvm/lib/Analysis/ScalarEvolution.cpp | 92
1 file changed, 72 insertions(+), 20 deletions(-)
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 7962222a210..29a950b67eb 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -1212,6 +1212,7 @@ const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
Op, Ty);
UniqueSCEVs.InsertNode(S, IP);
+ addAffectedFromOperands(S);
return S;
}
@@ -1598,7 +1599,7 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
// these to prove lack of overflow. Use this fact to avoid
// doing extra work that may not pay off.
if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
- !AC.assumptions().empty()) {
+ !AffectedMap.empty()) {
// If the backedge is guarded by a comparison with the pre-inc
// value the addrec is safe. Also, if the entry is guarded by
// a comparison with the start value and the backedge is
@@ -1664,6 +1665,7 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
Op, Ty);
UniqueSCEVs.InsertNode(S, IP);
+ addAffectedFromOperands(S);
return S;
}
@@ -1833,7 +1835,7 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
// doing extra work that may not pay off.
if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
- !AC.assumptions().empty()) {
+ !AffectedMap.empty()) {
// If the backedge is guarded by a comparison with the pre-inc
// value the addrec is safe. Also, if the entry is guarded by
// a comparison with the start value and the backedge is
@@ -1891,6 +1893,7 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
Op, Ty);
UniqueSCEVs.InsertNode(S, IP);
+ addAffectedFromOperands(S);
return S;
}
@@ -2444,6 +2447,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
S = new (SCEVAllocator) SCEVAddExpr(ID.Intern(SCEVAllocator),
O, Ops.size());
UniqueSCEVs.InsertNode(S, IP);
+ addAffectedFromOperands(S);
}
S->setNoWrapFlags(Flags);
return S;
@@ -2736,6 +2740,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
O, Ops.size());
UniqueSCEVs.InsertNode(S, IP);
+ addAffectedFromOperands(S);
}
S->setNoWrapFlags(Flags);
return S;
@@ -2856,6 +2861,7 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
LHS, RHS);
UniqueSCEVs.InsertNode(S, IP);
+ addAffectedFromOperands(S);
return S;
}
@@ -3036,6 +3042,7 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator),
O, Operands.size(), L);
UniqueSCEVs.InsertNode(S, IP);
+ addAffectedFromOperands(S);
}
S->setNoWrapFlags(Flags);
return S;
@@ -3191,6 +3198,7 @@ ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator),
O, Ops.size());
UniqueSCEVs.InsertNode(S, IP);
+ addAffectedFromOperands(S);
return S;
}
@@ -3292,6 +3300,7 @@ ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator),
O, Ops.size());
UniqueSCEVs.InsertNode(S, IP);
+ addAffectedFromOperands(S);
return S;
}
@@ -3492,9 +3501,38 @@ const SCEV *ScalarEvolution::getSCEV(Value *V) {
ExprValueMap[Stripped].insert({V, Offset});
}
}
+
+ // If this value is an instruction or an argument, and might be affected by
+ // an assumption, add its SCEV to the AffectedMap.
+ if (isa<Instruction>(V) || isa<Argument>(V)) {
+ for (auto *U : V->users()) {
+ auto *II = dyn_cast<IntrinsicInst>(U);
+ if (!II)
+ continue;
+ if (II->getIntrinsicID() != Intrinsic::assume)
+ continue;
+
+ AffectedMap[S].insert(II);
+ }
+ }
+
return S;
}
+// If one of this SCEV's operands is in the AffectedMap (meaning that it might
+// be affected by an assumption), then this SCEV might be affected by the same
+// assumption.
+void ScalarEvolution::addAffectedFromOperands(const SCEV *S) {
+ if (auto *NS = dyn_cast<SCEVNAryExpr>(S))
+ for (auto *Op : NS->operands()) {
+ auto AMI = AffectedMap.find(Op);
+ if (AMI == AffectedMap.end())
+ continue;
+
+ AffectedMap[S].insert(AMI->second.begin(), AMI->second.end());
+ }
+}
+
const SCEV *ScalarEvolution::getExistingSCEV(Value *V) {
assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
@@ -7926,16 +7964,23 @@ ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
}
// Check conditions due to any @llvm.assume intrinsics.
- for (auto &AssumeVH : AC.assumptions()) {
- if (!AssumeVH)
- continue;
- auto *CI = cast<CallInst>(AssumeVH);
- if (!DT.dominates(CI, Latch->getTerminator()))
- continue;
+ auto CheckAssumptions = [&](const SCEV *S) {
+ auto AMI = AffectedMap.find(S);
+ if (AMI != AffectedMap.end())
+ for (auto *Assume : AMI->second) {
+ auto *CI = cast<CallInst>(Assume);
+ if (!DT.dominates(CI, Latch->getTerminator()))
+ continue;
- if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
- return true;
- }
+ if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
+ return true;
+ }
+
+ return false;
+ };
+
+ if (CheckAssumptions(LHS) || CheckAssumptions(RHS))
+ return true;
// If the loop is not reachable from the entry block, we risk running into an
// infinite loop as we walk up into the dom tree. These loops do not matter
@@ -8020,16 +8065,23 @@ ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
}
// Check conditions due to any @llvm.assume intrinsics.
- for (auto &AssumeVH : AC.assumptions()) {
- if (!AssumeVH)
- continue;
- auto *CI = cast<CallInst>(AssumeVH);
- if (!DT.dominates(CI, L->getHeader()))
- continue;
+ auto CheckAssumptions = [&](const SCEV *S) {
+ auto AMI = AffectedMap.find(S);
+ if (AMI != AffectedMap.end())
+ for (auto *Assume : AMI->second) {
+ auto *CI = cast<CallInst>(Assume);
+ if (!DT.dominates(CI, L->getHeader()))
+ continue;
- if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
- return true;
- }
+ if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
+ return true;
+ }
+
+ return false;
+ };
+
+ if (CheckAssumptions(LHS) || CheckAssumptions(RHS))
+ return true;
return false;
}
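For context, a short sketch of the bookkeeping that the new addAffectedFromOperands() hook performs. The AffectedMap member is declared in ScalarEvolution.h, which is outside this diff, so its exact type below is an assumption; the propagation logic mirrors the code above.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/ScalarEvolution.h"

using namespace llvm;

// Assumed shape of the new member: each SCEV maps to the set of
// @llvm.assume calls that might affect its value.
using AffectedMapT = DenseMap<const SCEV *, SmallPtrSet<Value *, 4>>;

// Propagation step: any assume recorded for an operand is also recorded
// for the expression built from it, so a later lookup on the composed
// SCEV (as in the CheckAssumptions lambdas above) finds the assume
// directly instead of re-walking the operand tree.
static void propagateAffected(AffectedMapT &AffectedMap, const SCEV *Expr,
                              ArrayRef<const SCEV *> Operands) {
  for (const SCEV *Op : Operands) {
    auto It = AffectedMap.find(Op);
    if (It != AffectedMap.end())
      AffectedMap[Expr].insert(It->second.begin(), It->second.end());
  }
}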