author    Chandler Carruth <chandlerc@gmail.com>    2015-08-17 02:08:17 +0000
committer Chandler Carruth <chandlerc@gmail.com>    2015-08-17 02:08:17 +0000
commit    2f1fd1658f72af7c3313306626fe8b2957bb4b4b (patch)
tree      9c930b5df4e2049125766d74b126630c00a6b65e /llvm/lib/Analysis/ScalarEvolutionExpander.cpp
parent    b596ba2376a7e4391049bcfca14b60c0cdb18cc8 (diff)
[PM] Port ScalarEvolution to the new pass manager.
This change makes ScalarEvolution a stand-alone object and just produces one from a pass as needed. Making this work well requires making the object movable, using references instead of overwritten pointers in a number of places, and other refactorings.

I've also wired it up to the new pass manager and added a RUN line to a test to exercise it under the new pass manager. This includes basic printing support much like with other analyses.

But there is a big and somewhat scary change here. Prior to this patch ScalarEvolution was never *actually* invalidated!!! Re-running the pass just re-wired up the various other analyses and didn't remove any of the existing entries in the SCEV caches or clear out anything at all. This might seem OK, as everything in SCEV that can use ValueHandles does so to track updates to the values that serve as SCEV keys. However, this still means that as we ran SCEV over each function in the module, we kept accumulating more and more SCEVs into the cache. At the end, we would have a SCEV cache with every value that we ever needed a SCEV for in the entire module!!! Yowzers. The releaseMemory routine would dump all of this, but that isn't really called during normal runs of the pipeline as far as I can see.

To make matters worse, there *is* actually a key that we don't update with value handles -- there is a map keyed off of Loop*s. Because LoopInfo *does* release its memory from run to run, it is entirely possible to run SCEV over one function, then over another function, and then look up a Loop* from the second function but find an entry inserted for the first function! Ouch.

To make matters still worse, there are plenty of updates that *don't* trip a value handle. It seems incredibly unlikely that today GVN or another pass that invalidates SCEV can update values in *just* such a way that a subsequent run of SCEV will incorrectly find lookups in a cache, but it is theoretically possible and would be a nightmare to debug.

With this refactoring, I've fixed all this by actually destroying and recreating the ScalarEvolution object from run to run. Technically, this could increase the amount of malloc traffic we see, but then again it is also technically correct. ;] I don't actually think we're suffering from tons of malloc traffic from SCEV, because if we were, the fact that we never clear the memory would seem more likely to have come up as an actual problem before now.

So, I've made the simple fix here. If in fact there are serious issues with too much allocation and deallocation, I can work on a clever fix that preserves the allocations (while clearing the data) between each run, but I'd prefer to do that kind of optimization with a test case / benchmark that shows why we need such cleverness (and that can verify that we actually make it faster). It's possible that this will make some things faster by giving the SCEV caches higher locality (due to being significantly smaller), so until there is a clear benchmark, I think the simple change is best.

Differential Revision: http://reviews.llvm.org/D12063

llvm-svn: 245193
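[Editor's illustration] To make the Loop*-keyed invalidation hazard concrete, here is a minimal, self-contained C++ sketch of the failure mode described above and of the shape of the fix. The types (ToyLoop, StaleSCEV, PerRunSCEV) are hypothetical stand-ins, not the real llvm::ScalarEvolution or llvm::Loop; the point is only that a cache keyed by raw loop pointers inside a long-lived analysis object can outlive the loops themselves, while an analysis object rebuilt per run cannot.

    // Hypothetical stand-ins (ToyLoop, StaleSCEV, PerRunSCEV); not LLVM code.
    #include <iostream>
    #include <map>
    #include <string>

    struct ToyLoop { std::string Name; };   // very loose model of llvm::Loop

    // Old shape: one long-lived analysis whose caches are never cleared between runs.
    struct StaleSCEV {
      std::map<const ToyLoop *, std::string> BackedgeCache;   // keyed by raw loop pointers
      void analyze(const ToyLoop &L) { BackedgeCache[&L] = "trip count of " + L.Name; }
    };

    // New shape after this commit: a per-run object, so every cache dies with
    // the run that populated it and cannot leak into the next function's run.
    struct PerRunSCEV {
      std::map<const ToyLoop *, std::string> BackedgeCache;
      void analyze(const ToyLoop &L) { BackedgeCache[&L] = "trip count of " + L.Name; }
    };

    int main() {
      StaleSCEV SE;                     // lives across "function" boundaries
      {
        ToyLoop FirstFnLoop{"f.loop"};
        SE.analyze(FirstFnLoop);
      }                                 // FirstFnLoop freed; SE keeps a dangling key
      std::cout << "entries surviving the first function: "
                << SE.BackedgeCache.size() << '\n';   // prints 1

      {
        PerRunSCEV FreshSE;             // rebuilt for the "second function"
        ToyLoop SecondFnLoop{"g.loop"};
        FreshSE.analyze(SecondFnLoop);
      }                                 // cache destroyed together with the object
      return 0;
    }

If a newly allocated loop happens to reuse the freed loop's address, the long-lived cache in the first shape would answer queries for the second function with data cached for the first, which is exactly the Loop*-keyed bug the commit message calls out.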
Diffstat (limited to 'llvm/lib/Analysis/ScalarEvolutionExpander.cpp')
-rw-r--r--  llvm/lib/Analysis/ScalarEvolutionExpander.cpp | 67
1 file changed, 32 insertions(+), 35 deletions(-)
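[Editor's illustration] The diff below is largely the mechanical fallout in SCEVExpander of ScalarEvolution now holding its dependent analyses by reference rather than by re-wired pointer: `SE.DT->dominates(...)` becomes `SE.DT.dominates(...)`, and call sites that still want a pointer (hoistBeforePos, SimplifyInstruction) take `&SE.DT`. A minimal sketch of that member change, using simplified hypothetical types (ToyDomTree, OldSCEV, NewSCEV) rather than the real LLVM declarations:

    // Hypothetical simplified types; not the actual llvm::ScalarEvolution members.
    struct ToyDomTree {
      bool dominates(const int *A, const int *B) const { return A != nullptr && B != nullptr; }
    };

    // Before: pointer member, overwritten each time the pass re-runs.
    struct OldSCEV {
      ToyDomTree *DT = nullptr;
    };

    // After: reference member bound once at construction; the whole object is
    // rebuilt (or moved) per run instead of being re-pointed at new analyses.
    struct NewSCEV {
      ToyDomTree &DT;
      explicit NewSCEV(ToyDomTree &DomTree) : DT(DomTree) {}
    };

    int main() {
      ToyDomTree DomTree;
      int A = 0, B = 0;

      OldSCEV OldSE;
      OldSE.DT = &DomTree;
      bool Before = OldSE.DT->dominates(&A, &B);   // '->' as in the '-' lines below

      NewSCEV NewSE(DomTree);
      bool After = NewSE.DT.dominates(&A, &B);     // '.' as in the '+' lines below
      return (Before && After) ? 0 : 1;
    }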
diff --git a/llvm/lib/Analysis/ScalarEvolutionExpander.cpp b/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
index 7789423cf6c..d0b36431130 100644
--- a/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -80,7 +80,7 @@ Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
// We assert at the end of the function since IP might point to an
// instruction with different dominance properties than a cast
// (an invoke for example) and not dominate BIP (but the cast does).
- assert(SE.DT->dominates(Ret, BIP));
+ assert(SE.DT.dominates(Ret, BIP));
rememberInstruction(Ret);
return Ret;
@@ -186,7 +186,7 @@ Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
BuilderType::InsertPointGuard Guard(Builder);
// Move the insertion point out of as many loops as we can.
- while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
+ while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
BasicBlock *Preheader = L->getLoopPreheader();
if (!Preheader) break;
@@ -485,7 +485,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));
assert(!isa<Instruction>(V) ||
- SE.DT->dominates(cast<Instruction>(V), Builder.GetInsertPoint()));
+ SE.DT.dominates(cast<Instruction>(V), Builder.GetInsertPoint()));
// Expand the operands for a plain byte offset.
Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);
@@ -519,7 +519,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
BuilderType::InsertPointGuard Guard(Builder);
// Move the insertion point out of as many loops as we can.
- while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
+ while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
BasicBlock *Preheader = L->getLoopPreheader();
if (!Preheader) break;
@@ -539,7 +539,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
BuilderType::InsertPoint SaveInsertPt = Builder.saveIP();
// Move the insertion point out of as many loops as we can.
- while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
+ while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
if (!L->isLoopInvariant(V)) break;
bool AnyIndexNotLoopInvariant = false;
@@ -605,7 +605,7 @@ const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
return nullptr;
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
- return Pair.first->second = SE.LI->getLoopFor(I->getParent());
+ return Pair.first->second = SE.LI.getLoopFor(I->getParent());
// A non-instruction has no relevant loops.
return nullptr;
}
@@ -615,7 +615,7 @@ const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
L = AR->getLoop();
for (SCEVNAryExpr::op_iterator I = N->op_begin(), E = N->op_end();
I != E; ++I)
- L = PickMostRelevantLoop(L, getRelevantLoop(*I), *SE.DT);
+ L = PickMostRelevantLoop(L, getRelevantLoop(*I), SE.DT);
return RelevantLoops[N] = L;
}
if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
@@ -623,10 +623,8 @@ const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
return RelevantLoops[C] = Result;
}
if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
- const Loop *Result =
- PickMostRelevantLoop(getRelevantLoop(D->getLHS()),
- getRelevantLoop(D->getRHS()),
- *SE.DT);
+ const Loop *Result = PickMostRelevantLoop(
+ getRelevantLoop(D->getLHS()), getRelevantLoop(D->getRHS()), SE.DT);
return RelevantLoops[D] = Result;
}
llvm_unreachable("Unexpected SCEV type!");
@@ -681,7 +679,7 @@ Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
// Sort by loop. Use a stable sort so that constants follow non-constants and
// pointer operands precede non-pointer operands.
- std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));
+ std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(SE.DT));
// Emit instructions to add all the operands. Hoist as much as possible
// out of loops, and form meaningful getelementptrs where possible.
@@ -749,7 +747,7 @@ Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));
// Sort by loop. Use a stable sort so that constants follow non-constants.
- std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));
+ std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(SE.DT));
// Emit instructions to mul all the operands. Hoist as much as possible
// out of loops.
@@ -836,7 +834,7 @@ bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
for (User::op_iterator OI = IncV->op_begin()+1,
OE = IncV->op_end(); OI != OE; ++OI)
if (Instruction *OInst = dyn_cast<Instruction>(OI))
- if (!SE.DT->dominates(OInst, IVIncInsertPos))
+ if (!SE.DT.dominates(OInst, IVIncInsertPos))
return false;
}
// Advance to the next instruction.
@@ -875,7 +873,7 @@ Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
case Instruction::Add:
case Instruction::Sub: {
Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
- if (!OInst || SE.DT->dominates(OInst, InsertPos))
+ if (!OInst || SE.DT.dominates(OInst, InsertPos))
return dyn_cast<Instruction>(IncV->getOperand(0));
return nullptr;
}
@@ -887,7 +885,7 @@ Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
if (isa<Constant>(*I))
continue;
if (Instruction *OInst = dyn_cast<Instruction>(*I)) {
- if (!SE.DT->dominates(OInst, InsertPos))
+ if (!SE.DT.dominates(OInst, InsertPos))
return nullptr;
}
if (allowScale) {
@@ -914,13 +912,13 @@ Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
/// it available to other uses in this loop. Recursively hoist any operands,
/// until we reach a value that dominates InsertPos.
bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
- if (SE.DT->dominates(IncV, InsertPos))
+ if (SE.DT.dominates(IncV, InsertPos))
return true;
// InsertPos must itself dominate IncV so that IncV's new position satisfies
// its existing users.
- if (isa<PHINode>(InsertPos)
- || !SE.DT->dominates(InsertPos->getParent(), IncV->getParent()))
+ if (isa<PHINode>(InsertPos) ||
+ !SE.DT.dominates(InsertPos->getParent(), IncV->getParent()))
return false;
// Check that the chain of IV operands leading back to Phi can be hoisted.
@@ -932,7 +930,7 @@ bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
// IncV is safe to hoist.
IVIncs.push_back(IncV);
IncV = Oper;
- if (SE.DT->dominates(IncV, InsertPos))
+ if (SE.DT.dominates(IncV, InsertPos))
break;
}
for (SmallVectorImpl<Instruction*>::reverse_iterator I = IVIncs.rbegin(),
@@ -1086,8 +1084,9 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
// Only try partially matching scevs that need truncation and/or
// step-inversion if we know this loop is outside the current loop.
- bool TryNonMatchingSCEV = IVIncInsertLoop &&
- SE.DT->properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());
+ bool TryNonMatchingSCEV =
+ IVIncInsertLoop &&
+ SE.DT.properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());
for (BasicBlock::iterator I = L->getHeader()->begin();
PHINode *PN = dyn_cast<PHINode>(I); ++I) {
@@ -1144,7 +1143,7 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
// Potentially, move the increment. We have made sure in
// isExpandedAddRecExprPHI or hoistIVInc that this is possible.
if (L == IVIncInsertLoop)
- hoistBeforePos(SE.DT, IncV, IVIncInsertPos, AddRecPhiMatch);
+ hoistBeforePos(&SE.DT, IncV, IVIncInsertPos, AddRecPhiMatch);
// Ok, the add recurrence looks usable.
// Remember this PHI, even in post-inc mode.
@@ -1174,8 +1173,8 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
// StartV must be hoisted into L's preheader to dominate the new phi.
assert(!isa<Instruction>(StartV) ||
- SE.DT->properlyDominates(cast<Instruction>(StartV)->getParent(),
- L->getHeader()));
+ SE.DT.properlyDominates(cast<Instruction>(StartV)->getParent(),
+ L->getHeader()));
// Expand code for the step value. Do this before creating the PHI so that PHI
// reuse code doesn't see an incomplete PHI.
@@ -1251,9 +1250,8 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
if (PostIncLoops.count(L)) {
PostIncLoopSet Loops;
Loops.insert(L);
- Normalized =
- cast<SCEVAddRecExpr>(TransformForPostIncUse(Normalize, S, nullptr,
- nullptr, Loops, SE, *SE.DT));
+ Normalized = cast<SCEVAddRecExpr>(TransformForPostIncUse(
+ Normalize, S, nullptr, nullptr, Loops, SE, SE.DT));
}
// Strip off any non-loop-dominating component from the addrec start.
@@ -1303,9 +1301,8 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
// For an expansion to use the postinc form, the client must call
// expandCodeFor with an InsertPoint that is either outside the PostIncLoop
// or dominated by IVIncInsertPos.
- if (isa<Instruction>(Result)
- && !SE.DT->dominates(cast<Instruction>(Result),
- Builder.GetInsertPoint())) {
+ if (isa<Instruction>(Result) &&
+ !SE.DT.dominates(cast<Instruction>(Result), Builder.GetInsertPoint())) {
// The induction variable's postinc expansion does not dominate this use.
// IVUsers tries to prevent this case, so it is rare. However, it can
// happen when an IVUser outside the loop is not dominated by the latch
@@ -1608,7 +1605,7 @@ Value *SCEVExpander::expand(const SCEV *S) {
// Compute an insertion point for this SCEV object. Hoist the instructions
// as far out in the loop nest as possible.
Instruction *InsertPt = Builder.GetInsertPoint();
- for (Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock()); ;
+ for (Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock());;
L = L->getParentLoop())
if (SE.isLoopInvariant(S, L)) {
if (!L) break;
@@ -1719,7 +1716,7 @@ unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
// Fold constant phis. They may be congruent to other constant phis and
// would confuse the logic below that expects proper IVs.
- if (Value *V = SimplifyInstruction(Phi, DL, SE.TLI, SE.DT, SE.AC)) {
+ if (Value *V = SimplifyInstruction(Phi, DL, &SE.TLI, &SE.DT, &SE.AC)) {
Phi->replaceAllUsesWith(V);
DeadInsts.emplace_back(Phi);
++NumElim;
@@ -1832,10 +1829,10 @@ Value *SCEVExpander::findExistingExpansion(const SCEV *S,
TrueBB, FalseBB)))
continue;
- if (SE.getSCEV(LHS) == S && SE.DT->dominates(LHS, At))
+ if (SE.getSCEV(LHS) == S && SE.DT.dominates(LHS, At))
return LHS;
- if (SE.getSCEV(RHS) == S && SE.DT->dominates(RHS, At))
+ if (SE.getSCEV(RHS) == S && SE.DT.dominates(RHS, At))
return RHS;
}