Diffstat (limited to 'llvm/lib/Analysis/TargetTransformInfo.cpp')
 llvm/lib/Analysis/TargetTransformInfo.cpp | 85 ++++++++++++++++++++++++++++++
 1 file changed, 85 insertions(+), 0 deletions(-)
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
index 83840aa7fbb..693caadcfc2 100644
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -40,6 +40,91 @@ struct NoTTIImpl : TargetTransformInfoImplCRTPBase<NoTTIImpl> {
};
}
+bool HardwareLoopInfo::isHardwareLoopCandidate(ScalarEvolution &SE,
+                                               LoopInfo &LI, DominatorTree &DT,
+                                               bool ForceNestedLoop,
+                                               bool ForceHardwareLoopPHI) {
+  SmallVector<BasicBlock *, 4> ExitingBlocks;
+  L->getExitingBlocks(ExitingBlocks);
+
+  for (SmallVectorImpl<BasicBlock *>::iterator I = ExitingBlocks.begin(),
+                                               IE = ExitingBlocks.end();
+       I != IE; ++I) {
+    BasicBlock *BB = *I;
+
+    // If we pass the updated counter back through a phi, we need to know
+    // which latch the updated value will be coming from.
+    if (!L->isLoopLatch(BB)) {
+      if (ForceHardwareLoopPHI || CounterInReg)
+        continue;
+    }
+
+    const SCEV *EC = SE.getExitCount(L, BB);
+    if (isa<SCEVCouldNotCompute>(EC))
+      continue;
+    if (const SCEVConstant *ConstEC = dyn_cast<SCEVConstant>(EC)) {
+      if (ConstEC->getValue()->isZero())
+        continue;
+    } else if (!SE.isLoopInvariant(EC, L))
+      continue;
+
+    if (SE.getTypeSizeInBits(EC->getType()) > CountType->getBitWidth())
+      continue;
+
+    // If this exiting block is contained in a nested loop, it is not eligible
+    // for insertion of the branch-and-decrement since the inner loop would
+    // end up messing up the value in the CTR.
+    if (!IsNestingLegal && LI.getLoopFor(BB) != L && !ForceNestedLoop)
+      continue;
+
+    // We now have a loop-invariant count of loop iterations (which is not the
+    // constant zero) for which we know that this loop will not exit via this
+    // exiting block.
+
+    // We need to make sure that this block will run on every loop iteration.
+    // For this to be true, we must dominate all blocks with backedges. Such
+    // blocks are in-loop predecessors to the header block.
+    bool NotAlways = false;
+    for (pred_iterator PI = pred_begin(L->getHeader()),
+                       PIE = pred_end(L->getHeader());
+         PI != PIE; ++PI) {
+      if (!L->contains(*PI))
+        continue;
+
+      if (!DT.dominates(*I, *PI)) {
+        NotAlways = true;
+        break;
+      }
+    }
+
+    if (NotAlways)
+      continue;
+
+    // Make sure this block ends with a conditional branch.
+    Instruction *TI = BB->getTerminator();
+    if (!TI)
+      continue;
+
+    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
+      if (!BI->isConditional())
+        continue;
+
+      ExitBranch = BI;
+    } else
+      continue;
+
+    // Note that this block may not be the loop latch block, even if the loop
+    // has a latch block.
+    ExitBlock = *I;
+    ExitCount = EC;
+    break;
+  }
+
+  if (!ExitBlock)
+    return false;
+  return true;
+}
+
TargetTransformInfo::TargetTransformInfo(const DataLayout &DL)
: TTIImpl(new Model<NoTTIImpl>(NoTTIImpl(DL))) {}
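
For readers who want to see the new query in context, here is a minimal sketch of how a target pass might drive it. Only the member names used in the diff above (isHardwareLoopCandidate, CountType, CounterInReg, ExitBlock, ExitBranch, ExitCount) come from this change; the surrounding pass wiring, the way HWLoopInfo is prepared, and the helper name tryConvertToHardwareLoop are illustrative assumptions, not part of the commit.

// Sketch only: shows the intended calling pattern for the new helper.
// The helper name and the setup around it are assumptions for illustration.
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Dominators.h"
#include <cassert>

using namespace llvm;

// Hypothetical helper (not part of this commit). HWLoopInfo is expected to
// arrive with its loop and counter type (CountType) already filled in by
// the caller/target.
static bool tryConvertToHardwareLoop(ScalarEvolution &SE, LoopInfo &LI,
                                     DominatorTree &DT,
                                     HardwareLoopInfo &HWLoopInfo) {
  if (!HWLoopInfo.isHardwareLoopCandidate(SE, LI, DT,
                                          /*ForceNestedLoop=*/false,
                                          /*ForceHardwareLoopPHI=*/false))
    return false;

  // On success the query has recorded the exiting block it chose, that
  // block's conditional branch, and the loop-invariant exit count.
  assert(HWLoopInfo.ExitBlock && HWLoopInfo.ExitBranch && HWLoopInfo.ExitCount);

  // A real pass would now materialize the counter (in a register or through
  // a PHI, depending on CounterInReg) and rewrite ExitBranch to test it.
  return true;
}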