author     Chen Zheng <czhengsz@cn.ibm.com>   2019-06-19 01:26:31 +0000
committer  Chen Zheng <czhengsz@cn.ibm.com>   2019-06-19 01:26:31 +0000
commit     c5b918de58c03f226aab326394ab019708dbdbae (patch)
tree       2908f3301b3b1345569e05b8d5af97b742135add /llvm/lib
parent     9cac4e6d1403554b06ec2fc9d834087b1234b695 (diff)
[NFC] Move some hardware loop checking code to a common place for others to use.

Differential Revision: https://reviews.llvm.org/D63478
llvm-svn: 363758
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Analysis/TargetTransformInfo.cpp           | 85
-rw-r--r--  llvm/lib/CodeGen/HardwareLoops.cpp                  | 92
-rw-r--r--  llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp      |  2
-rw-r--r--  llvm/lib/Target/ARM/ARMTargetTransformInfo.h        |  2
-rw-r--r--  llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp  |  2
-rw-r--r--  llvm/lib/Target/PowerPC/PPCTargetTransformInfo.h    |  2
6 files changed, 99 insertions(+), 86 deletions(-)
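For orientation, the patch turns the nested TTI::HardwareLoopInfo into a standalone HardwareLoopInfo and gives it the candidate check. The matching declaration lives in a header outside this diffstat (it is limited to llvm/lib), so the sketch below is reconstructed only from the fields and signatures used in the diff; member defaults and anything not used here are assumptions.

// Rough shape of the relocated struct, inferred from its uses in this diff.
struct HardwareLoopInfo {
  HardwareLoopInfo(Loop *L) : L(L) {}
  Loop *L = nullptr;                // loop being examined
  BasicBlock *ExitBlock = nullptr;  // filled in by isHardwareLoopCandidate()
  BranchInst *ExitBranch = nullptr; // filled in by isHardwareLoopCandidate()
  const SCEV *ExitCount = nullptr;  // filled in by isHardwareLoopCandidate()
  IntegerType *CountType = nullptr; // counter width chosen by the target
  bool IsNestingLegal = false;      // target tolerates an inner loop clobbering the counter
  bool CounterInReg = false;        // counter value is carried in a register/PHI

  bool isHardwareLoopCandidate(ScalarEvolution &SE, LoopInfo &LI,
                               DominatorTree &DT, bool ForceNestedLoop,
                               bool ForceHardwareLoopPHI);
};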
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
index 83840aa7fbb..693caadcfc2 100644
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -40,6 +40,91 @@ struct NoTTIImpl : TargetTransformInfoImplCRTPBase<NoTTIImpl> {
};
}
+bool HardwareLoopInfo::isHardwareLoopCandidate(ScalarEvolution &SE,
+ LoopInfo &LI, DominatorTree &DT,
+ bool ForceNestedLoop,
+ bool ForceHardwareLoopPHI) {
+ SmallVector<BasicBlock *, 4> ExitingBlocks;
+ L->getExitingBlocks(ExitingBlocks);
+
+ for (SmallVectorImpl<BasicBlock *>::iterator I = ExitingBlocks.begin(),
+ IE = ExitingBlocks.end();
+ I != IE; ++I) {
+ BasicBlock *BB = *I;
+
+ // If we pass the updated counter back through a phi, we need to know
+ // which latch the updated value will be coming from.
+ if (!L->isLoopLatch(BB)) {
+ if (ForceHardwareLoopPHI || CounterInReg)
+ continue;
+ }
+
+ const SCEV *EC = SE.getExitCount(L, BB);
+ if (isa<SCEVCouldNotCompute>(EC))
+ continue;
+ if (const SCEVConstant *ConstEC = dyn_cast<SCEVConstant>(EC)) {
+ if (ConstEC->getValue()->isZero())
+ continue;
+ } else if (!SE.isLoopInvariant(EC, L))
+ continue;
+
+ if (SE.getTypeSizeInBits(EC->getType()) > CountType->getBitWidth())
+ continue;
+
+ // If this exiting block is contained in a nested loop, it is not eligible
+ // for insertion of the branch-and-decrement since the inner loop would
+ // end up messing up the value in the CTR.
+ if (!IsNestingLegal && LI.getLoopFor(BB) != L && !ForceNestedLoop)
+ continue;
+
+ // We now have a loop-invariant count of loop iterations (which is not the
+ // constant zero) for which we know that this loop will not exit via this
+ // existing block.
+
+ // We need to make sure that this block will run on every loop iteration.
+ // For this to be true, we must dominate all blocks with backedges. Such
+ // blocks are in-loop predecessors to the header block.
+ bool NotAlways = false;
+ for (pred_iterator PI = pred_begin(L->getHeader()),
+ PIE = pred_end(L->getHeader());
+ PI != PIE; ++PI) {
+ if (!L->contains(*PI))
+ continue;
+
+ if (!DT.dominates(*I, *PI)) {
+ NotAlways = true;
+ break;
+ }
+ }
+
+ if (NotAlways)
+ continue;
+
+ // Make sure this blocks ends with a conditional branch.
+ Instruction *TI = BB->getTerminator();
+ if (!TI)
+ continue;
+
+ if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
+ if (!BI->isConditional())
+ continue;
+
+ ExitBranch = BI;
+ } else
+ continue;
+
+ // Note that this block may not be the loop latch block, even if the loop
+ // has a latch block.
+ ExitBlock = *I;
+ ExitCount = EC;
+ break;
+ }
+
+ if (!ExitBlock)
+ return false;
+ return true;
+}
+
TargetTransformInfo::TargetTransformInfo(const DataLayout &DL)
: TTIImpl(new Model<NoTTIImpl>(NoTTIImpl(DL))) {}
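With the check relocated here, a target hook can fill in its constraints and defer the structural legality test to the shared helper instead of re-implementing the exiting-block walk. A minimal sketch of such a caller follows; the function name isStructurallyValidHardwareLoop and the i64 counter type are illustrative, not part of the patch.

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Type.h"
using namespace llvm;

// Illustrative caller: pick a counter type, then let the shared helper decide
// whether the loop has a usable, loop-invariant exit count and exit branch.
static bool isStructurallyValidHardwareLoop(Loop *L, ScalarEvolution &SE,
                                            LoopInfo &LI, DominatorTree &DT) {
  HardwareLoopInfo HWLoopInfo(L);
  // The candidate check compares the exit-count width against CountType, so a
  // counter type must be set first; i64 is just an example choice here.
  HWLoopInfo.CountType = Type::getInt64Ty(L->getHeader()->getContext());
  return HWLoopInfo.isHardwareLoopCandidate(SE, LI, DT,
                                            /*ForceNestedLoop=*/false,
                                            /*ForceHardwareLoopPHI=*/false);
}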
diff --git a/llvm/lib/CodeGen/HardwareLoops.cpp b/llvm/lib/CodeGen/HardwareLoops.cpp
index 99191090220..43594915383 100644
--- a/llvm/lib/CodeGen/HardwareLoops.cpp
+++ b/llvm/lib/CodeGen/HardwareLoops.cpp
@@ -101,7 +101,7 @@ namespace {
// Given that the target believes the loop to be profitable, try to
// convert it.
- bool TryConvertLoop(TTI::HardwareLoopInfo &HWLoopInfo);
+ bool TryConvertLoop(HardwareLoopInfo &HWLoopInfo);
private:
ScalarEvolution *SE = nullptr;
@@ -139,7 +139,7 @@ namespace {
void UpdateBranch(Value *EltsRem);
public:
- HardwareLoop(TTI::HardwareLoopInfo &Info, ScalarEvolution &SE,
+ HardwareLoop(HardwareLoopInfo &Info, ScalarEvolution &SE,
const DataLayout &DL) :
SE(SE), DL(DL), L(Info.L), M(L->getHeader()->getModule()),
ExitCount(Info.ExitCount),
@@ -205,7 +205,7 @@ bool HardwareLoops::TryConvertLoop(Loop *L) {
if (containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI))
return false;
- TTI::HardwareLoopInfo HWLoopInfo(L);
+ HardwareLoopInfo HWLoopInfo(L);
if (TTI->isHardwareLoopProfitable(L, *SE, *AC, LibInfo, HWLoopInfo) ||
ForceHardwareLoops) {
@@ -225,91 +225,19 @@ bool HardwareLoops::TryConvertLoop(Loop *L) {
return false;
}
-bool HardwareLoops::TryConvertLoop(TTI::HardwareLoopInfo &HWLoopInfo) {
+bool HardwareLoops::TryConvertLoop(HardwareLoopInfo &HWLoopInfo) {
Loop *L = HWLoopInfo.L;
LLVM_DEBUG(dbgs() << "HWLoops: Try to convert profitable loop: " << *L);
- SmallVector<BasicBlock*, 4> ExitingBlocks;
- L->getExitingBlocks(ExitingBlocks);
-
- for (SmallVectorImpl<BasicBlock *>::iterator I = ExitingBlocks.begin(),
- IE = ExitingBlocks.end(); I != IE; ++I) {
- BasicBlock *BB = *I;
-
- // If we pass the updated counter back through a phi, we need to know
- // which latch the updated value will be coming from.
- if (!L->isLoopLatch(BB)) {
- if ((ForceHardwareLoopPHI.getNumOccurrences() && ForceHardwareLoopPHI) ||
- HWLoopInfo.CounterInReg)
- continue;
- }
-
- const SCEV *EC = SE->getExitCount(L, BB);
- if (isa<SCEVCouldNotCompute>(EC))
- continue;
- if (const SCEVConstant *ConstEC = dyn_cast<SCEVConstant>(EC)) {
- if (ConstEC->getValue()->isZero())
- continue;
- } else if (!SE->isLoopInvariant(EC, L))
- continue;
-
- if (SE->getTypeSizeInBits(EC->getType()) >
- HWLoopInfo.CountType->getBitWidth())
- continue;
-
- // If this exiting block is contained in a nested loop, it is not eligible
- // for insertion of the branch-and-decrement since the inner loop would
- // end up messing up the value in the CTR.
- if (!HWLoopInfo.IsNestingLegal && LI->getLoopFor(BB) != L &&
- !ForceNestedLoop)
- continue;
-
- // We now have a loop-invariant count of loop iterations (which is not the
- // constant zero) for which we know that this loop will not exit via this
- // existing block.
-
- // We need to make sure that this block will run on every loop iteration.
- // For this to be true, we must dominate all blocks with backedges. Such
- // blocks are in-loop predecessors to the header block.
- bool NotAlways = false;
- for (pred_iterator PI = pred_begin(L->getHeader()),
- PIE = pred_end(L->getHeader()); PI != PIE; ++PI) {
- if (!L->contains(*PI))
- continue;
-
- if (!DT->dominates(*I, *PI)) {
- NotAlways = true;
- break;
- }
- }
-
- if (NotAlways)
- continue;
-
- // Make sure this blocks ends with a conditional branch.
- Instruction *TI = BB->getTerminator();
- if (!TI)
- continue;
-
- if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
- if (!BI->isConditional())
- continue;
-
- HWLoopInfo.ExitBranch = BI;
- } else
- continue;
-
- // Note that this block may not be the loop latch block, even if the loop
- // has a latch block.
- HWLoopInfo.ExitBlock = *I;
- HWLoopInfo.ExitCount = EC;
- break;
- }
-
- if (!HWLoopInfo.ExitBlock)
+ if (!HWLoopInfo.isHardwareLoopCandidate(*SE, *LI, *DT, ForceNestedLoop,
+ ForceHardwareLoopPHI))
return false;
+ assert(
+ (HWLoopInfo.ExitBlock && HWLoopInfo.ExitBranch && HWLoopInfo.ExitCount) &&
+ "Hardware Loop must have set exit info.");
+
BasicBlock *Preheader = L->getLoopPreheader();
// If we don't have a preheader, then insert one.
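The new assert above documents the contract downstream code relies on: a successful candidate check leaves ExitBlock, ExitBranch, and ExitCount populated. A small sketch of code depending on that contract is shown below; the helper name dumpHardwareLoopExitInfo is illustrative, not from the patch.

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
using namespace llvm;

// Illustrative consumer: only valid after isHardwareLoopCandidate() returned
// true, at which point all three exit fields are non-null.
static void dumpHardwareLoopExitInfo(const HardwareLoopInfo &Info) {
  assert(Info.ExitBlock && Info.ExitBranch && Info.ExitCount &&
         "call only after a successful candidate check");
  dbgs() << "exiting via " << Info.ExitBlock->getName()
         << ", trip count SCEV: " << *Info.ExitCount << "\n";
}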
diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
index ca905dfc7ff..c3a7c18bb5d 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -696,7 +696,7 @@ bool ARMTTIImpl::isLoweredToCall(const Function *F) {
bool ARMTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
AssumptionCache &AC,
TargetLibraryInfo *LibInfo,
- TTI::HardwareLoopInfo &HWLoopInfo) {
+ HardwareLoopInfo &HWLoopInfo) {
// Low-overhead branches are only supported in the 'low-overhead branch'
// extension of v8.1-m.
if (!ST->hasLOB() || DisableLowOverheadLoops)
diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
index 4f68a0b244d..52f6ea4a6e2 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
@@ -184,7 +184,7 @@ public:
bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
AssumptionCache &AC,
TargetLibraryInfo *LibInfo,
- TTI::HardwareLoopInfo &HWLoopInfo);
+ HardwareLoopInfo &HWLoopInfo);
void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
TTI::UnrollingPreferences &UP);
diff --git a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
index bc2983836f7..c312e53b354 100644
--- a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
@@ -492,7 +492,7 @@ bool PPCTTIImpl::mightUseCTR(BasicBlock *BB,
bool PPCTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
AssumptionCache &AC,
TargetLibraryInfo *LibInfo,
- TTI::HardwareLoopInfo &HWLoopInfo) {
+ HardwareLoopInfo &HWLoopInfo) {
const PPCTargetMachine &TM = ST->getTargetMachine();
TargetSchedModel SchedModel;
SchedModel.init(ST);
diff --git a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.h b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.h
index 93a9968b5fa..04063404e2a 100644
--- a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.h
+++ b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.h
@@ -56,7 +56,7 @@ public:
bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
AssumptionCache &AC,
TargetLibraryInfo *LibInfo,
- TTI::HardwareLoopInfo &HWLoopInfo);
+ HardwareLoopInfo &HWLoopInfo);
void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
TTI::UnrollingPreferences &UP);