path: root/llvm/lib/Analysis/ScalarEvolution.cpp
author     Teresa Johnson <tejohnson@google.com>  2019-03-12 18:28:05 +0000
committer  Teresa Johnson <tejohnson@google.com>  2019-03-12 18:28:05 +0000
commit     4ab0a9f0a4e7daff3fc54fd48d429c6c2017a458 (patch)
tree       0c2348375e6bd13eb2c6e9cef57b3b6fb9bb00fb /llvm/lib/Analysis/ScalarEvolution.cpp
parent     9dc2c1d564b25e82a002e312ed9d543637aae027 (diff)
[SCEV] Use depth limit for trunc analysis
Summary:
This fixes an extremely long compile time caused by recursive analysis
of truncs, which were not previously subject to any depth limits unlike
some of the other ops. I decided to use the same control used for
sext/zext, since the routines analyzing these are sometimes mutually
recursive with the trunc analysis.

Reviewers: mkazantsev, sanjoy

Subscribers: sanjoy, jdoerfert, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D58994

llvm-svn: 355949
Diffstat (limited to 'llvm/lib/Analysis/ScalarEvolution.cpp')
-rw-r--r--  llvm/lib/Analysis/ScalarEvolution.cpp | 65
1 file changed, 36 insertions(+), 29 deletions(-)
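For context on the mechanics: the patch threads an explicit Depth parameter
through the mutually recursive cast-folding routines and bails out once a
shared cap is crossed, returning an unfolded trunc node instead of analyzing
further. A minimal standalone sketch of that pattern (hypothetical Expr type
and getTrunc helper, not the SCEV implementation):

    #include <memory>

    // Hypothetical stand-in for a SCEV-like expression node.
    struct Expr {
      enum Kind { Leaf, Trunc, Add } K;
      std::shared_ptr<Expr> LHS, RHS;
    };

    static const unsigned MaxCastDepth = 8; // mirrors the option's init(8) default

    // Every self-call passes Depth + 1; once the cap is crossed, wrap the
    // operand in an unfolded Trunc node instead of recursing further.
    std::shared_ptr<Expr> getTrunc(const std::shared_ptr<Expr> &Op, unsigned Depth) {
      if (Depth > MaxCastDepth)
        return std::make_shared<Expr>(Expr{Expr::Trunc, Op, nullptr});
      if (Op->K == Expr::Trunc) // trunc(trunc(x)) --> trunc(x)
        return getTrunc(Op->LHS, Depth + 1);
      if (Op->K == Expr::Add)   // trunc(a + b) --> trunc(a) + trunc(b)
        return std::make_shared<Expr>(Expr{
            Expr::Add, getTrunc(Op->LHS, Depth + 1), getTrunc(Op->RHS, Depth + 1)});
      return std::make_shared<Expr>(Expr{Expr::Trunc, Op, nullptr});
    }

    int main() {
      auto X = std::make_shared<Expr>(Expr{Expr::Leaf, nullptr, nullptr});
      getTrunc(X, 0); // folds freely; deep inputs stop once Depth > MaxCastDepth
    }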
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index f14f0463000..8726ab35403 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -202,9 +202,9 @@ static cl::opt<unsigned> MaxConstantEvolvingDepth(
cl::desc("Maximum depth of recursive constant evolving"), cl::init(32));
static cl::opt<unsigned>
- MaxExtDepth("scalar-evolution-max-ext-depth", cl::Hidden,
- cl::desc("Maximum depth of recursive SExt/ZExt"),
- cl::init(8));
+ MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden,
+ cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"),
+ cl::init(8));
static cl::opt<unsigned>
MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden,
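Since cl::opt<unsigned> converts implicitly to unsigned, the renamed cap reads
exactly like the old one at its use sites (Depth > MaxCastDepth); only the flag
spelling changes for anyone who was passing -scalar-evolution-max-ext-depth. A
hedged sketch of that declare-and-consume pattern (illustrative option name,
not a real flag):

    #include "llvm/Support/CommandLine.h"

    // Hypothetical mirror of the option above: hidden, defaulting to 8.
    static llvm::cl::opt<unsigned>
        MyCastDepth("my-max-cast-depth", llvm::cl::Hidden,
                    llvm::cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"),
                    llvm::cl::init(8));

    bool pastCap(unsigned Depth) {
      return Depth > MyCastDepth; // cl::opt<unsigned> compares like a plain unsigned
    }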
@@ -1234,8 +1234,8 @@ const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
// SCEV Expression folder implementations
//===----------------------------------------------------------------------===//
-const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
- Type *Ty) {
+const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty,
+ unsigned Depth) {
assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
"This is not a truncating conversion!");
assert(isSCEVable(Ty) &&
@@ -1256,15 +1256,23 @@ const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
// trunc(trunc(x)) --> trunc(x)
if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
- return getTruncateExpr(ST->getOperand(), Ty);
+ return getTruncateExpr(ST->getOperand(), Ty, Depth + 1);
// trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
- return getTruncateOrSignExtend(SS->getOperand(), Ty);
+ return getTruncateOrSignExtend(SS->getOperand(), Ty, Depth + 1);
// trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
- return getTruncateOrZeroExtend(SZ->getOperand(), Ty);
+ return getTruncateOrZeroExtend(SZ->getOperand(), Ty, Depth + 1);
+
+ if (Depth > MaxCastDepth) {
+ SCEV *S =
+ new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), Op, Ty);
+ UniqueSCEVs.InsertNode(S, IP);
+ addToLoopUseLists(S);
+ return S;
+ }
// trunc(x1 + ... + xN) --> trunc(x1) + ... + trunc(xN) and
// trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN),
@@ -1276,7 +1284,7 @@ const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
unsigned numTruncs = 0;
for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
++i) {
- const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty);
+ const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty, Depth + 1);
if (!isa<SCEVCastExpr>(CommOp->getOperand(i)) && isa<SCEVTruncateExpr>(S))
numTruncs++;
Operands.push_back(S);
@@ -1300,7 +1308,7 @@ const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
SmallVector<const SCEV *, 4> Operands;
for (const SCEV *Op : AddRec->operands())
- Operands.push_back(getTruncateExpr(Op, Ty));
+ Operands.push_back(getTruncateExpr(Op, Ty, Depth + 1));
return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
}
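The two hunks above are where the compile-time blow-up originated: distributing
a trunc over an n-ary add/mul (or an addrec) re-invokes getTruncateExpr once
per operand, and each result can itself be a sum of casts, so an uncapped walk
can touch exponentially many cast chains. A standalone sketch of just the call
pattern, showing why the cap bounds the work (hypothetical shapes, not SCEV):

    #include <cstdio>

    static const unsigned MaxCastDepth = 8;
    static unsigned long Calls = 0;

    // Model only the recursion: each node fans out to two children, as a
    // trunc distributed over a two-operand add would.
    void visit(unsigned TreeDepth, unsigned Depth) {
      ++Calls;
      if (Depth > MaxCastDepth || TreeDepth == 0)
        return; // capped or leaf: stop folding
      visit(TreeDepth - 1, Depth + 1);
      visit(TreeDepth - 1, Depth + 1);
    }

    int main() {
      visit(30, 0);                                // ~2^31 calls without the cap
      std::printf("calls with cap: %lu\n", Calls); // prints 1023: bounded by the cap
    }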
@@ -1634,7 +1642,7 @@ ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
ID.AddPointer(Ty);
void *IP = nullptr;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
- if (Depth > MaxExtDepth) {
+ if (Depth > MaxCastDepth) {
SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
Op, Ty);
UniqueSCEVs.InsertNode(S, IP);
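On all of the bail-out paths (the new trunc check above, and the zext/sext
checks here and below), the unfolded node is still interned via
UniqueSCEVs.InsertNode, so a repeated query for the same (Op, Ty) pair can be
served from the uniquing table rather than re-running the capped analysis. The
caching shape, sketched standalone (hypothetical key/node types, not the
FoldingSet API):

    #include <map>
    #include <memory>

    struct Node { int Key; };
    static std::map<int, std::shared_ptr<Node>> Unique; // stands in for UniqueSCEVs

    std::shared_ptr<Node> getOrCreate(int Key) {
      auto It = Unique.find(Key);
      if (It != Unique.end())
        return It->second;          // cache hit: no re-analysis needed
      auto N = std::make_shared<Node>(Node{Key});
      Unique.emplace(Key, N);       // intern even "gave up" results
      return N;
    }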
@@ -1652,7 +1660,7 @@ ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
unsigned NewBits = getTypeSizeInBits(Ty);
if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
CR.zextOrTrunc(NewBits)))
- return getTruncateOrZeroExtend(X, Ty);
+ return getTruncateOrZeroExtend(X, Ty, Depth);
}
// If the input value is a chrec scev, and we can prove that the value
@@ -1694,9 +1702,9 @@ ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
// Check whether the backedge-taken count can be losslessly casted to
// the addrec's type. The count is always unsigned.
const SCEV *CastedMaxBECount =
- getTruncateOrZeroExtend(MaxBECount, Start->getType());
- const SCEV *RecastedMaxBECount =
- getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
+ getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
+ const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
+ CastedMaxBECount, MaxBECount->getType(), Depth);
if (MaxBECount == RecastedMaxBECount) {
Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
// Check whether Start+Step*MaxBECount has no unsigned overflow.
@@ -1945,7 +1953,7 @@ ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
void *IP = nullptr;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
// Limit recursion depth.
- if (Depth > MaxExtDepth) {
+ if (Depth > MaxCastDepth) {
SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
Op, Ty);
UniqueSCEVs.InsertNode(S, IP);
@@ -1963,7 +1971,7 @@ ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
unsigned NewBits = getTypeSizeInBits(Ty);
if (CR.truncate(TruncBits).signExtend(NewBits).contains(
CR.sextOrTrunc(NewBits)))
- return getTruncateOrSignExtend(X, Ty);
+ return getTruncateOrSignExtend(X, Ty, Depth);
}
if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
@@ -2038,9 +2046,9 @@ ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
// Check whether the backedge-taken count can be losslessly casted to
// the addrec's type. The count is always unsigned.
const SCEV *CastedMaxBECount =
- getTruncateOrZeroExtend(MaxBECount, Start->getType());
- const SCEV *RecastedMaxBECount =
- getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
+ getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
+ const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
+ CastedMaxBECount, MaxBECount->getType(), Depth);
if (MaxBECount == RecastedMaxBECount) {
Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
// Check whether Start+Step*MaxBECount has no signed overflow.
@@ -4038,29 +4046,28 @@ const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
}
-const SCEV *
-ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty) {
+const SCEV *ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty,
+ unsigned Depth) {
Type *SrcTy = V->getType();
assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
"Cannot truncate or zero extend with non-integer arguments!");
if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
return V; // No conversion
if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
- return getTruncateExpr(V, Ty);
- return getZeroExtendExpr(V, Ty);
+ return getTruncateExpr(V, Ty, Depth);
+ return getZeroExtendExpr(V, Ty, Depth);
}
-const SCEV *
-ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
- Type *Ty) {
+const SCEV *ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, Type *Ty,
+ unsigned Depth) {
Type *SrcTy = V->getType();
assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
"Cannot truncate or zero extend with non-integer arguments!");
if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
return V; // No conversion
if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
- return getTruncateExpr(V, Ty);
- return getSignExtendExpr(V, Ty);
+ return getTruncateExpr(V, Ty, Depth);
+ return getSignExtendExpr(V, Ty, Depth);
}
const SCEV *
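Closing note on the last hunk: the dispatch helpers getTruncateOrZeroExtend and
getTruncateOrSignExtend forward Depth unchanged, whereas the folding routines
pass Depth + 1 when they descend into an operand; the wrappers only pick
between identity, truncate, and extend, adding no recursion level of their
own. A standalone sketch of that dispatch shape (invented Val type and stubs,
not the SCEV API):

    // Hypothetical value carrying only a bit width.
    struct Val { unsigned Bits; };

    // Stubs for the depth-limited folders the dispatcher would call into.
    Val truncTo(Val, unsigned Bits, unsigned /*Depth*/) { return Val{Bits}; }
    Val zextTo(Val, unsigned Bits, unsigned /*Depth*/) { return Val{Bits}; }

    // Identity, truncate, or zero-extend based on width. Depth is forwarded
    // as-is because this wrapper performs no folding step itself.
    Val truncOrZext(Val V, unsigned Bits, unsigned Depth) {
      if (V.Bits == Bits)
        return V;                       // no conversion
      if (V.Bits > Bits)
        return truncTo(V, Bits, Depth); // narrowing
      return zextTo(V, Bits, Depth);    // widening
    }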