author    Rafael Espindola <rafael.espindola@gmail.com>  2014-02-18 15:33:12 +0000
committer Rafael Espindola <rafael.espindola@gmail.com>  2014-02-18 15:33:12 +0000
commit    7c68bebb9cc397bd7e85ad40fd0fcdebabd21867 (patch)
tree      ac4525fb3ff2f75f61796037ea55c334bb33cd15 /llvm/lib/Analysis
parent    cb9dc67a5e4209080f1aec3f77953b47af98477e (diff)
Rename some member variables from TD to DL.

TargetData was renamed to DataLayout back in r165242.

llvm-svn: 201581
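The rename is mechanical: each analysis keeps an optional pointer to the DataLayout analysis, formerly named TD after the old TargetData class, and every use stays behind a null check because the analysis may be absent. A minimal sketch of the idiom follows, using a hypothetical SomeAnalysis class and a stub DataLayout rather than the actual LLVM sources:

// Hypothetical stand-in illustrating the TD -> DL rename; SomeAnalysis
// and this stub DataLayout are not the real LLVM classes.
#include <cstdint>

struct Type;  // opaque stand-in for llvm::Type

struct DataLayout {
  // Illustrative stub; the real method returns the store size in bytes.
  uint64_t getTypeStoreSize(Type *) const { return 4; }
};

struct SomeAnalysis {
  static const uint64_t UnknownSize = ~0ULL;
  const DataLayout *DL;  // previously spelled TD, a TargetData-era name

  SomeAnalysis() : DL(0) {}  // DataLayout may be unavailable, so start null

  // Call sites change only in spelling; the null guard is preserved.
  uint64_t getTypeStoreSize(Type *Ty) {
    return DL ? DL->getTypeStoreSize(Ty) : UnknownSize;
  }
};

Passes obtain the pointer the same way before and after the patch, via getAnalysisIfAvailable<DataLayout>(), so behavior is unchanged; only the member name catches up with the class it has pointed at since r165242.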
Diffstat (limited to 'llvm/lib/Analysis')
-rw-r--r--  llvm/lib/Analysis/AliasAnalysis.cpp            |  8
-rw-r--r--  llvm/lib/Analysis/BasicAliasAnalysis.cpp       | 42
-rw-r--r--  llvm/lib/Analysis/IPA/InlineCost.cpp           |  6
-rw-r--r--  llvm/lib/Analysis/IVUsers.cpp                  |  6
-rw-r--r--  llvm/lib/Analysis/LazyValueInfo.cpp            |  8
-rw-r--r--  llvm/lib/Analysis/MemoryDependenceAnalysis.cpp | 22
-rw-r--r--  llvm/lib/Analysis/NoAliasAnalysis.cpp          |  2
-rw-r--r--  llvm/lib/Analysis/PHITransAddr.cpp             |  6
-rw-r--r--  llvm/lib/Analysis/ScalarEvolution.cpp          | 56
-rw-r--r--  llvm/lib/Analysis/ScalarEvolutionExpander.cpp  | 20
10 files changed, 88 insertions(+), 88 deletions(-)
diff --git a/llvm/lib/Analysis/AliasAnalysis.cpp b/llvm/lib/Analysis/AliasAnalysis.cpp
index a7b36dbcb0f..01d2c76a60c 100644
--- a/llvm/lib/Analysis/AliasAnalysis.cpp
+++ b/llvm/lib/Analysis/AliasAnalysis.cpp
@@ -416,9 +416,9 @@ AliasAnalysis::ModRefResult
AliasAnalysis::callCapturesBefore(const Instruction *I,
const AliasAnalysis::Location &MemLoc,
DominatorTree *DT) {
- if (!DT || !TD) return AliasAnalysis::ModRef;
+ if (!DT || !DL) return AliasAnalysis::ModRef;
- const Value *Object = GetUnderlyingObject(MemLoc.Ptr, TD);
+ const Value *Object = GetUnderlyingObject(MemLoc.Ptr, DL);
if (!isIdentifiedObject(Object) || isa<GlobalValue>(Object) ||
isa<Constant>(Object))
return AliasAnalysis::ModRef;
@@ -472,7 +472,7 @@ AliasAnalysis::~AliasAnalysis() {}
/// AliasAnalysis interface before any other methods are called.
///
void AliasAnalysis::InitializeAliasAnalysis(Pass *P) {
- TD = P->getAnalysisIfAvailable<DataLayout>();
+ DL = P->getAnalysisIfAvailable<DataLayout>();
TLI = P->getAnalysisIfAvailable<TargetLibraryInfo>();
AA = &P->getAnalysis<AliasAnalysis>();
}
@@ -487,7 +487,7 @@ void AliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
/// if known, or a conservative value otherwise.
///
uint64_t AliasAnalysis::getTypeStoreSize(Type *Ty) {
- return TD ? TD->getTypeStoreSize(Ty) : UnknownSize;
+ return DL ? DL->getTypeStoreSize(Ty) : UnknownSize;
}
/// canBasicBlockModify - Return true if it is possible for execution of the
diff --git a/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index 5f7dd98e193..9ed8f08af42 100644
--- a/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -593,7 +593,7 @@ BasicAliasAnalysis::pointsToConstantMemory(const Location &Loc, bool OrLocal) {
SmallVector<const Value *, 16> Worklist;
Worklist.push_back(Loc.Ptr);
do {
- const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), TD);
+ const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), DL);
if (!Visited.insert(V)) {
Visited.clear();
return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
@@ -698,7 +698,7 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
"AliasAnalysis query involving multiple functions!");
- const Value *Object = GetUnderlyingObject(Loc.Ptr, TD);
+ const Value *Object = GetUnderlyingObject(Loc.Ptr, DL);
// If this is a tail call and Loc.Ptr points to a stack location, we know that
// the tail call cannot access or modify the local stack.
@@ -805,7 +805,7 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
// LLVM's vld1 and vst1 intrinsics currently only support a single
// vector register.
uint64_t Size =
- TD ? TD->getTypeStoreSize(II->getType()) : UnknownSize;
+ DL ? DL->getTypeStoreSize(II->getType()) : UnknownSize;
if (isNoAlias(Location(II->getArgOperand(0), Size,
II->getMetadata(LLVMContext::MD_tbaa)),
Loc))
@@ -814,7 +814,7 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
}
case Intrinsic::arm_neon_vst1: {
uint64_t Size =
- TD ? TD->getTypeStoreSize(II->getArgOperand(1)->getType()) : UnknownSize;
+ DL ? DL->getTypeStoreSize(II->getArgOperand(1)->getType()) : UnknownSize;
if (isNoAlias(Location(II->getArgOperand(0), Size,
II->getMetadata(LLVMContext::MD_tbaa)),
Loc))
@@ -877,7 +877,7 @@ static bool areVarIndicesEqual(SmallVectorImpl<VariableGEPIndex> &Indices1,
/// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP instruction
/// against another pointer. We know that V1 is a GEP, but we don't know
-/// anything about V2. UnderlyingV1 is GetUnderlyingObject(GEP1, TD),
+/// anything about V2. UnderlyingV1 is GetUnderlyingObject(GEP1, DL),
/// UnderlyingV2 is the same for V2.
///
AliasAnalysis::AliasResult
@@ -911,13 +911,13 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
int64_t GEP2BaseOffset;
SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
const Value *GEP2BasePtr =
- DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, TD);
+ DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, DL);
const Value *GEP1BasePtr =
- DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);
+ DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, DL);
// DecomposeGEPExpression and GetUnderlyingObject should return the
// same result except when DecomposeGEPExpression has no DataLayout.
if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
- assert(TD == 0 &&
+ assert(DL == 0 &&
"DecomposeGEPExpression and GetUnderlyingObject disagree!");
return MayAlias;
}
@@ -937,17 +937,17 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
// exactly, see if the computed offset from the common pointer tells us
// about the relation of the resulting pointer.
const Value *GEP1BasePtr =
- DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);
+ DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, DL);
int64_t GEP2BaseOffset;
SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
const Value *GEP2BasePtr =
- DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, TD);
+ DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, DL);
// DecomposeGEPExpression and GetUnderlyingObject should return the
// same result except when DecomposeGEPExpression has no DataLayout.
if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
- assert(TD == 0 &&
+ assert(DL == 0 &&
"DecomposeGEPExpression and GetUnderlyingObject disagree!");
return MayAlias;
}
@@ -977,12 +977,12 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
return R;
const Value *GEP1BasePtr =
- DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);
+ DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, DL);
// DecomposeGEPExpression and GetUnderlyingObject should return the
// same result except when DecomposeGEPExpression has no DataLayout.
if (GEP1BasePtr != UnderlyingV1) {
- assert(TD == 0 &&
+ assert(DL == 0 &&
"DecomposeGEPExpression and GetUnderlyingObject disagree!");
return MayAlias;
}
@@ -1215,8 +1215,8 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
return NoAlias; // Scalars cannot alias each other
// Figure out what objects these things are pointing to if we can.
- const Value *O1 = GetUnderlyingObject(V1, TD);
- const Value *O2 = GetUnderlyingObject(V2, TD);
+ const Value *O1 = GetUnderlyingObject(V1, DL);
+ const Value *O2 = GetUnderlyingObject(V2, DL);
// Null values in the default address space don't point to any object, so they
// don't alias any other pointer.
@@ -1265,9 +1265,9 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
// If the size of one access is larger than the entire object on the other
// side, then we know such behavior is undefined and can assume no alias.
- if (TD)
- if ((V1Size != UnknownSize && isObjectSmallerThan(O2, V1Size, *TD, *TLI)) ||
- (V2Size != UnknownSize && isObjectSmallerThan(O1, V2Size, *TD, *TLI)))
+ if (DL)
+ if ((V1Size != UnknownSize && isObjectSmallerThan(O2, V1Size, *DL, *TLI)) ||
+ (V2Size != UnknownSize && isObjectSmallerThan(O1, V2Size, *DL, *TLI)))
return NoAlias;
// Check the cache before climbing up use-def chains. This also terminates
@@ -1319,9 +1319,9 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
// If both pointers are pointing into the same object and one of them
// accesses is accessing the entire object, then the accesses must
// overlap in some way.
- if (TD && O1 == O2)
- if ((V1Size != UnknownSize && isObjectSize(O1, V1Size, *TD, *TLI)) ||
- (V2Size != UnknownSize && isObjectSize(O2, V2Size, *TD, *TLI)))
+ if (DL && O1 == O2)
+ if ((V1Size != UnknownSize && isObjectSize(O1, V1Size, *DL, *TLI)) ||
+ (V2Size != UnknownSize && isObjectSize(O2, V2Size, *DL, *TLI)))
return AliasCache[Locs] = PartialAlias;
AliasResult Result =
diff --git a/llvm/lib/Analysis/IPA/InlineCost.cpp b/llvm/lib/Analysis/IPA/InlineCost.cpp
index 920f0de2fa2..b80e5174943 100644
--- a/llvm/lib/Analysis/IPA/InlineCost.cpp
+++ b/llvm/lib/Analysis/IPA/InlineCost.cpp
@@ -1203,7 +1203,7 @@ INITIALIZE_PASS_END(InlineCostAnalysis, "inline-cost", "Inline Cost Analysis",
char InlineCostAnalysis::ID = 0;
-InlineCostAnalysis::InlineCostAnalysis() : CallGraphSCCPass(ID), TD(0) {}
+InlineCostAnalysis::InlineCostAnalysis() : CallGraphSCCPass(ID), DL(0) {}
InlineCostAnalysis::~InlineCostAnalysis() {}
@@ -1214,7 +1214,7 @@ void InlineCostAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
}
bool InlineCostAnalysis::runOnSCC(CallGraphSCC &SCC) {
- TD = getAnalysisIfAvailable<DataLayout>();
+ DL = getAnalysisIfAvailable<DataLayout>();
TTI = &getAnalysis<TargetTransformInfo>();
return false;
}
@@ -1272,7 +1272,7 @@ InlineCost InlineCostAnalysis::getInlineCost(CallSite CS, Function *Callee,
DEBUG(llvm::dbgs() << " Analyzing call of " << Callee->getName()
<< "...\n");
- CallAnalyzer CA(TD, *TTI, *Callee, Threshold);
+ CallAnalyzer CA(DL, *TTI, *Callee, Threshold);
bool ShouldInline = CA.analyzeCall(CS);
DEBUG(CA.dump());
diff --git a/llvm/lib/Analysis/IVUsers.cpp b/llvm/lib/Analysis/IVUsers.cpp
index eacd4eb029d..bf03e6ad237 100644
--- a/llvm/lib/Analysis/IVUsers.cpp
+++ b/llvm/lib/Analysis/IVUsers.cpp
@@ -123,14 +123,14 @@ bool IVUsers::AddUsersImpl(Instruction *I,
// IVUsers is used by LSR which assumes that all SCEV expressions are safe to
// pass to SCEVExpander. Expressions are not safe to expand if they represent
// operations that are not safe to speculate, namely integer division.
- if (!isa<PHINode>(I) && !isSafeToSpeculativelyExecute(I, TD))
+ if (!isa<PHINode>(I) && !isSafeToSpeculativelyExecute(I, DL))
return false;
// LSR is not APInt clean, do not touch integers bigger than 64-bits.
// Also avoid creating IVs of non-native types. For example, we don't want a
// 64-bit IV in 32-bit code just because the loop has one 64-bit cast.
uint64_t Width = SE->getTypeSizeInBits(I->getType());
- if (Width > 64 || (TD && !TD->isLegalInteger(Width)))
+ if (Width > 64 || (DL && !DL->isLegalInteger(Width)))
return false;
// Get the symbolic expression for this instruction.
@@ -234,7 +234,7 @@ bool IVUsers::runOnLoop(Loop *l, LPPassManager &LPM) {
LI = &getAnalysis<LoopInfo>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
SE = &getAnalysis<ScalarEvolution>();
- TD = getAnalysisIfAvailable<DataLayout>();
+ DL = getAnalysisIfAvailable<DataLayout>();
// Find all uses of induction variables in this loop, and categorize
// them by stride. Start by finding all of the PHI nodes in the header for
diff --git a/llvm/lib/Analysis/LazyValueInfo.cpp b/llvm/lib/Analysis/LazyValueInfo.cpp
index b6970af4cde..a07eef2c199 100644
--- a/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -1013,7 +1013,7 @@ bool LazyValueInfo::runOnFunction(Function &F) {
if (PImpl)
getCache(PImpl).clear();
- TD = getAnalysisIfAvailable<DataLayout>();
+ DL = getAnalysisIfAvailable<DataLayout>();
TLI = &getAnalysis<TargetLibraryInfo>();
// Fully lazy.
@@ -1073,7 +1073,7 @@ LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
// If we know the value is a constant, evaluate the conditional.
Constant *Res = 0;
if (Result.isConstant()) {
- Res = ConstantFoldCompareInstOperands(Pred, Result.getConstant(), C, TD,
+ Res = ConstantFoldCompareInstOperands(Pred, Result.getConstant(), C, DL,
TLI);
if (ConstantInt *ResCI = dyn_cast<ConstantInt>(Res))
return ResCI->isZero() ? False : True;
@@ -1115,14 +1115,14 @@ LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
if (Pred == ICmpInst::ICMP_EQ) {
// !C1 == C -> false iff C1 == C.
Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
- Result.getNotConstant(), C, TD,
+ Result.getNotConstant(), C, DL,
TLI);
if (Res->isNullValue())
return False;
} else if (Pred == ICmpInst::ICMP_NE) {
// !C1 != C -> true iff C1 == C.
Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
- Result.getNotConstant(), C, TD,
+ Result.getNotConstant(), C, DL,
TLI);
if (Res->isNullValue())
return True;
diff --git a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
index dc07ac8c5d3..8811e22a68a 100644
--- a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -87,7 +87,7 @@ void MemoryDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
bool MemoryDependenceAnalysis::runOnFunction(Function &) {
AA = &getAnalysis<AliasAnalysis>();
- TD = getAnalysisIfAvailable<DataLayout>();
+ DL = getAnalysisIfAvailable<DataLayout>();
DominatorTreeWrapperPass *DTWP =
getAnalysisIfAvailable<DominatorTreeWrapperPass>();
DT = DTWP ? &DTWP->getDomTree() : 0;
@@ -258,17 +258,17 @@ isLoadLoadClobberIfExtendedToFullWidth(const AliasAnalysis::Location &MemLoc,
const Value *&MemLocBase,
int64_t &MemLocOffs,
const LoadInst *LI,
- const DataLayout *TD) {
+ const DataLayout *DL) {
// If we have no target data, we can't do this.
- if (TD == 0) return false;
+ if (DL == 0) return false;
// If we haven't already computed the base/offset of MemLoc, do so now.
if (MemLocBase == 0)
- MemLocBase = GetPointerBaseWithConstantOffset(MemLoc.Ptr, MemLocOffs, TD);
+ MemLocBase = GetPointerBaseWithConstantOffset(MemLoc.Ptr, MemLocOffs, DL);
unsigned Size = MemoryDependenceAnalysis::
getLoadLoadClobberFullWidthSize(MemLocBase, MemLocOffs, MemLoc.Size,
- LI, *TD);
+ LI, *DL);
return Size != 0;
}
@@ -282,7 +282,7 @@ isLoadLoadClobberIfExtendedToFullWidth(const AliasAnalysis::Location &MemLoc,
unsigned MemoryDependenceAnalysis::
getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
unsigned MemLocSize, const LoadInst *LI,
- const DataLayout &TD) {
+ const DataLayout &DL) {
// We can only extend simple integer loads.
if (!isa<IntegerType>(LI->getType()) || !LI->isSimple()) return 0;
@@ -295,7 +295,7 @@ getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
// Get the base of this load.
int64_t LIOffs = 0;
const Value *LIBase =
- GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, &TD);
+ GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, &DL);
// If the two pointers are not based on the same pointer, we can't tell that
// they are related.
@@ -331,7 +331,7 @@ getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
// If this load size is bigger than our known alignment or would not fit
// into a native integer register, then we fail.
if (NewLoadByteSize > LoadAlign ||
- !TD.fitsInLegalInteger(NewLoadByteSize*8))
+ !DL.fitsInLegalInteger(NewLoadByteSize*8))
return 0;
if (LIOffs+NewLoadByteSize > MemLocEnd &&
@@ -424,7 +424,7 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType()))
if (LI->getAlignment()*8 > ITy->getPrimitiveSizeInBits() &&
isLoadLoadClobberIfExtendedToFullWidth(MemLoc, MemLocBase,
- MemLocOffset, LI, TD))
+ MemLocOffset, LI, DL))
return MemDepResult::getClobber(Inst);
continue;
@@ -500,7 +500,7 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
// need to continue scanning until the malloc call.
const TargetLibraryInfo *TLI = AA->getTargetLibraryInfo();
if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst, TLI)) {
- const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, TD);
+ const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, DL);
if (AccessPtr == Inst || AA->isMustAlias(Inst, AccessPtr))
return MemDepResult::getDef(Inst);
@@ -773,7 +773,7 @@ getNonLocalPointerDependency(const AliasAnalysis::Location &Loc, bool isLoad,
"Can't get pointer deps of a non-pointer!");
Result.clear();
- PHITransAddr Address(const_cast<Value *>(Loc.Ptr), TD);
+ PHITransAddr Address(const_cast<Value *>(Loc.Ptr), DL);
// This is the set of blocks we've inspected, and the pointer we consider in
// each block. Because of critical edges, we currently bail out if querying
diff --git a/llvm/lib/Analysis/NoAliasAnalysis.cpp b/llvm/lib/Analysis/NoAliasAnalysis.cpp
index 907e9621bae..5d6b2ea1ab8 100644
--- a/llvm/lib/Analysis/NoAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/NoAliasAnalysis.cpp
@@ -36,7 +36,7 @@ namespace {
virtual void initializePass() {
// Note: NoAA does not call InitializeAliasAnalysis because it's
// special and does not support chaining.
- TD = getAnalysisIfAvailable<DataLayout>();
+ DL = getAnalysisIfAvailable<DataLayout>();
}
virtual AliasResult alias(const Location &LocA, const Location &LocB) {
diff --git a/llvm/lib/Analysis/PHITransAddr.cpp b/llvm/lib/Analysis/PHITransAddr.cpp
index bcff002fee6..866f82a17a1 100644
--- a/llvm/lib/Analysis/PHITransAddr.cpp
+++ b/llvm/lib/Analysis/PHITransAddr.cpp
@@ -229,7 +229,7 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
return GEP;
// Simplify the GEP to handle 'gep x, 0' -> x etc.
- if (Value *V = SimplifyGEPInst(GEPOps, TD, TLI, DT)) {
+ if (Value *V = SimplifyGEPInst(GEPOps, DL, TLI, DT)) {
for (unsigned i = 0, e = GEPOps.size(); i != e; ++i)
RemoveInstInputs(GEPOps[i], InstInputs);
@@ -285,7 +285,7 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
}
// See if the add simplifies away.
- if (Value *Res = SimplifyAddInst(LHS, RHS, isNSW, isNUW, TD, TLI, DT)) {
+ if (Value *Res = SimplifyAddInst(LHS, RHS, isNSW, isNUW, DL, TLI, DT)) {
// If we simplified the operands, the LHS is no longer an input, but Res
// is.
RemoveInstInputs(LHS, InstInputs);
@@ -372,7 +372,7 @@ InsertPHITranslatedSubExpr(Value *InVal, BasicBlock *CurBB,
SmallVectorImpl<Instruction*> &NewInsts) {
// See if we have a version of this value already available and dominating
// PredBB. If so, there is no need to insert a new instance of it.
- PHITransAddr Tmp(InVal, TD);
+ PHITransAddr Tmp(InVal, DL);
if (!Tmp.PHITranslateValue(CurBB, PredBB, &DT))
return Tmp.getAddr();
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 03fee883240..f2523af9ab0 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -2663,12 +2663,12 @@ const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
// If we have DataLayout, we can bypass creating a target-independent
// constant expression and then folding it back into a ConstantInt.
// This is just a compile-time optimization.
- if (TD)
- return getConstant(IntTy, TD->getTypeAllocSize(AllocTy));
+ if (DL)
+ return getConstant(IntTy, DL->getTypeAllocSize(AllocTy));
Constant *C = ConstantExpr::getSizeOf(AllocTy);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
- if (Constant *Folded = ConstantFoldConstantExpression(CE, TD, TLI))
+ if (Constant *Folded = ConstantFoldConstantExpression(CE, DL, TLI))
C = Folded;
Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
assert(Ty == IntTy && "Effective SCEV type doesn't match");
@@ -2681,14 +2681,14 @@ const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
// If we have DataLayout, we can bypass creating a target-independent
// constant expression and then folding it back into a ConstantInt.
// This is just a compile-time optimization.
- if (TD) {
+ if (DL) {
return getConstant(IntTy,
- TD->getStructLayout(STy)->getElementOffset(FieldNo));
+ DL->getStructLayout(STy)->getElementOffset(FieldNo));
}
Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
- if (Constant *Folded = ConstantFoldConstantExpression(CE, TD, TLI))
+ if (Constant *Folded = ConstantFoldConstantExpression(CE, DL, TLI))
C = Folded;
Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
@@ -2736,8 +2736,8 @@ uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
assert(isSCEVable(Ty) && "Type is not SCEVable!");
// If we have a DataLayout, use it!
- if (TD)
- return TD->getTypeSizeInBits(Ty);
+ if (DL)
+ return DL->getTypeSizeInBits(Ty);
// Integer types have fixed sizes.
if (Ty->isIntegerTy())
@@ -2763,8 +2763,8 @@ Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
// The only other support type is pointer.
assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
- if (TD)
- return TD->getIntPtrType(Ty);
+ if (DL)
+ return DL->getIntPtrType(Ty);
// Without DataLayout, conservatively assume pointers are 64-bit.
return Type::getInt64Ty(getContext());
@@ -3232,7 +3232,7 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
// PHI's incoming blocks are in a different loop, in which case doing so
// risks breaking LCSSA form. Instcombine would normally zap these, but
// it doesn't have DominatorTree information, so it may miss cases.
- if (Value *V = SimplifyInstruction(PN, TD, TLI, DT))
+ if (Value *V = SimplifyInstruction(PN, DL, TLI, DT))
if (LI->replacementPreservesLCSSAForm(PN, V))
return getSCEV(V);
@@ -3503,7 +3503,7 @@ ScalarEvolution::getUnsignedRange(const SCEV *S) {
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
// For a SCEVUnknown, ask ValueTracking.
APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
- ComputeMaskedBits(U->getValue(), Zeros, Ones, TD);
+ ComputeMaskedBits(U->getValue(), Zeros, Ones, DL);
if (Ones == ~Zeros + 1)
return setUnsignedRange(U, ConservativeResult);
return setUnsignedRange(U,
@@ -3653,9 +3653,9 @@ ScalarEvolution::getSignedRange(const SCEV *S) {
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
// For a SCEVUnknown, ask ValueTracking.
- if (!U->getValue()->getType()->isIntegerTy() && !TD)
+ if (!U->getValue()->getType()->isIntegerTy() && !DL)
return setSignedRange(U, ConservativeResult);
- unsigned NS = ComputeNumSignBits(U->getValue(), TD);
+ unsigned NS = ComputeNumSignBits(U->getValue(), DL);
if (NS <= 1)
return setSignedRange(U, ConservativeResult);
return setSignedRange(U, ConservativeResult.intersectWith(
@@ -3762,7 +3762,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
unsigned TZ = A.countTrailingZeros();
unsigned BitWidth = A.getBitWidth();
APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
- ComputeMaskedBits(U->getOperand(0), KnownZero, KnownOne, TD);
+ ComputeMaskedBits(U->getOperand(0), KnownZero, KnownOne, DL);
APInt EffectiveMask =
APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ);
@@ -4956,7 +4956,7 @@ static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
/// reason, return null.
static Constant *EvaluateExpression(Value *V, const Loop *L,
DenseMap<Instruction *, Constant *> &Vals,
- const DataLayout *TD,
+ const DataLayout *DL,
const TargetLibraryInfo *TLI) {
// Convenient constant check, but redundant for recursive calls.
if (Constant *C = dyn_cast<Constant>(V)) return C;
@@ -4983,7 +4983,7 @@ static Constant *EvaluateExpression(Value *V, const Loop *L,
if (!Operands[i]) return 0;
continue;
}
- Constant *C = EvaluateExpression(Operand, L, Vals, TD, TLI);
+ Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
Vals[Operand] = C;
if (!C) return 0;
Operands[i] = C;
@@ -4991,12 +4991,12 @@ static Constant *EvaluateExpression(Value *V, const Loop *L,
if (CmpInst *CI = dyn_cast<CmpInst>(I))
return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
- Operands[1], TD, TLI);
+ Operands[1], DL, TLI);
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
if (!LI->isVolatile())
- return ConstantFoldLoadFromConstPtr(Operands[0], TD);
+ return ConstantFoldLoadFromConstPtr(Operands[0], DL);
}
- return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Operands, TD,
+ return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Operands, DL,
TLI);
}
@@ -5052,7 +5052,7 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
// Compute the value of the PHIs for the next iteration.
// EvaluateExpression adds non-phi values to the CurrentIterVals map.
DenseMap<Instruction *, Constant *> NextIterVals;
- Constant *NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD,
+ Constant *NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL,
TLI);
if (NextPHI == 0)
return 0; // Couldn't evaluate!
@@ -5078,7 +5078,7 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
Constant *&NextPHI = NextIterVals[PHI];
if (!NextPHI) { // Not already computed.
Value *BEValue = PHI->getIncomingValue(SecondIsBackedge);
- NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD, TLI);
+ NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, TLI);
}
if (NextPHI != I->second)
StoppedEvolving = false;
@@ -5134,7 +5134,7 @@ const SCEV *ScalarEvolution::ComputeExitCountExhaustively(const Loop *L,
for (unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){
ConstantInt *CondVal =
dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, L, CurrentIterVals,
- TD, TLI));
+ DL, TLI));
// Couldn't symbolically evaluate.
if (!CondVal) return getCouldNotCompute();
@@ -5164,7 +5164,7 @@ const SCEV *ScalarEvolution::ComputeExitCountExhaustively(const Loop *L,
if (NextPHI) continue; // Already computed!
Value *BEValue = PHI->getIncomingValue(SecondIsBackedge);
- NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD, TLI);
+ NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, TLI);
}
CurrentIterVals.swap(NextIterVals);
}
@@ -5369,14 +5369,14 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
Constant *C = 0;
if (const CmpInst *CI = dyn_cast<CmpInst>(I))
C = ConstantFoldCompareInstOperands(CI->getPredicate(),
- Operands[0], Operands[1], TD,
+ Operands[0], Operands[1], DL,
TLI);
else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
if (!LI->isVolatile())
- C = ConstantFoldLoadFromConstPtr(Operands[0], TD);
+ C = ConstantFoldLoadFromConstPtr(Operands[0], DL);
} else
C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
- Operands, TD, TLI);
+ Operands, DL, TLI);
if (!C) return V;
return getSCEV(C);
}
@@ -7385,7 +7385,7 @@ ScalarEvolution::ScalarEvolution()
bool ScalarEvolution::runOnFunction(Function &F) {
this->F = &F;
LI = &getAnalysis<LoopInfo>();
- TD = getAnalysisIfAvailable<DataLayout>();
+ DL = getAnalysisIfAvailable<DataLayout>();
TLI = &getAnalysis<TargetLibraryInfo>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
return false;
diff --git a/llvm/lib/Analysis/ScalarEvolutionExpander.cpp b/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
index 8c540589631..b778c6e3467 100644
--- a/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -210,7 +210,7 @@ static bool FactorOutConstant(const SCEV *&S,
const SCEV *&Remainder,
const SCEV *Factor,
ScalarEvolution &SE,
- const DataLayout *TD) {
+ const DataLayout *DL) {
// Everything is divisible by one.
if (Factor->isOne())
return true;
@@ -250,7 +250,7 @@ static bool FactorOutConstant(const SCEV *&S,
// In a Mul, check if there is a constant operand which is a multiple
// of the given factor.
if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
- if (TD) {
+ if (DL) {
// With DataLayout, the size is known. Check if there is a constant
// operand which is a multiple of the given factor. If so, we can
// factor it.
@@ -270,7 +270,7 @@ static bool FactorOutConstant(const SCEV *&S,
for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
const SCEV *SOp = M->getOperand(i);
const SCEV *Remainder = SE.getConstant(SOp->getType(), 0);
- if (FactorOutConstant(SOp, Remainder, Factor, SE, TD) &&
+ if (FactorOutConstant(SOp, Remainder, Factor, SE, DL) &&
Remainder->isZero()) {
SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
NewMulOps[i] = SOp;
@@ -285,12 +285,12 @@ static bool FactorOutConstant(const SCEV *&S,
if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
const SCEV *Step = A->getStepRecurrence(SE);
const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
- if (!FactorOutConstant(Step, StepRem, Factor, SE, TD))
+ if (!FactorOutConstant(Step, StepRem, Factor, SE, DL))
return false;
if (!StepRem->isZero())
return false;
const SCEV *Start = A->getStart();
- if (!FactorOutConstant(Start, Remainder, Factor, SE, TD))
+ if (!FactorOutConstant(Start, Remainder, Factor, SE, DL))
return false;
S = SE.getAddRecExpr(Start, Step, A->getLoop(),
A->getNoWrapFlags(SCEV::FlagNW));
@@ -404,8 +404,8 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
// without the other.
SplitAddRecs(Ops, Ty, SE);
- Type *IntPtrTy = SE.TD
- ? SE.TD->getIntPtrType(PTy)
+ Type *IntPtrTy = SE.DL
+ ? SE.DL->getIntPtrType(PTy)
: Type::getInt64Ty(PTy->getContext());
// Descend down the pointer's type and attempt to convert the other
@@ -424,7 +424,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
const SCEV *Op = Ops[i];
const SCEV *Remainder = SE.getConstant(Ty, 0);
- if (FactorOutConstant(Op, Remainder, ElSize, SE, SE.TD)) {
+ if (FactorOutConstant(Op, Remainder, ElSize, SE, SE.DL)) {
// Op now has ElSize factored out.
ScaledOps.push_back(Op);
if (!Remainder->isZero())
@@ -458,13 +458,13 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
bool FoundFieldNo = false;
// An empty struct has no fields.
if (STy->getNumElements() == 0) break;
- if (SE.TD) {
+ if (SE.DL) {
// With DataLayout, field offsets are known. See if a constant offset
// falls within any of the struct fields.
if (Ops.empty()) break;
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
if (SE.getTypeSizeInBits(C->getType()) <= 64) {
- const StructLayout &SL = *SE.TD->getStructLayout(STy);
+ const StructLayout &SL = *SE.DL->getStructLayout(STy);
uint64_t FullOffset = C->getValue()->getZExtValue();
if (FullOffset < SL.getSizeInBytes()) {
unsigned ElIdx = SL.getElementContainingOffset(FullOffset);