author     Sean Silva <chisophugis@gmail.com>    2016-07-02 23:47:27 +0000
committer  Sean Silva <chisophugis@gmail.com>    2016-07-02 23:47:27 +0000
commit     45835e731d41d45e92973b4845d7f75e6997cb22 (patch)
tree       65974666335fc28c12461b043187889ddec4fe4e /llvm/lib/Analysis
parent     e690b7a3c6b5cff7ce0e1e8824ab305f0fbad045 (diff)
Remove dead TLI arg of isKnownNonNull and propagate deadness. NFC.
This actually uncovered a surprisingly large chain of ultimately unused TLI args.

From what I can gather, this argument is a remnant of when isKnownNonNull would look at the TLI directly.

The current approach seems to be that InferFunctionAttrs runs early in the pipeline and uses TLI to annotate the TLI-dependent non-null information as return attributes.

This also removes the dependence of functionattrs on TLI altogether.

llvm-svn: 274455
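To make the new division of labor concrete, here is a minimal sketch (a paraphrase for illustration, not the verbatim ValueTracking.cpp source) of how a TLI-free isKnownNonNull can still report library-allocator results as non-null: InferFunctionAttrs consults TLI once, early in the pipeline, and stamps the nonnull return attribute, so later queries only need to read IR attributes.

// Sketch only: approximates the post-change logic; not the exact LLVM source.
#include "llvm/IR/Argument.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Value.h"

using namespace llvm;

static bool sketchIsKnownNonNull(const Value *V) {
  // An alloca never yields null.
  if (isa<AllocaInst>(V))
    return true;

  // Arguments annotated nonnull are non-null by contract.
  if (const Argument *A = dyn_cast<Argument>(V))
    if (A->hasNonNullAttr())
      return true;

  // Calls whose return is annotated nonnull -- e.g. by InferFunctionAttrs,
  // which did the TLI lookup up front -- need no TLI consultation here.
  if (auto CS = ImmutableCallSite(V))
    if (CS.isReturnNonNull())
      return true;

  return false;
}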
Diffstat (limited to 'llvm/lib/Analysis')
-rw-r--r--  llvm/lib/Analysis/InstructionSimplify.cpp  |  7
-rw-r--r--  llvm/lib/Analysis/Loads.cpp                | 27
-rw-r--r--  llvm/lib/Analysis/ValueTracking.cpp        | 13
3 files changed, 21 insertions(+), 26 deletions(-)
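Taken together, the hunks below reduce to the following definitions, each losing its trailing TargetLibraryInfo parameter (collected here for orientation; the matching declarations in llvm/include change in the same commit but fall outside this path filter and so do not appear in the diffstat):

// New signatures as they appear in the definitions below; the old forms
// each carried a trailing `const TargetLibraryInfo *TLI` parameter.
bool isKnownNonNull(const Value *V);
bool isKnownNonNullAt(const Value *V, const Instruction *CtxI,
                      const DominatorTree *DT);
bool isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
                                        const DataLayout &DL,
                                        const Instruction *CtxI,
                                        const DominatorTree *DT);
bool isDereferenceablePointer(const Value *V, const DataLayout &DL,
                              const Instruction *CtxI,
                              const DominatorTree *DT);
bool isSafeToLoadUnconditionally(Value *V, unsigned Align,
                                 const DataLayout &DL,
                                 Instruction *ScanFrom,
                                 const DominatorTree *DT);
bool isSafeToSpeculativelyExecute(const Value *V,
                                  const Instruction *CtxI,
                                  const DominatorTree *DT);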
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index b52ffeebe61..a41be07fcd5 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -1975,7 +1975,7 @@ computePointerICmp(const DataLayout &DL, const TargetLibraryInfo *TLI,
RHS = RHS->stripPointerCasts();
// A non-null pointer is not equal to a null pointer.
- if (llvm::isKnownNonNull(LHS, TLI) && isa<ConstantPointerNull>(RHS) &&
+ if (llvm::isKnownNonNull(LHS) && isa<ConstantPointerNull>(RHS) &&
(Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE))
return ConstantInt::get(GetCompareTy(LHS),
!CmpInst::isTrueWhenEqual(Pred));
@@ -2130,10 +2130,9 @@ computePointerICmp(const DataLayout &DL, const TargetLibraryInfo *TLI,
// cannot be elided. We cannot fold malloc comparison to null. Also, the
// dynamic allocation call could be either of the operands.
Value *MI = nullptr;
- if (isAllocLikeFn(LHS, TLI) && llvm::isKnownNonNullAt(RHS, CxtI, DT, TLI))
+ if (isAllocLikeFn(LHS, TLI) && llvm::isKnownNonNullAt(RHS, CxtI, DT))
MI = LHS;
- else if (isAllocLikeFn(RHS, TLI) &&
- llvm::isKnownNonNullAt(LHS, CxtI, DT, TLI))
+ else if (isAllocLikeFn(RHS, TLI) && llvm::isKnownNonNullAt(LHS, CxtI, DT))
MI = RHS;
// FIXME: We should also fold the compare when the pointer escapes, but the
// compare dominates the pointer escape
diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp
index 179b79ebcee..309a1d95efb 100644
--- a/llvm/lib/Analysis/Loads.cpp
+++ b/llvm/lib/Analysis/Loads.cpp
@@ -54,21 +54,21 @@ static bool isAligned(const Value *Base, unsigned Align, const DataLayout &DL) {
static bool isDereferenceableAndAlignedPointer(
const Value *V, unsigned Align, const APInt &Size, const DataLayout &DL,
const Instruction *CtxI, const DominatorTree *DT,
- const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited) {
+ SmallPtrSetImpl<const Value *> &Visited) {
// Note that it is not safe to speculate into a malloc'd region because
// malloc may return null.
// bitcast instructions are no-ops as far as dereferenceability is concerned.
if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V))
return isDereferenceableAndAlignedPointer(BC->getOperand(0), Align, Size,
- DL, CtxI, DT, TLI, Visited);
+ DL, CtxI, DT, Visited);
bool CheckForNonNull = false;
APInt KnownDerefBytes(Size.getBitWidth(),
V->getPointerDereferenceableBytes(DL, CheckForNonNull));
if (KnownDerefBytes.getBoolValue()) {
if (KnownDerefBytes.uge(Size))
- if (!CheckForNonNull || isKnownNonNullAt(V, CtxI, DT, TLI))
+ if (!CheckForNonNull || isKnownNonNullAt(V, CtxI, DT))
return isAligned(V, Align, DL);
}
@@ -89,17 +89,17 @@ static bool isDereferenceableAndAlignedPointer(
return Visited.insert(Base).second &&
isDereferenceableAndAlignedPointer(Base, Align, Offset + Size, DL,
- CtxI, DT, TLI, Visited);
+ CtxI, DT, Visited);
}
// For gc.relocate, look through relocations
if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
return isDereferenceableAndAlignedPointer(
- RelocateInst->getDerivedPtr(), Align, Size, DL, CtxI, DT, TLI, Visited);
+ RelocateInst->getDerivedPtr(), Align, Size, DL, CtxI, DT, Visited);
if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, Size,
- DL, CtxI, DT, TLI, Visited);
+ DL, CtxI, DT, Visited);
// If we don't know, assume the worst.
return false;
@@ -108,8 +108,7 @@ static bool isDereferenceableAndAlignedPointer(
bool llvm::isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
const DataLayout &DL,
const Instruction *CtxI,
- const DominatorTree *DT,
- const TargetLibraryInfo *TLI) {
+ const DominatorTree *DT) {
// When dereferenceability information is provided by a dereferenceable
// attribute, we know exactly how many bytes are dereferenceable. If we can
// determine the exact offset to the attributed variable, we can use that
@@ -127,14 +126,13 @@ bool llvm::isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
SmallPtrSet<const Value *, 32> Visited;
return ::isDereferenceableAndAlignedPointer(
V, Align, APInt(DL.getTypeSizeInBits(VTy), DL.getTypeStoreSize(Ty)), DL,
- CtxI, DT, TLI, Visited);
+ CtxI, DT, Visited);
}
bool llvm::isDereferenceablePointer(const Value *V, const DataLayout &DL,
const Instruction *CtxI,
- const DominatorTree *DT,
- const TargetLibraryInfo *TLI) {
- return isDereferenceableAndAlignedPointer(V, 1, DL, CtxI, DT, TLI);
+ const DominatorTree *DT) {
+ return isDereferenceableAndAlignedPointer(V, 1, DL, CtxI, DT);
}
/// \brief Test if A and B will obviously have the same value.
@@ -182,8 +180,7 @@ static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
bool llvm::isSafeToLoadUnconditionally(Value *V, unsigned Align,
const DataLayout &DL,
Instruction *ScanFrom,
- const DominatorTree *DT,
- const TargetLibraryInfo *TLI) {
+ const DominatorTree *DT) {
// Zero alignment means that the load has the ABI alignment for the target
if (Align == 0)
Align = DL.getABITypeAlignment(V->getType()->getPointerElementType());
@@ -191,7 +188,7 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, unsigned Align,
// If DT is not specified we can't make context-sensitive query
const Instruction* CtxI = DT ? ScanFrom : nullptr;
- if (isDereferenceableAndAlignedPointer(V, Align, DL, CtxI, DT, TLI))
+ if (isDereferenceableAndAlignedPointer(V, Align, DL, CtxI, DT))
return true;
int64_t ByteOffset = 0;
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 16dd3078160..a059b76d808 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -3052,8 +3052,7 @@ bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
bool llvm::isSafeToSpeculativelyExecute(const Value *V,
const Instruction *CtxI,
- const DominatorTree *DT,
- const TargetLibraryInfo *TLI) {
+ const DominatorTree *DT) {
const Operator *Inst = dyn_cast<Operator>(V);
if (!Inst)
return false;
@@ -3104,8 +3103,8 @@ bool llvm::isSafeToSpeculativelyExecute(const Value *V,
Attribute::SanitizeAddress))
return false;
const DataLayout &DL = LI->getModule()->getDataLayout();
- return isDereferenceableAndAlignedPointer(
- LI->getPointerOperand(), LI->getAlignment(), DL, CtxI, DT, TLI);
+ return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
+ LI->getAlignment(), DL, CtxI, DT);
}
case Instruction::Call: {
if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
@@ -3190,7 +3189,7 @@ bool llvm::mayBeMemoryDependent(const Instruction &I) {
}
/// Return true if we know that the specified value is never null.
-bool llvm::isKnownNonNull(const Value *V, const TargetLibraryInfo *TLI) {
+bool llvm::isKnownNonNull(const Value *V) {
assert(V->getType()->isPointerTy() && "V must be pointer type");
// Alloca never returns null, malloc might.
@@ -3257,8 +3256,8 @@ static bool isKnownNonNullFromDominatingCondition(const Value *V,
}
bool llvm::isKnownNonNullAt(const Value *V, const Instruction *CtxI,
- const DominatorTree *DT, const TargetLibraryInfo *TLI) {
- if (isKnownNonNull(V, TLI))
+ const DominatorTree *DT) {
+ if (isKnownNonNull(V))
return true;
return CtxI ? ::isKnownNonNullFromDominatingCondition(V, CtxI, DT) : false;
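For downstream code, adapting to this change is mechanical: drop the trailing TLI argument at each call site. A hypothetical out-of-tree helper (the canSpeculatePtr name and its surrounding code are illustrative only, not from the patch):

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instruction.h"

using namespace llvm;

// Before r274455 this call would have been:
//   isKnownNonNullAt(Ptr, CtxI, DT, TLI);
static bool canSpeculatePtr(const Value *Ptr, const Instruction *CtxI,
                            const DominatorTree *DT) {
  return isKnownNonNullAt(Ptr, CtxI, DT);
}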