Diffstat (limited to 'llvm/lib/Analysis')
 llvm/lib/Analysis/BasicAliasAnalysis.cpp  | 37
 llvm/lib/Analysis/ConstantFolding.cpp     | 15
 llvm/lib/Analysis/InlineCost.cpp          |  5
 llvm/lib/Analysis/InstructionSimplify.cpp |  7
 llvm/lib/Analysis/LazyValueInfo.cpp       | 14
 llvm/lib/Analysis/LoopAccessAnalysis.cpp  | 28
 llvm/lib/Analysis/ValueTracking.cpp       |  7
 7 files changed, 73 insertions(+), 40 deletions(-)
diff --git a/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index 883462a6fcb..96326347b71 100644
--- a/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -85,15 +85,15 @@ const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;
// depth otherwise the algorithm in aliasGEP will assert.
static const unsigned MaxLookupSearchDepth = 6;
-bool BasicAAResult::invalidate(Function &F, const PreservedAnalyses &PA,
+bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
FunctionAnalysisManager::Invalidator &Inv) {
// We don't care if this analysis itself is preserved, it has no state. But
// we need to check that the analyses it depends on have been. Note that we
// may be created without handles to some analyses and in that case don't
// depend on them.
- if (Inv.invalidate<AssumptionAnalysis>(F, PA) ||
- (DT && Inv.invalidate<DominatorTreeAnalysis>(F, PA)) ||
- (LI && Inv.invalidate<LoopAnalysis>(F, PA)))
+ if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
+ (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
+ (LI && Inv.invalidate<LoopAnalysis>(Fn, PA)))
return true;
// Otherwise this analysis result remains valid.
@@ -150,10 +150,12 @@ static bool isEscapeSource(const Value *V) {
/// Returns the size of the object specified by V or UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
const TargetLibraryInfo &TLI,
+ bool NullIsValidLoc,
bool RoundToAlign = false) {
uint64_t Size;
ObjectSizeOpts Opts;
Opts.RoundToAlign = RoundToAlign;
+ Opts.NullIsUnknownSize = NullIsValidLoc;
if (getObjectSize(V, Size, DL, &TLI, Opts))
return Size;
return MemoryLocation::UnknownSize;
@@ -163,7 +165,8 @@ static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
/// Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
const DataLayout &DL,
- const TargetLibraryInfo &TLI) {
+ const TargetLibraryInfo &TLI,
+ bool NullIsValidLoc) {
// Note that the meanings of the "object" are slightly different in the
// following contexts:
// c1: llvm::getObjectSize()
@@ -195,15 +198,16 @@ static bool isObjectSmallerThan(const Value *V, uint64_t Size,
// This function needs to use the aligned object size because we allow
// reads a bit past the end given sufficient alignment.
- uint64_t ObjectSize = getObjectSize(V, DL, TLI, /*RoundToAlign*/ true);
+ uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
+ /*RoundToAlign*/ true);
return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}
/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
- const TargetLibraryInfo &TLI) {
- uint64_t ObjectSize = getObjectSize(V, DL, TLI);
+ const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
+ uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}
@@ -1623,10 +1627,10 @@ AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
// Null values in the default address space don't point to any object, so they
// don't alias any other pointer.
if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
- if (CPN->getType()->getAddressSpace() == 0)
+ if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
return NoAlias;
if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
- if (CPN->getType()->getAddressSpace() == 0)
+ if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
return NoAlias;
if (O1 != O2) {
@@ -1662,10 +1666,11 @@ AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
// If the size of one access is larger than the entire object on the other
// side, then we know such behavior is undefined and can assume no alias.
+ bool NullIsValidLocation = NullPointerIsDefined(&F);
if ((V1Size != MemoryLocation::UnknownSize &&
- isObjectSmallerThan(O2, V1Size, DL, TLI)) ||
+ isObjectSmallerThan(O2, V1Size, DL, TLI, NullIsValidLocation)) ||
(V2Size != MemoryLocation::UnknownSize &&
- isObjectSmallerThan(O1, V2Size, DL, TLI)))
+ isObjectSmallerThan(O1, V2Size, DL, TLI, NullIsValidLocation)))
return NoAlias;
// Check the cache before climbing up use-def chains. This also terminates
@@ -1725,8 +1730,8 @@ AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
if (O1 == O2)
if (V1Size != MemoryLocation::UnknownSize &&
V2Size != MemoryLocation::UnknownSize &&
- (isObjectSize(O1, V1Size, DL, TLI) ||
- isObjectSize(O2, V2Size, DL, TLI)))
+ (isObjectSize(O1, V1Size, DL, TLI, NullIsValidLocation) ||
+ isObjectSize(O2, V2Size, DL, TLI, NullIsValidLocation)))
return AliasCache[Locs] = PartialAlias;
// Recurse back into the best AA results we have, potentially with refined
@@ -1870,6 +1875,7 @@ AnalysisKey BasicAA::Key;
BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
return BasicAAResult(F.getParent()->getDataLayout(),
+ F,
AM.getResult<TargetLibraryAnalysis>(F),
AM.getResult<AssumptionAnalysis>(F),
&AM.getResult<DominatorTreeAnalysis>(F),
@@ -1902,7 +1908,7 @@ bool BasicAAWrapperPass::runOnFunction(Function &F) {
auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
- Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), TLIWP.getTLI(),
+ Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F, TLIWP.getTLI(),
ACT.getAssumptionCache(F), &DTWP.getDomTree(),
LIWP ? &LIWP->getLoopInfo() : nullptr));
@@ -1919,6 +1925,7 @@ void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
return BasicAAResult(
F.getParent()->getDataLayout(),
+ F,
P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
}
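Not visible in these hunks: the NullPointerIsDefined() helper itself, which this change introduces on the llvm/IR side. A minimal sketch of its expected semantics, assuming the "null-pointer-is-valid"="true" string attribute is what drives Function::nullPointerIsDefined() (sketch only, not the committed implementation):

    #include "llvm/IR/Function.h"
    using namespace llvm;

    // Sketch: null may point at valid memory either when the enclosing
    // function opts in via the "null-pointer-is-valid"="true" attribute, or
    // when the pointer lives outside address space 0, where address 0 can
    // legitimately be mapped.
    static bool nullPointerIsDefinedSketch(const Function *F, unsigned AS = 0) {
      if (F && F->nullPointerIsDefined())
        return true;
      return AS != 0;
    }

This is why every call site above now threads a Function through: a test that used to be the constant check AddressSpace == 0 now depends on the function's attributes.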
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index 38e018f6db0..a49007ee499 100644
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -1589,7 +1589,8 @@ double getValueAsDouble(ConstantFP *Op) {
Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID, Type *Ty,
ArrayRef<Constant *> Operands,
- const TargetLibraryInfo *TLI) {
+ const TargetLibraryInfo *TLI,
+ ImmutableCallSite CS) {
if (Operands.size() == 1) {
if (isa<UndefValue>(Operands[0])) {
// cosine(arg) is between -1 and 1. cosine(invalid arg) is NaN
@@ -1603,7 +1604,8 @@ Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID, Type *Ty,
}
if (isa<ConstantPointerNull>(Operands[0]) &&
- Operands[0]->getType()->getPointerAddressSpace() == 0) {
+ !NullPointerIsDefined(
+ CS.getCaller(), Operands[0]->getType()->getPointerAddressSpace())) {
// launder(null) == null == strip(null) iff in addrspace 0
if (IntrinsicID == Intrinsic::launder_invariant_group ||
IntrinsicID == Intrinsic::strip_invariant_group)
@@ -2007,7 +2009,8 @@ Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID, Type *Ty,
Constant *ConstantFoldVectorCall(StringRef Name, unsigned IntrinsicID,
VectorType *VTy, ArrayRef<Constant *> Operands,
const DataLayout &DL,
- const TargetLibraryInfo *TLI) {
+ const TargetLibraryInfo *TLI,
+ ImmutableCallSite CS) {
SmallVector<Constant *, 4> Result(VTy->getNumElements());
SmallVector<Constant *, 4> Lane(Operands.size());
Type *Ty = VTy->getElementType();
@@ -2070,7 +2073,7 @@ Constant *ConstantFoldVectorCall(StringRef Name, unsigned IntrinsicID,
}
// Use the regular scalar folding to simplify this column.
- Constant *Folded = ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI);
+ Constant *Folded = ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, CS);
if (!Folded)
return nullptr;
Result[I] = Folded;
@@ -2095,9 +2098,9 @@ llvm::ConstantFoldCall(ImmutableCallSite CS, Function *F,
if (auto *VTy = dyn_cast<VectorType>(Ty))
return ConstantFoldVectorCall(Name, F->getIntrinsicID(), VTy, Operands,
- F->getParent()->getDataLayout(), TLI);
+ F->getParent()->getDataLayout(), TLI, CS);
- return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI);
+ return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI, CS);
}
bool llvm::isMathLibCallNoop(CallSite CS, const TargetLibraryInfo *TLI) {
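The net effect of threading ImmutableCallSite into the scalar folder is a guard on a single fold. Restated as a standalone predicate (the helper name is hypothetical, for illustration only):

    #include "llvm/IR/CallSite.h"
    #include "llvm/IR/Constants.h"
    #include "llvm/IR/Function.h"
    using namespace llvm;

    // llvm.launder.invariant.group(null) and llvm.strip.invariant.group(null)
    // may fold to null only when null cannot be a valid location in the
    // calling function; otherwise the intrinsic call must be preserved.
    static bool canFoldInvariantGroupOfNull(ImmutableCallSite CS,
                                            const Constant *Arg) {
      if (!isa<ConstantPointerNull>(Arg))
        return false;
      unsigned AS = Arg->getType()->getPointerAddressSpace();
      return !NullPointerIsDefined(CS.getCaller(), AS);
    }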
diff --git a/llvm/lib/Analysis/InlineCost.cpp b/llvm/lib/Analysis/InlineCost.cpp
index 7a28ad431f6..a6cccc3b591 100644
--- a/llvm/lib/Analysis/InlineCost.cpp
+++ b/llvm/lib/Analysis/InlineCost.cpp
@@ -1994,6 +1994,11 @@ InlineCost llvm::getInlineCost(
if (Caller->hasFnAttribute(Attribute::OptimizeNone))
return llvm::InlineCost::getNever();
+ // Don't inline a function that treats null pointers as valid into a caller
+ // that does not have this attribute.
+ if (!Caller->nullPointerIsDefined() && Callee->nullPointerIsDefined())
+ return llvm::InlineCost::getNever();
+
// Don't inline functions which can be interposed at link-time. Don't inline
// functions marked noinline or call sites marked noinline.
// Note: inlining non-exact non-interposable functions is fine, since we know
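The new bail-out is deliberately one-directional. A hedged restatement of the reasoning (function name hypothetical):

    #include "llvm/IR/Function.h"
    using namespace llvm;

    // Inlining moves the callee's body under the caller's attributes. A
    // callee built with "null-pointer-is-valid"="true" may legally
    // dereference null; inside a caller without the attribute, later passes
    // would treat those dereferences as UB and could miscompile. Inlining in
    // the opposite direction merely forgoes optimizations, so it stays legal.
    static bool inliningWouldDropNullGuarantee(const Function &Caller,
                                               const Function &Callee) {
      return !Caller.nullPointerIsDefined() && Callee.nullPointerIsDefined();
    }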
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index c15f649e9c4..061ce8f0bd1 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -2120,9 +2120,12 @@ computePointerICmp(const DataLayout &DL, const TargetLibraryInfo *TLI,
ConstantInt *LHSOffsetCI = dyn_cast<ConstantInt>(LHSOffset);
ConstantInt *RHSOffsetCI = dyn_cast<ConstantInt>(RHSOffset);
uint64_t LHSSize, RHSSize;
+ ObjectSizeOpts Opts;
+ Opts.NullIsUnknownSize =
+ NullPointerIsDefined(cast<AllocaInst>(LHS)->getFunction());
if (LHSOffsetCI && RHSOffsetCI &&
- getObjectSize(LHS, LHSSize, DL, TLI) &&
- getObjectSize(RHS, RHSSize, DL, TLI)) {
+ getObjectSize(LHS, LHSSize, DL, TLI, Opts) &&
+ getObjectSize(RHS, RHSSize, DL, TLI, Opts)) {
const APInt &LHSOffsetValue = LHSOffsetCI->getValue();
const APInt &RHSOffsetValue = RHSOffsetCI->getValue();
if (!LHSOffsetValue.isNegative() &&
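computePointerICmp appears to reach this branch only when LHS is already known to be an AllocaInst (the cast<AllocaInst> in the hunk relies on that). The new ObjectSizeOpts simply stops getObjectSize() from assigning null a concrete size when null may be valid. A usage sketch under that assumption:

    #include "llvm/Analysis/MemoryBuiltins.h"
    #include "llvm/Analysis/TargetLibraryInfo.h"
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Function.h"
    using namespace llvm;

    // Sketch: size query that respects the enclosing function's null
    // semantics. When null may be valid, its size is reported as unknown,
    // which blocks icmp folds that rely on object-size facts.
    static bool getSizeRespectingNull(const Value *V, uint64_t &Size,
                                      const DataLayout &DL,
                                      const TargetLibraryInfo *TLI,
                                      const Function *ContainingFn) {
      ObjectSizeOpts Opts;
      Opts.NullIsUnknownSize = NullPointerIsDefined(ContainingFn);
      return getObjectSize(V, Size, DL, TLI, Opts);
    }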
diff --git a/llvm/lib/Analysis/LazyValueInfo.cpp b/llvm/lib/Analysis/LazyValueInfo.cpp
index a133357979b..435b6f20519 100644
--- a/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -704,9 +704,11 @@ bool LazyValueInfoImpl::solveBlockValueNonLocal(ValueLatticeElement &BBLV,
assert(isa<Argument>(Val) && "Unknown live-in to the entry block");
// Before giving up, see if we can prove the pointer non-null local to
// this particular block.
- if (Val->getType()->isPointerTy() &&
- (isKnownNonZero(Val, DL) || isObjectDereferencedInBlock(Val, BB))) {
- PointerType *PTy = cast<PointerType>(Val->getType());
+ PointerType *PTy = dyn_cast<PointerType>(Val->getType());
+ if (PTy &&
+ (isKnownNonZero(Val, DL) ||
+ (isObjectDereferencedInBlock(Val, BB) &&
+ !NullPointerIsDefined(BB->getParent(), PTy->getAddressSpace())))) {
Result = ValueLatticeElement::getNot(ConstantPointerNull::get(PTy));
} else {
Result = ValueLatticeElement::getOverdefined();
@@ -739,9 +741,9 @@ bool LazyValueInfoImpl::solveBlockValueNonLocal(ValueLatticeElement &BBLV,
<< "' - overdefined because of pred (non local).\n");
// Before giving up, see if we can prove the pointer non-null local to
// this particular block.
- if (Val->getType()->isPointerTy() &&
- isObjectDereferencedInBlock(Val, BB)) {
- PointerType *PTy = cast<PointerType>(Val->getType());
+ PointerType *PTy = dyn_cast<PointerType>(Val->getType());
+ if (PTy && isObjectDereferencedInBlock(Val, BB) &&
+ !NullPointerIsDefined(BB->getParent(), PTy->getAddressSpace())) {
Result = ValueLatticeElement::getNot(ConstantPointerNull::get(PTy));
}
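Both hunks encode the same rule: a dereference of Val inside the block proves Val non-null only where dereferencing null is still UB. Sketch (a free function for illustration; in the patch this logic stays inline in LazyValueInfoImpl, and isObjectDereferencedInBlock() is a member not shown in this diff):

    #include "llvm/IR/BasicBlock.h"
    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/Function.h"
    using namespace llvm;

    // Sketch: DerefSeen stands in for isObjectDereferencedInBlock(Val, BB).
    // A known dereference only implies non-null when a null dereference in
    // this function and address space would be undefined behavior.
    static bool derefProvesNonNull(bool DerefSeen, const BasicBlock *BB,
                                   const PointerType *PTy) {
      return DerefSeen &&
             !NullPointerIsDefined(BB->getParent(), PTy->getAddressSpace());
    }

Note the asymmetry the first hunk preserves: isKnownNonZero(Val, DL) still proves non-null unconditionally, since it reasons from the value itself rather than from dereference-implies-valid.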
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index 17b13802e1d..c6175bf9bee 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -500,11 +500,11 @@ public:
typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;
- AccessAnalysis(const DataLayout &Dl, AliasAnalysis *AA, LoopInfo *LI,
- MemoryDepChecker::DepCandidates &DA,
+ AccessAnalysis(const DataLayout &Dl, Loop *TheLoop, AliasAnalysis *AA,
+ LoopInfo *LI, MemoryDepChecker::DepCandidates &DA,
PredicatedScalarEvolution &PSE)
- : DL(Dl), AST(*AA), LI(LI), DepCands(DA), IsRTCheckAnalysisNeeded(false),
- PSE(PSE) {}
+ : DL(Dl), TheLoop(TheLoop), AST(*AA), LI(LI), DepCands(DA),
+ IsRTCheckAnalysisNeeded(false), PSE(PSE) {}
/// Register a load and whether it is only read from.
void addLoad(MemoryLocation &Loc, bool IsReadOnly) {
@@ -579,6 +579,9 @@ private:
const DataLayout &DL;
+ /// The loop being checked.
+ const Loop *TheLoop;
+
/// List of accesses that need a further dependence check.
MemAccessInfoList CheckDeps;
@@ -910,7 +913,10 @@ void AccessAnalysis::processMemAccesses() {
for (Value *UnderlyingObj : TempObjects) {
// nullptr never alias, don't join sets for pointer that have "null"
// in their UnderlyingObjects list.
- if (isa<ConstantPointerNull>(UnderlyingObj))
+ if (isa<ConstantPointerNull>(UnderlyingObj) &&
+ !NullPointerIsDefined(
+ TheLoop->getHeader()->getParent(),
+ UnderlyingObj->getType()->getPointerAddressSpace()))
continue;
UnderlyingObjToAccessMap::iterator Prev =
@@ -1026,8 +1032,9 @@ int64_t llvm::getPtrStride(PredicatedScalarEvolution &PSE, Value *Ptr,
bool IsNoWrapAddRec = !ShouldCheckWrap ||
PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW) ||
isNoWrapAddRec(Ptr, AR, PSE, Lp);
- bool IsInAddressSpaceZero = PtrTy->getAddressSpace() == 0;
- if (!IsNoWrapAddRec && !IsInBoundsGEP && !IsInAddressSpaceZero) {
+ if (!IsNoWrapAddRec && !IsInBoundsGEP &&
+ NullPointerIsDefined(Lp->getHeader()->getParent(),
+ PtrTy->getAddressSpace())) {
if (Assume) {
PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
IsNoWrapAddRec = true;
@@ -1073,8 +1080,9 @@ int64_t llvm::getPtrStride(PredicatedScalarEvolution &PSE, Value *Ptr,
// If the SCEV could wrap but we have an inbounds gep with a unit stride we
// know we can't "wrap around the address space". In case of address space
// zero we know that this won't happen without triggering undefined behavior.
- if (!IsNoWrapAddRec && (IsInBoundsGEP || IsInAddressSpaceZero) &&
- Stride != 1 && Stride != -1) {
+ if (!IsNoWrapAddRec && Stride != 1 && Stride != -1 &&
+ (IsInBoundsGEP || !NullPointerIsDefined(Lp->getHeader()->getParent(),
+ PtrTy->getAddressSpace()))) {
if (Assume) {
// We can avoid this case by adding a run-time check.
LLVM_DEBUG(dbgs() << "LAA: Non unit strided pointer which is not either "
@@ -1845,7 +1853,7 @@ void LoopAccessInfo::analyzeLoop(AliasAnalysis *AA, LoopInfo *LI,
MemoryDepChecker::DepCandidates DependentAccesses;
AccessAnalysis Accesses(TheLoop->getHeader()->getModule()->getDataLayout(),
- AA, LI, DependentAccesses, *PSE);
+ TheLoop, AA, LI, DependentAccesses, *PSE);
// Holds the analyzed pointers. We don't want to call GetUnderlyingObjects
// multiple times on the same object. If the ptr is accessed twice, once
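In getPtrStride, the old IsInAddressSpaceZero shortcut is replaced and the first hunk inverts its sense: a possibly-wrapping, non-inbounds pointer recurrence is only a problem when wrapping is not already UB. Sketch of the rewritten condition (helper name hypothetical):

    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/Function.h"
    using namespace llvm;

    // Sketch: a pointer AddRec whose no-wrap property cannot be proven must
    // be rejected (or guarded by a run-time predicate when Assume is set)
    // once null may be valid, because "wrapping around the address space"
    // is then no longer automatically undefined behavior.
    static bool mustRejectPossibleWrap(bool IsNoWrapAddRec, bool IsInBoundsGEP,
                                       const Function *F,
                                       const PointerType *PtrTy) {
      return !IsNoWrapAddRec && !IsInBoundsGEP &&
             NullPointerIsDefined(F, PtrTy->getAddressSpace());
    }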
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 7ff1d5236a3..bdfd1783236 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -1769,7 +1769,12 @@ bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
/// Currently this routine does not support vector GEPs.
static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
const Query &Q) {
- if (!GEP->isInBounds() || GEP->getPointerAddressSpace() != 0)
+ const Function *F = nullptr;
+ if (const Instruction *I = dyn_cast<Instruction>(GEP))
+ F = I->getFunction();
+
+ if (!GEP->isInBounds() ||
+ NullPointerIsDefined(F, GEP->getPointerAddressSpace()))
return false;
// FIXME: Support vector-GEPs.
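A GEPOperator is not always an Instruction: it can wrap a ConstantExpr, which has no parent function. The hunk therefore leaves F null in that case and lets NullPointerIsDefined() fall back to judging by address space alone. Sketch of that lookup:

    #include "llvm/IR/Function.h"
    #include "llvm/IR/Instruction.h"
    #include "llvm/IR/Operator.h"
    using namespace llvm;

    // Sketch: find the function whose attributes govern null semantics for
    // this GEP. Constant-expression GEPs have no enclosing function, so the
    // result may be null, which NullPointerIsDefined() accepts.
    static const Function *functionForNullSemantics(const GEPOperator *GEP) {
      if (const auto *I = dyn_cast<Instruction>(GEP))
        return I->getFunction();
      return nullptr;
    }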