author     Artur Pilipenko <apilipenko@azulsystems.com>  2016-02-24 12:49:04 +0000
committer  Artur Pilipenko <apilipenko@azulsystems.com>  2016-02-24 12:49:04 +0000
commit     31bcca47d3c5e988826416a42a1bd92d511f5d38 (patch)
tree       ecb7adfcfa0ae4a0f538e340edb1ec9777fbda8f
parent     ae51afc5c7b01c331bf240323a79597cc961ae27 (diff)
NFC. Move isDereferenceable to Loads.h/cpp
This is part of the refactoring to unify the isSafeToLoadUnconditionally and isDereferenceablePointer functions. In a subsequent change I'm going to eliminate isDereferenceableAndAlignedPointer from the Loads API, leaving isSafeToLoadSpeculatively as the only function for checking whether a load instruction can be speculated.

Reviewed By: hfinkel

Differential Revision: http://reviews.llvm.org/D16180

llvm-svn: 261736
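For orientation, here is a minimal sketch (not part of this commit) of how a caller might use the relocated API once this patch lands. It assumes LLVM headers of this era (unsigned alignments, typed pointers); canSpeculateLoad is a hypothetical helper name, and the calls match the declarations added to Loads.h below.

#include "llvm/Analysis/Loads.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Hypothetical helper: after this patch the dereferenceability queries are
// declared in Loads.h rather than ValueTracking.h; their signatures are
// unchanged by the move.
static bool canSpeculateLoad(const LoadInst *LI, const DataLayout &DL,
                             const DominatorTree *DT) {
  // Passing the load itself as the context instruction enables the
  // context-sensitive (dominator-tree-based) non-null checks.
  return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
                                            LI->getAlignment(), DL,
                                            /*CtxI=*/LI, DT);
}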
-rw-r--r--  llvm/include/llvm/Analysis/Loads.h                     |  19
-rw-r--r--  llvm/include/llvm/Analysis/ValueTracking.h             |  19
-rw-r--r--  llvm/lib/Analysis/Loads.cpp                            | 204
-rw-r--r--  llvm/lib/Analysis/MemDerefPrinter.cpp                  |   2
-rw-r--r--  llvm/lib/Analysis/ValueTracking.cpp                    | 203
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp  |   1
-rw-r--r--  llvm/lib/Transforms/IPO/ArgumentPromotion.cpp          |   1
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp   |   1
8 files changed, 228 insertions(+), 222 deletions(-)
diff --git a/llvm/include/llvm/Analysis/Loads.h b/llvm/include/llvm/Analysis/Loads.h
index 9eef7f8e5d4..49d7a222c92 100644
--- a/llvm/include/llvm/Analysis/Loads.h
+++ b/llvm/include/llvm/Analysis/Loads.h
@@ -23,6 +23,25 @@ namespace llvm {
class DataLayout;
class MDNode;
+/// isDereferenceablePointer - Return true if this is always a dereferenceable
+/// pointer. If the context instruction is specified perform context-sensitive
+/// analysis and return true if the pointer is dereferenceable at the
+/// specified instruction.
+bool isDereferenceablePointer(const Value *V, const DataLayout &DL,
+ const Instruction *CtxI = nullptr,
+ const DominatorTree *DT = nullptr,
+ const TargetLibraryInfo *TLI = nullptr);
+
+/// Returns true if V is always a dereferenceable pointer with alignment
+/// greater or equal than requested. If the context instruction is specified
+/// performs context-sensitive analysis and returns true if the pointer is
+/// dereferenceable at the specified instruction.
+bool isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
+ const DataLayout &DL,
+ const Instruction *CtxI = nullptr,
+ const DominatorTree *DT = nullptr,
+ const TargetLibraryInfo *TLI = nullptr);
+
/// isSafeToLoadUnconditionally - Return true if we know that executing a load
/// from this value cannot trap.
///
diff --git a/llvm/include/llvm/Analysis/ValueTracking.h b/llvm/include/llvm/Analysis/ValueTracking.h
index 8e029106847..b5f12048275 100644
--- a/llvm/include/llvm/Analysis/ValueTracking.h
+++ b/llvm/include/llvm/Analysis/ValueTracking.h
@@ -236,25 +236,6 @@ namespace llvm {
/// are lifetime markers.
bool onlyUsedByLifetimeMarkers(const Value *V);
- /// isDereferenceablePointer - Return true if this is always a dereferenceable
- /// pointer. If the context instruction is specified perform context-sensitive
- /// analysis and return true if the pointer is dereferenceable at the
- /// specified instruction.
- bool isDereferenceablePointer(const Value *V, const DataLayout &DL,
- const Instruction *CtxI = nullptr,
- const DominatorTree *DT = nullptr,
- const TargetLibraryInfo *TLI = nullptr);
-
- /// Returns true if V is always a dereferenceable pointer with alignment
- /// greater or equal than requested. If the context instruction is specified
- /// performs context-sensitive analysis and returns true if the pointer is
- /// dereferenceable at the specified instruction.
- bool isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
- const DataLayout &DL,
- const Instruction *CtxI = nullptr,
- const DominatorTree *DT = nullptr,
- const TargetLibraryInfo *TLI = nullptr);
-
/// isSafeToSpeculativelyExecute - Return true if the instruction does not
/// have any effects besides calculating the result and does not have
/// undefined behavior.
diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp
index c91b892b493..5004f2b8f0c 100644
--- a/llvm/lib/Analysis/Loads.cpp
+++ b/llvm/lib/Analysis/Loads.cpp
@@ -21,8 +21,212 @@
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
+#include "llvm/IR/Statepoint.h"
+
using namespace llvm;
+static bool isDereferenceableFromAttribute(const Value *BV, APInt Offset,
+ Type *Ty, const DataLayout &DL,
+ const Instruction *CtxI,
+ const DominatorTree *DT,
+ const TargetLibraryInfo *TLI) {
+ assert(Offset.isNonNegative() && "offset can't be negative");
+ assert(Ty->isSized() && "must be sized");
+
+ APInt DerefBytes(Offset.getBitWidth(), 0);
+ bool CheckForNonNull = false;
+ if (const Argument *A = dyn_cast<Argument>(BV)) {
+ DerefBytes = A->getDereferenceableBytes();
+ if (!DerefBytes.getBoolValue()) {
+ DerefBytes = A->getDereferenceableOrNullBytes();
+ CheckForNonNull = true;
+ }
+ } else if (auto CS = ImmutableCallSite(BV)) {
+ DerefBytes = CS.getDereferenceableBytes(0);
+ if (!DerefBytes.getBoolValue()) {
+ DerefBytes = CS.getDereferenceableOrNullBytes(0);
+ CheckForNonNull = true;
+ }
+ } else if (const LoadInst *LI = dyn_cast<LoadInst>(BV)) {
+ if (MDNode *MD = LI->getMetadata(LLVMContext::MD_dereferenceable)) {
+ ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
+ DerefBytes = CI->getLimitedValue();
+ }
+ if (!DerefBytes.getBoolValue()) {
+ if (MDNode *MD =
+ LI->getMetadata(LLVMContext::MD_dereferenceable_or_null)) {
+ ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
+ DerefBytes = CI->getLimitedValue();
+ }
+ CheckForNonNull = true;
+ }
+ }
+
+ if (DerefBytes.getBoolValue())
+ if (DerefBytes.uge(Offset + DL.getTypeStoreSize(Ty)))
+ if (!CheckForNonNull || isKnownNonNullAt(BV, CtxI, DT, TLI))
+ return true;
+
+ return false;
+}
+
+static bool isDereferenceableFromAttribute(const Value *V, const DataLayout &DL,
+ const Instruction *CtxI,
+ const DominatorTree *DT,
+ const TargetLibraryInfo *TLI) {
+ Type *VTy = V->getType();
+ Type *Ty = VTy->getPointerElementType();
+ if (!Ty->isSized())
+ return false;
+
+ APInt Offset(DL.getTypeStoreSizeInBits(VTy), 0);
+ return isDereferenceableFromAttribute(V, Offset, Ty, DL, CtxI, DT, TLI);
+}
+
+static bool isAligned(const Value *Base, APInt Offset, unsigned Align,
+ const DataLayout &DL) {
+ APInt BaseAlign(Offset.getBitWidth(), Base->getPointerAlignment(DL));
+
+ if (!BaseAlign) {
+ Type *Ty = Base->getType()->getPointerElementType();
+ if (!Ty->isSized())
+ return false;
+ BaseAlign = DL.getABITypeAlignment(Ty);
+ }
+
+ APInt Alignment(Offset.getBitWidth(), Align);
+
+ assert(Alignment.isPowerOf2() && "must be a power of 2!");
+ return BaseAlign.uge(Alignment) && !(Offset & (Alignment-1));
+}
+
+static bool isAligned(const Value *Base, unsigned Align, const DataLayout &DL) {
+ Type *Ty = Base->getType();
+ assert(Ty->isSized() && "must be sized");
+ APInt Offset(DL.getTypeStoreSizeInBits(Ty), 0);
+ return isAligned(Base, Offset, Align, DL);
+}
+
+/// Test if V is always a pointer to allocated and suitably aligned memory for
+/// a simple load or store.
+static bool isDereferenceableAndAlignedPointer(
+ const Value *V, unsigned Align, const DataLayout &DL,
+ const Instruction *CtxI, const DominatorTree *DT,
+ const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited) {
+ // Note that it is not safe to speculate into a malloc'd region because
+ // malloc may return null.
+
+ // These are obviously ok if aligned.
+ if (isa<AllocaInst>(V))
+ return isAligned(V, Align, DL);
+
+ // It's not always safe to follow a bitcast, for example:
+ // bitcast i8* (alloca i8) to i32*
+ // would result in a 4-byte load from a 1-byte alloca. However,
+ // if we're casting from a pointer from a type of larger size
+ // to a type of smaller size (or the same size), and the alignment
+ // is at least as large as for the resulting pointer type, then
+ // we can look through the bitcast.
+ if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) {
+ Type *STy = BC->getSrcTy()->getPointerElementType(),
+ *DTy = BC->getDestTy()->getPointerElementType();
+ if (STy->isSized() && DTy->isSized() &&
+ (DL.getTypeStoreSize(STy) >= DL.getTypeStoreSize(DTy)) &&
+ (DL.getABITypeAlignment(STy) >= DL.getABITypeAlignment(DTy)))
+ return isDereferenceableAndAlignedPointer(BC->getOperand(0), Align, DL,
+ CtxI, DT, TLI, Visited);
+ }
+
+ // Global variables which can't collapse to null are ok.
+ if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
+ if (!GV->hasExternalWeakLinkage())
+ return isAligned(V, Align, DL);
+
+ // byval arguments are okay.
+ if (const Argument *A = dyn_cast<Argument>(V))
+ if (A->hasByValAttr())
+ return isAligned(V, Align, DL);
+
+ if (isDereferenceableFromAttribute(V, DL, CtxI, DT, TLI))
+ return isAligned(V, Align, DL);
+
+ // For GEPs, determine if the indexing lands within the allocated object.
+ if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
+ Type *Ty = GEP->getResultElementType();
+ const Value *Base = GEP->getPointerOperand();
+
+ // Conservatively require that the base pointer be fully dereferenceable
+ // and aligned.
+ if (!Visited.insert(Base).second)
+ return false;
+ if (!isDereferenceableAndAlignedPointer(Base, Align, DL, CtxI, DT, TLI,
+ Visited))
+ return false;
+
+ APInt Offset(DL.getPointerTypeSizeInBits(GEP->getType()), 0);
+ if (!GEP->accumulateConstantOffset(DL, Offset))
+ return false;
+
+ // Check if the load is within the bounds of the underlying object
+ // and offset is aligned.
+ uint64_t LoadSize = DL.getTypeStoreSize(Ty);
+ Type *BaseType = GEP->getSourceElementType();
+ assert(isPowerOf2_32(Align) && "must be a power of 2!");
+ return (Offset + LoadSize).ule(DL.getTypeAllocSize(BaseType)) &&
+ !(Offset & APInt(Offset.getBitWidth(), Align-1));
+ }
+
+ // For gc.relocate, look through relocations
+ if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
+ return isDereferenceableAndAlignedPointer(
+ RelocateInst->getDerivedPtr(), Align, DL, CtxI, DT, TLI, Visited);
+
+ if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
+ return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, DL,
+ CtxI, DT, TLI, Visited);
+
+ // If we don't know, assume the worst.
+ return false;
+}
+
+bool llvm::isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
+ const DataLayout &DL,
+ const Instruction *CtxI,
+ const DominatorTree *DT,
+ const TargetLibraryInfo *TLI) {
+ // When dereferenceability information is provided by a dereferenceable
+ // attribute, we know exactly how many bytes are dereferenceable. If we can
+ // determine the exact offset to the attributed variable, we can use that
+ // information here.
+ Type *VTy = V->getType();
+ Type *Ty = VTy->getPointerElementType();
+
+ // Require ABI alignment for loads without alignment specification
+ if (Align == 0)
+ Align = DL.getABITypeAlignment(Ty);
+
+ if (Ty->isSized()) {
+ APInt Offset(DL.getTypeStoreSizeInBits(VTy), 0);
+ const Value *BV = V->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
+
+ if (Offset.isNonNegative())
+ if (isDereferenceableFromAttribute(BV, Offset, Ty, DL, CtxI, DT, TLI) &&
+ isAligned(BV, Offset, Align, DL))
+ return true;
+ }
+
+ SmallPtrSet<const Value *, 32> Visited;
+ return ::isDereferenceableAndAlignedPointer(V, Align, DL, CtxI, DT, TLI,
+ Visited);
+}
+
+bool llvm::isDereferenceablePointer(const Value *V, const DataLayout &DL,
+ const Instruction *CtxI,
+ const DominatorTree *DT,
+ const TargetLibraryInfo *TLI) {
+ return isDereferenceableAndAlignedPointer(V, 1, DL, CtxI, DT, TLI);
+}
+
/// \brief Test if A and B will obviously have the same value.
///
/// This includes recognizing that %t0 and %t1 will have the same
diff --git a/llvm/lib/Analysis/MemDerefPrinter.cpp b/llvm/lib/Analysis/MemDerefPrinter.cpp
index 36f1424c8cf..fa0cc5a46c2 100644
--- a/llvm/lib/Analysis/MemDerefPrinter.cpp
+++ b/llvm/lib/Analysis/MemDerefPrinter.cpp
@@ -10,7 +10,7 @@
#include "llvm/Analysis/Passes.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
-#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/Analysis/Loads.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstIterator.h"
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 617083a15d0..a6211ece22e 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -18,6 +18,7 @@
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
+#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/ConstantRange.h"
@@ -3106,208 +3107,6 @@ bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
return true;
}
-static bool isDereferenceableFromAttribute(const Value *BV, APInt Offset,
- Type *Ty, const DataLayout &DL,
- const Instruction *CtxI,
- const DominatorTree *DT,
- const TargetLibraryInfo *TLI) {
- assert(Offset.isNonNegative() && "offset can't be negative");
- assert(Ty->isSized() && "must be sized");
-
- APInt DerefBytes(Offset.getBitWidth(), 0);
- bool CheckForNonNull = false;
- if (const Argument *A = dyn_cast<Argument>(BV)) {
- DerefBytes = A->getDereferenceableBytes();
- if (!DerefBytes.getBoolValue()) {
- DerefBytes = A->getDereferenceableOrNullBytes();
- CheckForNonNull = true;
- }
- } else if (auto CS = ImmutableCallSite(BV)) {
- DerefBytes = CS.getDereferenceableBytes(0);
- if (!DerefBytes.getBoolValue()) {
- DerefBytes = CS.getDereferenceableOrNullBytes(0);
- CheckForNonNull = true;
- }
- } else if (const LoadInst *LI = dyn_cast<LoadInst>(BV)) {
- if (MDNode *MD = LI->getMetadata(LLVMContext::MD_dereferenceable)) {
- ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
- DerefBytes = CI->getLimitedValue();
- }
- if (!DerefBytes.getBoolValue()) {
- if (MDNode *MD =
- LI->getMetadata(LLVMContext::MD_dereferenceable_or_null)) {
- ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
- DerefBytes = CI->getLimitedValue();
- }
- CheckForNonNull = true;
- }
- }
-
- if (DerefBytes.getBoolValue())
- if (DerefBytes.uge(Offset + DL.getTypeStoreSize(Ty)))
- if (!CheckForNonNull || isKnownNonNullAt(BV, CtxI, DT, TLI))
- return true;
-
- return false;
-}
-
-static bool isDereferenceableFromAttribute(const Value *V, const DataLayout &DL,
- const Instruction *CtxI,
- const DominatorTree *DT,
- const TargetLibraryInfo *TLI) {
- Type *VTy = V->getType();
- Type *Ty = VTy->getPointerElementType();
- if (!Ty->isSized())
- return false;
-
- APInt Offset(DL.getTypeStoreSizeInBits(VTy), 0);
- return isDereferenceableFromAttribute(V, Offset, Ty, DL, CtxI, DT, TLI);
-}
-
-static bool isAligned(const Value *Base, APInt Offset, unsigned Align,
- const DataLayout &DL) {
- APInt BaseAlign(Offset.getBitWidth(), Base->getPointerAlignment(DL));
-
- if (!BaseAlign) {
- Type *Ty = Base->getType()->getPointerElementType();
- if (!Ty->isSized())
- return false;
- BaseAlign = DL.getABITypeAlignment(Ty);
- }
-
- APInt Alignment(Offset.getBitWidth(), Align);
-
- assert(Alignment.isPowerOf2() && "must be a power of 2!");
- return BaseAlign.uge(Alignment) && !(Offset & (Alignment-1));
-}
-
-static bool isAligned(const Value *Base, unsigned Align, const DataLayout &DL) {
- Type *Ty = Base->getType();
- assert(Ty->isSized() && "must be sized");
- APInt Offset(DL.getTypeStoreSizeInBits(Ty), 0);
- return isAligned(Base, Offset, Align, DL);
-}
-
-/// Test if V is always a pointer to allocated and suitably aligned memory for
-/// a simple load or store.
-static bool isDereferenceableAndAlignedPointer(
- const Value *V, unsigned Align, const DataLayout &DL,
- const Instruction *CtxI, const DominatorTree *DT,
- const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited) {
- // Note that it is not safe to speculate into a malloc'd region because
- // malloc may return null.
-
- // These are obviously ok if aligned.
- if (isa<AllocaInst>(V))
- return isAligned(V, Align, DL);
-
- // It's not always safe to follow a bitcast, for example:
- // bitcast i8* (alloca i8) to i32*
- // would result in a 4-byte load from a 1-byte alloca. However,
- // if we're casting from a pointer from a type of larger size
- // to a type of smaller size (or the same size), and the alignment
- // is at least as large as for the resulting pointer type, then
- // we can look through the bitcast.
- if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) {
- Type *STy = BC->getSrcTy()->getPointerElementType(),
- *DTy = BC->getDestTy()->getPointerElementType();
- if (STy->isSized() && DTy->isSized() &&
- (DL.getTypeStoreSize(STy) >= DL.getTypeStoreSize(DTy)) &&
- (DL.getABITypeAlignment(STy) >= DL.getABITypeAlignment(DTy)))
- return isDereferenceableAndAlignedPointer(BC->getOperand(0), Align, DL,
- CtxI, DT, TLI, Visited);
- }
-
- // Global variables which can't collapse to null are ok.
- if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
- if (!GV->hasExternalWeakLinkage())
- return isAligned(V, Align, DL);
-
- // byval arguments are okay.
- if (const Argument *A = dyn_cast<Argument>(V))
- if (A->hasByValAttr())
- return isAligned(V, Align, DL);
-
- if (isDereferenceableFromAttribute(V, DL, CtxI, DT, TLI))
- return isAligned(V, Align, DL);
-
- // For GEPs, determine if the indexing lands within the allocated object.
- if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
- Type *Ty = GEP->getResultElementType();
- const Value *Base = GEP->getPointerOperand();
-
- // Conservatively require that the base pointer be fully dereferenceable
- // and aligned.
- if (!Visited.insert(Base).second)
- return false;
- if (!isDereferenceableAndAlignedPointer(Base, Align, DL, CtxI, DT, TLI,
- Visited))
- return false;
-
- APInt Offset(DL.getPointerTypeSizeInBits(GEP->getType()), 0);
- if (!GEP->accumulateConstantOffset(DL, Offset))
- return false;
-
- // Check if the load is within the bounds of the underlying object
- // and offset is aligned.
- uint64_t LoadSize = DL.getTypeStoreSize(Ty);
- Type *BaseType = GEP->getSourceElementType();
- assert(isPowerOf2_32(Align) && "must be a power of 2!");
- return (Offset + LoadSize).ule(DL.getTypeAllocSize(BaseType)) &&
- !(Offset & APInt(Offset.getBitWidth(), Align-1));
- }
-
- // For gc.relocate, look through relocations
- if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
- return isDereferenceableAndAlignedPointer(
- RelocateInst->getDerivedPtr(), Align, DL, CtxI, DT, TLI, Visited);
-
- if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
- return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, DL,
- CtxI, DT, TLI, Visited);
-
- // If we don't know, assume the worst.
- return false;
-}
-
-bool llvm::isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
- const DataLayout &DL,
- const Instruction *CtxI,
- const DominatorTree *DT,
- const TargetLibraryInfo *TLI) {
- // When dereferenceability information is provided by a dereferenceable
- // attribute, we know exactly how many bytes are dereferenceable. If we can
- // determine the exact offset to the attributed variable, we can use that
- // information here.
- Type *VTy = V->getType();
- Type *Ty = VTy->getPointerElementType();
-
- // Require ABI alignment for loads without alignment specification
- if (Align == 0)
- Align = DL.getABITypeAlignment(Ty);
-
- if (Ty->isSized()) {
- APInt Offset(DL.getTypeStoreSizeInBits(VTy), 0);
- const Value *BV = V->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
-
- if (Offset.isNonNegative())
- if (isDereferenceableFromAttribute(BV, Offset, Ty, DL, CtxI, DT, TLI) &&
- isAligned(BV, Offset, Align, DL))
- return true;
- }
-
- SmallPtrSet<const Value *, 32> Visited;
- return ::isDereferenceableAndAlignedPointer(V, Align, DL, CtxI, DT, TLI,
- Visited);
-}
-
-bool llvm::isDereferenceablePointer(const Value *V, const DataLayout &DL,
- const Instruction *CtxI,
- const DominatorTree *DT,
- const TargetLibraryInfo *TLI) {
- return isDereferenceableAndAlignedPointer(V, 1, DL, CtxI, DT, TLI);
-}
-
bool llvm::isSafeToSpeculativelyExecute(const Value *V,
const Instruction *CtxI,
const DominatorTree *DT,
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 79d307a0544..27a131e4243 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -20,6 +20,7 @@
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
+#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
diff --git a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
index 51eb36434f9..7b51de5ef7d 100644
--- a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -38,6 +38,7 @@
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
+#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 249553aed27..a7c660468c1 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -14,6 +14,7 @@
#include "InstCombineInternal.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Dominators.h"