Diffstat (limited to 'llvm/lib/Analysis/Loads.cpp')
 -rw-r--r--  llvm/lib/Analysis/Loads.cpp | 204
 1 file changed, 204 insertions(+), 0 deletions(-)
diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp
index c91b892b493..5004f2b8f0c 100644
--- a/llvm/lib/Analysis/Loads.cpp
+++ b/llvm/lib/Analysis/Loads.cpp
@@ -21,8 +21,212 @@
 #include "llvm/IR/LLVMContext.h"
 #include "llvm/IR/Module.h"
 #include "llvm/IR/Operator.h"
+#include "llvm/IR/Statepoint.h"
+
 using namespace llvm;
 
+static bool isDereferenceableFromAttribute(const Value *BV, APInt Offset,
+                                           Type *Ty, const DataLayout &DL,
+                                           const Instruction *CtxI,
+                                           const DominatorTree *DT,
+                                           const TargetLibraryInfo *TLI) {
+  assert(Offset.isNonNegative() && "offset can't be negative");
+  assert(Ty->isSized() && "must be sized");
+
+  APInt DerefBytes(Offset.getBitWidth(), 0);
+  bool CheckForNonNull = false;
+  if (const Argument *A = dyn_cast<Argument>(BV)) {
+    DerefBytes = A->getDereferenceableBytes();
+    if (!DerefBytes.getBoolValue()) {
+      DerefBytes = A->getDereferenceableOrNullBytes();
+      CheckForNonNull = true;
+    }
+  } else if (auto CS = ImmutableCallSite(BV)) {
+    DerefBytes = CS.getDereferenceableBytes(0);
+    if (!DerefBytes.getBoolValue()) {
+      DerefBytes = CS.getDereferenceableOrNullBytes(0);
+      CheckForNonNull = true;
+    }
+  } else if (const LoadInst *LI = dyn_cast<LoadInst>(BV)) {
+    if (MDNode *MD = LI->getMetadata(LLVMContext::MD_dereferenceable)) {
+      ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
+      DerefBytes = CI->getLimitedValue();
+    }
+    if (!DerefBytes.getBoolValue()) {
+      if (MDNode *MD =
+              LI->getMetadata(LLVMContext::MD_dereferenceable_or_null)) {
+        ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
+        DerefBytes = CI->getLimitedValue();
+      }
+      CheckForNonNull = true;
+    }
+  }
+
+  if (DerefBytes.getBoolValue())
+    if (DerefBytes.uge(Offset + DL.getTypeStoreSize(Ty)))
+      if (!CheckForNonNull || isKnownNonNullAt(BV, CtxI, DT, TLI))
+        return true;
+
+  return false;
+}
+
+static bool isDereferenceableFromAttribute(const Value *V, const DataLayout &DL,
+                                           const Instruction *CtxI,
+                                           const DominatorTree *DT,
+                                           const TargetLibraryInfo *TLI) {
+  Type *VTy = V->getType();
+  Type *Ty = VTy->getPointerElementType();
+  if (!Ty->isSized())
+    return false;
+
+  APInt Offset(DL.getTypeStoreSizeInBits(VTy), 0);
+  return isDereferenceableFromAttribute(V, Offset, Ty, DL, CtxI, DT, TLI);
+}
+
+static bool isAligned(const Value *Base, APInt Offset, unsigned Align,
+                      const DataLayout &DL) {
+  APInt BaseAlign(Offset.getBitWidth(), Base->getPointerAlignment(DL));
+
+  if (!BaseAlign) {
+    Type *Ty = Base->getType()->getPointerElementType();
+    if (!Ty->isSized())
+      return false;
+    BaseAlign = DL.getABITypeAlignment(Ty);
+  }
+
+  APInt Alignment(Offset.getBitWidth(), Align);
+
+  assert(Alignment.isPowerOf2() && "must be a power of 2!");
+  return BaseAlign.uge(Alignment) && !(Offset & (Alignment-1));
+}
+
+static bool isAligned(const Value *Base, unsigned Align, const DataLayout &DL) {
+  Type *Ty = Base->getType();
+  assert(Ty->isSized() && "must be sized");
+  APInt Offset(DL.getTypeStoreSizeInBits(Ty), 0);
+  return isAligned(Base, Offset, Align, DL);
+}
+
+/// Test if V is always a pointer to allocated and suitably aligned memory for
+/// a simple load or store.
+static bool isDereferenceableAndAlignedPointer(
+    const Value *V, unsigned Align, const DataLayout &DL,
+    const Instruction *CtxI, const DominatorTree *DT,
+    const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited) {
+  // Note that it is not safe to speculate into a malloc'd region because
+  // malloc may return null.
+
+  // These are obviously ok if aligned.
+  if (isa<AllocaInst>(V))
+    return isAligned(V, Align, DL);
+
+  // It's not always safe to follow a bitcast, for example:
+  //   bitcast i8* (alloca i8) to i32*
+  // would result in a 4-byte load from a 1-byte alloca. However,
+  // if we're casting from a pointer from a type of larger size
+  // to a type of smaller size (or the same size), and the alignment
+  // is at least as large as for the resulting pointer type, then
+  // we can look through the bitcast.
+  if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) {
+    Type *STy = BC->getSrcTy()->getPointerElementType(),
+         *DTy = BC->getDestTy()->getPointerElementType();
+    if (STy->isSized() && DTy->isSized() &&
+        (DL.getTypeStoreSize(STy) >= DL.getTypeStoreSize(DTy)) &&
+        (DL.getABITypeAlignment(STy) >= DL.getABITypeAlignment(DTy)))
+      return isDereferenceableAndAlignedPointer(BC->getOperand(0), Align, DL,
+                                                CtxI, DT, TLI, Visited);
+  }
+
+  // Global variables which can't collapse to null are ok.
+  if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
+    if (!GV->hasExternalWeakLinkage())
+      return isAligned(V, Align, DL);
+
+  // byval arguments are okay.
+  if (const Argument *A = dyn_cast<Argument>(V))
+    if (A->hasByValAttr())
+      return isAligned(V, Align, DL);
+
+  if (isDereferenceableFromAttribute(V, DL, CtxI, DT, TLI))
+    return isAligned(V, Align, DL);
+
+  // For GEPs, determine if the indexing lands within the allocated object.
+  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
+    Type *Ty = GEP->getResultElementType();
+    const Value *Base = GEP->getPointerOperand();
+
+    // Conservatively require that the base pointer be fully dereferenceable
+    // and aligned.
+    if (!Visited.insert(Base).second)
+      return false;
+    if (!isDereferenceableAndAlignedPointer(Base, Align, DL, CtxI, DT, TLI,
+                                            Visited))
+      return false;
+
+    APInt Offset(DL.getPointerTypeSizeInBits(GEP->getType()), 0);
+    if (!GEP->accumulateConstantOffset(DL, Offset))
+      return false;
+
+    // Check if the load is within the bounds of the underlying object
+    // and offset is aligned.
+    uint64_t LoadSize = DL.getTypeStoreSize(Ty);
+    Type *BaseType = GEP->getSourceElementType();
+    assert(isPowerOf2_32(Align) && "must be a power of 2!");
+    return (Offset + LoadSize).ule(DL.getTypeAllocSize(BaseType)) &&
+           !(Offset & APInt(Offset.getBitWidth(), Align-1));
+  }
+
+  // For gc.relocate, look through relocations
+  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
+    return isDereferenceableAndAlignedPointer(
+        RelocateInst->getDerivedPtr(), Align, DL, CtxI, DT, TLI, Visited);
+
+  if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
+    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, DL,
+                                              CtxI, DT, TLI, Visited);
+
+  // If we don't know, assume the worst.
+  return false;
+}
+
+bool llvm::isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
+                                              const DataLayout &DL,
+                                              const Instruction *CtxI,
+                                              const DominatorTree *DT,
+                                              const TargetLibraryInfo *TLI) {
+  // When dereferenceability information is provided by a dereferenceable
+  // attribute, we know exactly how many bytes are dereferenceable. If we can
+  // determine the exact offset to the attributed variable, we can use that
+  // information here.
+  Type *VTy = V->getType();
+  Type *Ty = VTy->getPointerElementType();
+
+  // Require ABI alignment for loads without alignment specification
+  if (Align == 0)
+    Align = DL.getABITypeAlignment(Ty);
+
+  if (Ty->isSized()) {
+    APInt Offset(DL.getTypeStoreSizeInBits(VTy), 0);
+    const Value *BV = V->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
+
+    if (Offset.isNonNegative())
+      if (isDereferenceableFromAttribute(BV, Offset, Ty, DL, CtxI, DT, TLI) &&
+          isAligned(BV, Offset, Align, DL))
+        return true;
+  }
+
+  SmallPtrSet<const Value *, 32> Visited;
+  return ::isDereferenceableAndAlignedPointer(V, Align, DL, CtxI, DT, TLI,
+                                              Visited);
+}
+
+bool llvm::isDereferenceablePointer(const Value *V, const DataLayout &DL,
+                                    const Instruction *CtxI,
+                                    const DominatorTree *DT,
+                                    const TargetLibraryInfo *TLI) {
+  return isDereferenceableAndAlignedPointer(V, 1, DL, CtxI, DT, TLI);
+}
+
 /// \brief Test if A and B will obviously have the same value.
 ///
 /// This includes recognizing that %t0 and %t1 will have the same
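For context, here is a minimal caller sketch for the two entry points this patch defines. It is not part of the commit: canSpeculateLoadAt is a hypothetical helper name, and it assumes the matching declarations live in llvm/Analysis/Loads.h (where they sit in trunk); everything else uses only the signatures shown above.

    // Hypothetical caller sketch -- not part of this commit.
    #include "llvm/Analysis/Loads.h"
    #include "llvm/Analysis/TargetLibraryInfo.h"
    #include "llvm/IR/Dominators.h"
    #include "llvm/IR/Instructions.h"
    #include "llvm/IR/Module.h"

    using namespace llvm;

    // True if LI's address is known dereferenceable and sufficiently aligned
    // at CtxI, i.e. the load could be speculated there. DT and TLI may be
    // null; they only strengthen the nonnull check that the attribute path
    // needs for dereferenceable_or_null sources.
    static bool canSpeculateLoadAt(const LoadInst *LI, const Instruction *CtxI,
                                   const DominatorTree *DT,
                                   const TargetLibraryInfo *TLI) {
      const DataLayout &DL = LI->getModule()->getDataLayout();
      // Passing Align == 0 is allowed: the entry point falls back to the
      // ABI alignment of the loaded type, as its wrapper code shows.
      return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
                                                LI->getAlignment(), DL, CtxI,
                                                DT, TLI);
    }

isDereferenceablePointer is the same query with Align == 1, i.e. it asks about dereferenceability only and ignores alignment.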
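The GEP case in the recursive walk reduces to two integer checks: the accumulated constant offset plus the load's store size must stay within the base object's alloc size, and the offset must be a multiple of the required alignment. A standalone mirror of that arithmetic, with illustrative numbers that are not taken from the patch:

    #include <cassert>
    #include <cstdint>

    // Plain-integer mirror of the GEP bounds/alignment test above:
    //   (Offset + LoadSize).ule(AllocSize) && !(Offset & (Align - 1))
    static bool gepLoadIsSafe(uint64_t Offset, uint64_t LoadSize,
                              uint64_t AllocSize, uint64_t Align) {
      return Offset + LoadSize <= AllocSize && (Offset & (Align - 1)) == 0;
    }

    int main() {
      assert(gepLoadIsSafe(8, 4, 16, 4));   // i32 at byte 8 of 16: in bounds
      assert(!gepLoadIsSafe(16, 4, 16, 4)); // would read one past the end
      assert(!gepLoadIsSafe(6, 4, 16, 4));  // offset 6 not 4-byte aligned
      return 0;
    }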