| author | Tim Northover <tnorthover@apple.com> | 2019-07-09 11:35:35 +0000 |
|---|---|---|
| committer | Tim Northover <tnorthover@apple.com> | 2019-07-09 11:35:35 +0000 |
| commit | 60afa49abed6efdd7196d38ec884727090cfe373 (patch) | |
| tree | a9af173067a23e1f6fa896cfb31bde34ec7cb3b3 /llvm/lib/Transforms | |
| parent | 01eaae6dd12862cda6b42d565a215b07a178aba6 (diff) | |
OpaquePtr: add Type parameter to Loads analysis API.
This makes the functions in Loads.h require a type to be specified
independently of the pointer Value, so that they can still do their job
when pointers have no structure other than an address space.
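
For orientation, here is a minimal sketch of the new calling convention, assuming the post-patch Loads.h signatures shown in the diff below; `canSpeculateLoad` is a hypothetical helper, not part of this patch:

```cpp
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Hypothetical caller illustrating the updated API: the type being loaded
// is now an explicit parameter instead of being recovered from the
// pointer's pointee type, which keeps the query working once pointers
// carry no type structure of their own.
static bool canSpeculateLoad(LoadInst &LI, const DataLayout &DL) {
  // Before this patch: isSafeToLoadUnconditionally(Ptr, Align, DL, &LI).
  return isSafeToLoadUnconditionally(LI.getPointerOperand(), LI.getType(),
                                     LI.getAlignment(), DL, &LI);
}
```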
Most callers had an obvious memory operation handy to provide this type, but
SROA and ArgumentPromotion were doing more complicated analysis. They are
updated to merge the properties of the various instructions they were
considering.
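
As a rough illustration of that merging (condensed from the ArgumentPromotion hunk below into a free function; in the patch it is a lambda named `UpdateBaseTy`):

```cpp
#include "llvm/IR/Type.h"

using namespace llvm;

// Condensed sketch: the first load or GEP seen establishes the assumed
// base type of the promoted argument; every later memory operation must
// agree with it, otherwise promotion is abandoned.
static bool updateBaseTy(Type *&BaseTy, Type *NewBaseTy) {
  if (BaseTy)
    return BaseTy == NewBaseTy; // conflicting views of the memory: give up
  BaseTy = NewBaseTy;           // first observation wins
  return true;
}
```

SROA's `isSafePHIToSpeculate` takes the analogous approach for sizes, keeping the maximum store size (`MaxSize`) over all the loads it intends to speculate.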
llvm-svn: 365468
Diffstat (limited to 'llvm/lib/Transforms')

| File | Lines changed |
|---|---|
| llvm/lib/Transforms/IPO/ArgumentPromotion.cpp | 52 |
| llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp | 2 |
| llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp | 6 |
| llvm/lib/Transforms/Scalar/LICM.cpp | 4 |
| llvm/lib/Transforms/Scalar/MergeICmps.cpp | 2 |
| llvm/lib/Transforms/Scalar/SROA.cpp | 16 |
| llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp | 2 |

7 files changed, 63 insertions(+), 21 deletions(-)
```diff
diff --git a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
index 8b77adc78f3..95a9f31cced 100644
--- a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -479,9 +479,9 @@ doPromotion(Function *F, SmallPtrSetImpl<Argument *> &ArgsToPromote,
   return NF;
 }
 
-/// AllCallersPassInValidPointerForArgument - Return true if we can prove that
-/// all callees pass in a valid pointer for the specified function argument.
-static bool allCallersPassInValidPointerForArgument(Argument *Arg) {
+/// Return true if we can prove that all callees pass in a valid pointer for the
+/// specified function argument.
+static bool allCallersPassValidPointerForArgument(Argument *Arg, Type *Ty) {
   Function *Callee = Arg->getParent();
   const DataLayout &DL = Callee->getParent()->getDataLayout();
 
@@ -493,7 +493,7 @@ static bool allCallersPassInValidPointerForArgument(Argument *Arg) {
     CallSite CS(U);
     assert(CS && "Should only have direct calls!");
 
-    if (!isDereferenceablePointer(CS.getArgument(ArgNo), DL))
+    if (!isDereferenceablePointer(CS.getArgument(ArgNo), Ty, DL))
       return false;
   }
   return true;
@@ -566,7 +566,7 @@ static void markIndicesSafe(const IndicesVector &ToMark,
 /// This method limits promotion of aggregates to only promote up to three
 /// elements of the aggregate in order to avoid exploding the number of
 /// arguments passed in.
-static bool isSafeToPromoteArgument(Argument *Arg, bool isByVal, AAResults &AAR,
+static bool isSafeToPromoteArgument(Argument *Arg, Type *ByValTy, AAResults &AAR,
                                     unsigned MaxElements) {
   using GEPIndicesSet = std::set<IndicesVector>;
 
@@ -596,9 +596,28 @@ static bool isSafeToPromoteArgument(Argument *Arg, bool isByVal, AAResults &AAR,
   GEPIndicesSet ToPromote;
 
   // If the pointer is always valid, any load with first index 0 is valid.
-  if (isByVal || allCallersPassInValidPointerForArgument(Arg))
+
+  if (ByValTy)
     SafeToUnconditionallyLoad.insert(IndicesVector(1, 0));
 
+  // Whenever a new underlying type for the operand is found, make sure it's
+  // consistent with the GEPs and loads we've already seen and, if necessary,
+  // use it to see if all incoming pointers are valid (which implies the 0-index
+  // is safe).
+  Type *BaseTy = ByValTy;
+  auto UpdateBaseTy = [&](Type *NewBaseTy) {
+    if (BaseTy)
+      return BaseTy == NewBaseTy;
+
+    BaseTy = NewBaseTy;
+    if (allCallersPassValidPointerForArgument(Arg, BaseTy)) {
+      assert(SafeToUnconditionallyLoad.empty());
+      SafeToUnconditionallyLoad.insert(IndicesVector(1, 0));
+    }
+
+    return true;
+  };
+
   // First, iterate the entry block and mark loads of (geps of) arguments as
   // safe.
   BasicBlock &EntryBlock = Arg->getParent()->front();
@@ -621,6 +640,9 @@ static bool isSafeToPromoteArgument(Argument *Arg, bool isByVal, AAResults &AAR,
           // right away, can't promote this argument at all.
           return false;
 
+        if (!UpdateBaseTy(GEP->getSourceElementType()))
+          return false;
+
         // Indices checked out, mark them as safe
         markIndicesSafe(Indices, SafeToUnconditionallyLoad);
         Indices.clear();
@@ -628,6 +650,11 @@ static bool isSafeToPromoteArgument(Argument *Arg, bool isByVal, AAResults &AAR,
       } else if (V == Arg) {
         // Direct loads are equivalent to a GEP with a single 0 index.
         markIndicesSafe(IndicesVector(1, 0), SafeToUnconditionallyLoad);
+
+        if (BaseTy && LI->getType() != BaseTy)
+          return false;
+
+        BaseTy = LI->getType();
       }
     }
 
@@ -645,6 +672,9 @@ static bool isSafeToPromoteArgument(Argument *Arg, bool isByVal, AAResults &AAR,
       Loads.push_back(LI);
       // Direct loads are equivalent to a GEP with a zero index and then a load.
       Operands.push_back(0);
+
+      if (!UpdateBaseTy(LI->getType()))
+        return false;
     } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UR)) {
       if (GEP->use_empty()) {
         // Dead GEP's cause trouble later. Just remove them if we run into
@@ -653,9 +683,12 @@ static bool isSafeToPromoteArgument(Argument *Arg, bool isByVal, AAResults &AAR,
         // TODO: This runs the above loop over and over again for dead GEPs
         // Couldn't we just do increment the UI iterator earlier and erase the
         // use?
-        return isSafeToPromoteArgument(Arg, isByVal, AAR, MaxElements);
+        return isSafeToPromoteArgument(Arg, ByValTy, AAR, MaxElements);
       }
 
+      if (!UpdateBaseTy(GEP->getSourceElementType()))
+        return false;
+
       // Ensure that all of the indices are constants.
       for (User::op_iterator i = GEP->idx_begin(), e = GEP->idx_end(); i != e;
            ++i)
@@ -966,8 +999,9 @@ promoteArguments(Function *F, function_ref<AAResults &(Function &F)> AARGetter,
     }
 
     // Otherwise, see if we can promote the pointer to its value.
-    if (isSafeToPromoteArgument(PtrArg, PtrArg->hasByValAttr(), AAR,
-                                MaxElements))
+    Type *ByValTy =
+        PtrArg->hasByValAttr() ? PtrArg->getParamByValType() : nullptr;
+    if (isSafeToPromoteArgument(PtrArg, ByValTy, AAR, MaxElements))
       ArgsToPromote.insert(PtrArg);
   }
 
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 63f313e1f7f..4b3333affa7 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -1060,7 +1060,7 @@ Value *InstCombiner::simplifyMaskedLoad(IntrinsicInst &II) {
 
   // If we can unconditionally load from this address, replace with a
   // load/select idiom. TODO: use DT for context sensitive query
-  if (isDereferenceableAndAlignedPointer(LoadPtr, Alignment,
+  if (isDereferenceableAndAlignedPointer(LoadPtr, II.getType(), Alignment,
                                          II.getModule()->getDataLayout(), &II,
                                          nullptr)) {
     Value *LI = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index 0df80e07e84..054fb7da09a 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -1064,8 +1064,10 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
   if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
     // load (select (Cond, &V1, &V2))  --> select(Cond, load &V1, load &V2).
     unsigned Align = LI.getAlignment();
-    if (isSafeToLoadUnconditionally(SI->getOperand(1), Align, DL, SI) &&
-        isSafeToLoadUnconditionally(SI->getOperand(2), Align, DL, SI)) {
+    if (isSafeToLoadUnconditionally(SI->getOperand(1), LI.getType(), Align,
+                                    DL, SI) &&
+        isSafeToLoadUnconditionally(SI->getOperand(2), LI.getType(), Align,
+                                    DL, SI)) {
       LoadInst *V1 =
           Builder.CreateLoad(LI.getType(), SI->getOperand(1),
                              SI->getOperand(1)->getName() + ".val");
diff --git a/llvm/lib/Transforms/Scalar/LICM.cpp b/llvm/lib/Transforms/Scalar/LICM.cpp
index 2d27a660102..3c5e773f793 100644
--- a/llvm/lib/Transforms/Scalar/LICM.cpp
+++ b/llvm/lib/Transforms/Scalar/LICM.cpp
@@ -2016,8 +2016,8 @@ bool llvm::promoteLoopAccessesToScalars(
       //   deref info through it.
       if (!DereferenceableInPH) {
         DereferenceableInPH = isDereferenceableAndAlignedPointer(
-            Store->getPointerOperand(), Store->getAlignment(), MDL,
-            Preheader->getTerminator(), DT);
+            Store->getPointerOperand(), Store->getValueOperand()->getType(),
+            Store->getAlignment(), MDL, Preheader->getTerminator(), DT);
       }
     } else
       return false; // Not a load or store.
diff --git a/llvm/lib/Transforms/Scalar/MergeICmps.cpp b/llvm/lib/Transforms/Scalar/MergeICmps.cpp
index 630f029850c..3d047a19326 100644
--- a/llvm/lib/Transforms/Scalar/MergeICmps.cpp
+++ b/llvm/lib/Transforms/Scalar/MergeICmps.cpp
@@ -162,7 +162,7 @@ BCEAtom visitICmpLoadOperand(Value *const Val, BaseIdentifier &BaseId) {
     return {};
   }
   const auto &DL = GEP->getModule()->getDataLayout();
-  if (!isDereferenceablePointer(GEP, DL)) {
+  if (!isDereferenceablePointer(GEP, LoadI->getType(), DL)) {
     LLVM_DEBUG(dbgs() << "not dereferenceable\n");
     // We need to make sure that we can do comparison in any order, so we
     // require memory to be unconditionnally dereferencable.
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index cc79afdb05a..33f90d0b01e 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -1190,12 +1190,16 @@ static Type *findCommonType(AllocaSlices::const_iterator B,
 /// FIXME: This should be hoisted into a generic utility, likely in
 /// Transforms/Util/Local.h
 static bool isSafePHIToSpeculate(PHINode &PN) {
+  const DataLayout &DL = PN.getModule()->getDataLayout();
+
   // For now, we can only do this promotion if the load is in the same block
   // as the PHI, and if there are no stores between the phi and load.
   // TODO: Allow recursive phi users.
   // TODO: Allow stores.
   BasicBlock *BB = PN.getParent();
   unsigned MaxAlign = 0;
+  uint64_t APWidth = DL.getIndexTypeSizeInBits(PN.getType());
+  APInt MaxSize(APWidth, 0);
   bool HaveLoad = false;
   for (User *U : PN.users()) {
     LoadInst *LI = dyn_cast<LoadInst>(U);
@@ -1214,15 +1218,15 @@ static bool isSafePHIToSpeculate(PHINode &PN) {
       if (BBI->mayWriteToMemory())
         return false;
 
+    uint64_t Size = DL.getTypeStoreSizeInBits(LI->getType());
     MaxAlign = std::max(MaxAlign, LI->getAlignment());
+    MaxSize = MaxSize.ult(Size) ? APInt(APWidth, Size) : MaxSize;
     HaveLoad = true;
   }
 
   if (!HaveLoad)
     return false;
 
-  const DataLayout &DL = PN.getModule()->getDataLayout();
-
   // We can only transform this if it is safe to push the loads into the
   // predecessor blocks. The only thing to watch out for is that we can't put
   // a possibly trapping load in the predecessor if it is a critical edge.
@@ -1244,7 +1248,7 @@ static bool isSafePHIToSpeculate(PHINode &PN) {
     // If this pointer is always safe to load, or if we can prove that there
     // is already a load in the block, then we can move the load to the pred
     // block.
-    if (isSafeToLoadUnconditionally(InVal, MaxAlign, DL, TI))
+    if (isSafeToLoadUnconditionally(InVal, MaxAlign, MaxSize, DL, TI))
       continue;
 
     return false;
@@ -1334,9 +1338,11 @@ static bool isSafeSelectToSpeculate(SelectInst &SI) {
     // Both operands to the select need to be dereferenceable, either
     // absolutely (e.g. allocas) or at this point because we can see other
     // accesses to it.
-    if (!isSafeToLoadUnconditionally(TValue, LI->getAlignment(), DL, LI))
+    if (!isSafeToLoadUnconditionally(TValue, LI->getType(), LI->getAlignment(),
+                                     DL, LI))
       return false;
-    if (!isSafeToLoadUnconditionally(FValue, LI->getAlignment(), DL, LI))
+    if (!isSafeToLoadUnconditionally(FValue, LI->getType(), LI->getAlignment(),
+                                     DL, LI))
       return false;
   }
diff --git a/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp b/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
index c3ccfde8acc..f0b79079d81 100644
--- a/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
@@ -340,7 +340,7 @@ static bool canMoveAboveCall(Instruction *I, CallInst *CI, AliasAnalysis *AA) {
     //   being loaded from.
     const DataLayout &DL = L->getModule()->getDataLayout();
     if (isModSet(AA->getModRefInfo(CI, MemoryLocation::get(L))) ||
-        !isSafeToLoadUnconditionally(L->getPointerOperand(),
+        !isSafeToLoadUnconditionally(L->getPointerOperand(), L->getType(),
                                      L->getAlignment(), DL, L))
       return false;
   }
```

