author     Philip Reames <listmail@philipreames.com>    2019-09-12 16:49:10 +0000
committer  Philip Reames <listmail@philipreames.com>    2019-09-12 16:49:10 +0000
commit     b90f94f42e3286017066e13c89cd57be9743a0cd
tree       0e0bf23e8742d7e26f646ba4f404d9c8b44e864d /llvm/lib/Analysis/Loads.cpp
parent     2ad25a4aeeae6e070c9cb56cc15e82ba6e2231af
[LV] Support invariant addresses in speculation logic
Implement a TODO from rL371452, and handle loop-invariant addresses in predicated blocks. If we can prove that the load is safe to speculate into the header, then we can avoid using a masked.load in favour of a normal load.
This is mostly about vectorization robustness. In the common case, it's generally expected that LICM/LoadStorePromotion would have eliminated such loads entirely.
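For illustration, consider a hypothetical source pattern this change targets (the function and variable names below are invented for the example, not taken from the commit): the load through g uses a loop-invariant address but executes only under a condition, so the vectorizer must prove it safe to speculate before it can emit a plain load instead of a llvm.masked.load.

    // Hypothetical C++ example, not from this commit. The read of *g is
    // predicated on a[i] > 0, but its address never varies across
    // iterations. If *g can be proven dereferenceable and aligned at the
    // loop header, the vectorized loop may speculate it as a normal load.
    int sumSelected(const int *a, const int *g, int n) {
      int s = 0;
      for (int i = 0; i < n; i++)
        if (a[i] > 0) // predicated block after if-conversion
          s += *g;    // uniform (loop-invariant) address
      return s;
    }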
Differential Revision: https://reviews.llvm.org/D67372
llvm-svn: 371745
Diffstat (limited to 'llvm/lib/Analysis/Loads.cpp')
-rw-r--r--  llvm/lib/Analysis/Loads.cpp  28
1 file changed, 18 insertions, 10 deletions
diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp
index ea98ca915c6..f689a17cb28 100644
--- a/llvm/lib/Analysis/Loads.cpp
+++ b/llvm/lib/Analysis/Loads.cpp
@@ -198,18 +198,31 @@ bool llvm::isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L,
                                              DominatorTree &DT) {
   auto &DL = LI->getModule()->getDataLayout();
   Value *Ptr = LI->getPointerOperand();
+
+  APInt EltSize(DL.getIndexTypeSizeInBits(Ptr->getType()),
+                DL.getTypeStoreSize(LI->getType()));
+  unsigned Align = LI->getAlignment();
+  if (Align == 0)
+    Align = DL.getABITypeAlignment(LI->getType());
+
+  Instruction *HeaderFirstNonPHI = L->getHeader()->getFirstNonPHI();
+
+  // If given a uniform (i.e. non-varying) address, see if we can prove the
+  // access is safe within the loop w/o needing predication.
+  if (L->isLoopInvariant(Ptr))
+    return isDereferenceableAndAlignedPointer(Ptr, Align, EltSize, DL,
+                                              HeaderFirstNonPHI, &DT);
+
+  // Otherwise, check to see if we have a repeating access pattern where we can
+  // prove that all accesses are well aligned and dereferenceable.
   auto *AddRec = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Ptr));
   if (!AddRec || AddRec->getLoop() != L || !AddRec->isAffine())
     return false;
   auto* Step = dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(SE));
   if (!Step)
     return false;
-  APInt StepC = Step->getAPInt();
-  APInt EltSize(DL.getIndexTypeSizeInBits(Ptr->getType()),
-                DL.getTypeStoreSize(LI->getType()));
   // TODO: generalize to access patterns which have gaps
-  // TODO: handle uniform addresses (if not already handled by LICM)
-  if (StepC != EltSize)
+  if (Step->getAPInt() != EltSize)
     return false;
 
   // TODO: If the symbolic trip count has a small bound (max count), we might
@@ -226,11 +239,6 @@ bool llvm::isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L,
   assert(SE.isLoopInvariant(StartS, L) && "implied by addrec definition");
   Value *Base = StartS->getValue();
 
-  Instruction *HeaderFirstNonPHI = L->getHeader()->getFirstNonPHI();
-
-  unsigned Align = LI->getAlignment();
-  if (Align == 0)
-    Align = DL.getABITypeAlignment(LI->getType());
   // For the moment, restrict ourselves to the case where the access size is a
   // multiple of the requested alignment and the base is aligned.
   // TODO: generalize if a case found which warrants
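As a usage note, here is a minimal sketch of how a caller such as the loop vectorizer's legality analysis might consult this helper when deciding which predicated loads still need masking. The surrounding names (collectSafeLoads, TheLoop, SE, DT, SafePointers) are assumptions for the sketch, not code from this commit; only isDereferenceableAndAlignedInLoop and its signature come from the patch above.

    #include "llvm/ADT/SmallPtrSet.h"
    #include "llvm/Analysis/Loads.h"
    #include "llvm/Analysis/LoopInfo.h"
    #include "llvm/Analysis/ScalarEvolution.h"
    #include "llvm/IR/Dominators.h"
    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    // Sketch only: collect loads whose addresses are proven safe to
    // speculate into the loop header, so predicated uses of them can be
    // vectorized with ordinary loads rather than llvm.masked.load.
    static void collectSafeLoads(Loop *TheLoop, ScalarEvolution &SE,
                                 DominatorTree &DT,
                                 SmallPtrSetImpl<Value *> &SafePointers) {
      for (BasicBlock *BB : TheLoop->blocks())
        for (Instruction &I : *BB)
          if (auto *LI = dyn_cast<LoadInst>(&I))
            if (isDereferenceableAndAlignedInLoop(LI, TheLoop, SE, DT))
              SafePointers.insert(LI->getPointerOperand());
    }

With this patch, a load in a predicated block whose pointer operand lands in SafePointers can take either of the two paths shown in the diff: the new loop-invariant check, or the pre-existing affine add-recurrence check for consecutive accesses.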