Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Transforms/Scalar/SROA.cpp  82
1 file changed, 35 insertions, 47 deletions
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index a7c1dc14713..e874cd51a2c 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -1878,19 +1878,19 @@ static Value *convertValue(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
 ///
 /// This function is called to test each entry in a partioning which is slated
 /// for a single slice.
-static bool
-isVectorPromotionViableForSlice(const DataLayout &DL, uint64_t SliceBeginOffset,
-                                uint64_t SliceEndOffset, VectorType *Ty,
-                                uint64_t ElementSize, const Slice &S) {
+static bool isVectorPromotionViableForSlice(AllocaSlices::Partition &P,
+                                            const Slice &S, VectorType *Ty,
+                                            uint64_t ElementSize,
+                                            const DataLayout &DL) {
   // First validate the slice offsets.
   uint64_t BeginOffset =
-      std::max(S.beginOffset(), SliceBeginOffset) - SliceBeginOffset;
+      std::max(S.beginOffset(), P.beginOffset()) - P.beginOffset();
   uint64_t BeginIndex = BeginOffset / ElementSize;
   if (BeginIndex * ElementSize != BeginOffset ||
       BeginIndex >= Ty->getNumElements())
     return false;
   uint64_t EndOffset =
-      std::min(S.endOffset(), SliceEndOffset) - SliceBeginOffset;
+      std::min(S.endOffset(), P.endOffset()) - P.beginOffset();
   uint64_t EndIndex = EndOffset / ElementSize;
   if (EndIndex * ElementSize != EndOffset || EndIndex > Ty->getNumElements())
     return false;
@@ -1922,7 +1922,7 @@ isVectorPromotionViableForSlice(const DataLayout &DL, uint64_t SliceBeginOffset,
     if (LI->isVolatile())
       return false;
     Type *LTy = LI->getType();
-    if (SliceBeginOffset > S.beginOffset() || SliceEndOffset < S.endOffset()) {
+    if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) {
       assert(LTy->isIntegerTy());
       LTy = SplitIntTy;
     }
@@ -1932,7 +1932,7 @@ isVectorPromotionViableForSlice(const DataLayout &DL, uint64_t SliceBeginOffset,
     if (SI->isVolatile())
      return false;
     Type *STy = SI->getValueOperand()->getType();
-    if (SliceBeginOffset > S.beginOffset() || SliceEndOffset < S.endOffset()) {
+    if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) {
       assert(STy->isIntegerTy());
       STy = SplitIntTy;
     }
@@ -1954,11 +1954,8 @@ isVectorPromotionViableForSlice(const DataLayout &DL, uint64_t SliceBeginOffset,
 /// SSA value. We only can ensure this for a limited set of operations, and we
 /// don't want to do the rewrites unless we are confident that the result will
 /// be promotable, so we have an early test here.
-static VectorType *
-isVectorPromotionViable(const DataLayout &DL, uint64_t SliceBeginOffset,
-                        uint64_t SliceEndOffset,
-                        AllocaSlices::const_range Slices,
-                        ArrayRef<AllocaSlices::iterator> SplitUses) {
+static VectorType *isVectorPromotionViable(AllocaSlices::Partition &P,
+                                           const DataLayout &DL) {
   // Collect the candidate types for vector-based promotion. Also track whether
   // we have different element types.
   SmallVector<VectorType *, 4> CandidateTys;
@@ -1974,9 +1971,9 @@ isVectorPromotionViable(const DataLayout &DL, uint64_t SliceBeginOffset,
     }
   };
   // Consider any loads or stores that are the exact size of the slice.
-  for (const auto &S : Slices)
-    if (S.beginOffset() == SliceBeginOffset &&
-        S.endOffset() == SliceEndOffset) {
+  for (const Slice &S : P)
+    if (S.beginOffset() == P.beginOffset() &&
+        S.endOffset() == P.endOffset()) {
       if (auto *LI = dyn_cast<LoadInst>(S.getUse()->getUser()))
         CheckCandidateType(LI->getType());
       else if (auto *SI = dyn_cast<StoreInst>(S.getUse()->getUser()))
@@ -2043,14 +2040,12 @@ isVectorPromotionViable(const DataLayout &DL, uint64_t SliceBeginOffset,
            "vector size not a multiple of element size?");
     ElementSize /= 8;
 
-    for (const auto &S : Slices)
-      if (!isVectorPromotionViableForSlice(DL, SliceBeginOffset, SliceEndOffset,
-                                           VTy, ElementSize, S))
+    for (const Slice &S : P)
+      if (!isVectorPromotionViableForSlice(P, S, VTy, ElementSize, DL))
        return false;
 
-    for (const auto &SI : SplitUses)
-      if (!isVectorPromotionViableForSlice(DL, SliceBeginOffset, SliceEndOffset,
-                                           VTy, ElementSize, *SI))
+    for (const Slice *S : P.splitSlices())
+      if (!isVectorPromotionViableForSlice(P, *S, VTy, ElementSize, DL))
        return false;
 
     return true;
@@ -2066,11 +2061,13 @@ isVectorPromotionViable(const DataLayout &DL, uint64_t SliceBeginOffset,
 ///
 /// This implements the necessary checking for the \c isIntegerWideningViable
 /// test below on a single slice of the alloca.
-static bool isIntegerWideningViableForSlice(const DataLayout &DL,
-                                            Type *AllocaTy,
+static bool isIntegerWideningViableForSlice(const Slice &S,
                                             uint64_t AllocBeginOffset,
-                                            uint64_t Size, const Slice &S,
+                                            Type *AllocaTy,
+                                            const DataLayout &DL,
                                             bool &WholeAllocaOp) {
+  uint64_t Size = DL.getTypeStoreSize(AllocaTy);
+
   uint64_t RelBegin = S.beginOffset() - AllocBeginOffset;
   uint64_t RelEnd = S.endOffset() - AllocBeginOffset;
 
@@ -2138,11 +2135,8 @@ static bool isIntegerWideningViableForSlice(const DataLayout &DL,
 /// This is a quick test to check whether we can rewrite the integer loads and
 /// stores to a particular alloca into wider loads and stores and be able to
 /// promote the resulting alloca.
-static bool
-isIntegerWideningViable(const DataLayout &DL, Type *AllocaTy,
-                        uint64_t AllocBeginOffset,
-                        AllocaSlices::const_range Slices,
-                        ArrayRef<AllocaSlices::iterator> SplitUses) {
+static bool isIntegerWideningViable(AllocaSlices::Partition &P, Type *AllocaTy,
+                                    const DataLayout &DL) {
   uint64_t SizeInBits = DL.getTypeSizeInBits(AllocaTy);
   // Don't create integer types larger than the maximum bitwidth.
   if (SizeInBits > IntegerType::MAX_INT_BITS)
@@ -2160,24 +2154,24 @@ isIntegerWideningViable(const DataLayout &DL, Type *AllocaTy,
       !canConvertValue(DL, IntTy, AllocaTy))
     return false;
 
-  uint64_t Size = DL.getTypeStoreSize(AllocaTy);
-
   // While examining uses, we ensure that the alloca has a covering load or
   // store. We don't want to widen the integer operations only to fail to
   // promote due to some other unsplittable entry (which we may make splittable
   // later). However, if there are only splittable uses, go ahead and assume
   // that we cover the alloca.
+  // FIXME: We shouldn't consider split slices that happen to start in the
+  // partition here...
   bool WholeAllocaOp =
-      Slices.begin() != Slices.end() ? false : DL.isLegalInteger(SizeInBits);
+      P.begin() != P.end() ? false : DL.isLegalInteger(SizeInBits);
 
-  for (const auto &S : Slices)
-    if (!isIntegerWideningViableForSlice(DL, AllocaTy, AllocBeginOffset, Size,
-                                         S, WholeAllocaOp))
+  for (const Slice &S : P)
+    if (!isIntegerWideningViableForSlice(S, P.beginOffset(), AllocaTy, DL,
+                                         WholeAllocaOp))
       return false;
 
-  for (const auto &SI : SplitUses)
-    if (!isIntegerWideningViableForSlice(DL, AllocaTy, AllocBeginOffset, Size,
-                                         *SI, WholeAllocaOp))
+  for (const Slice *S : P.splitSlices())
+    if (!isIntegerWideningViableForSlice(*S, P.beginOffset(), AllocaTy, DL,
+                                         WholeAllocaOp))
       return false;
 
   return WholeAllocaOp;
@@ -3452,16 +3446,10 @@ bool SROA::rewritePartition(AllocaInst &AI, AllocaSlices &AS,
     SliceTy = ArrayType::get(Type::getInt8Ty(*C), P.size());
   assert(DL->getTypeAllocSize(SliceTy) >= P.size());
 
-  bool IsIntegerPromotable = isIntegerWideningViable(
-      *DL, SliceTy, P.beginOffset(),
-      AllocaSlices::const_range(P.begin(), P.end()), P.splitSlices());
+  bool IsIntegerPromotable = isIntegerWideningViable(P, SliceTy, *DL);
 
   VectorType *VecTy =
-      IsIntegerPromotable
-          ? nullptr
-          : isVectorPromotionViable(
-                *DL, P.beginOffset(), P.endOffset(),
-                AllocaSlices::const_range(P.begin(), P.end()), P.splitSlices());
+      IsIntegerPromotable ? nullptr : isVectorPromotionViable(P, *DL);
   if (VecTy)
     SliceTy = VecTy;
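The heart of the change is that the viability checks now take an AllocaSlices::Partition directly instead of threading begin/end offsets and a separate split-use list through every helper. The stand-alone sketch below is plain C++ with hypothetical Slice, Partition, and sliceFitsElementGrid types (not the real classes in SROA.cpp); it only illustrates the shape of the interface the diff relies on: a partition knows its own offset range, iterates the slices it owns, and exposes the slices split across its boundary via splitSlices(), so a per-slice check needs nothing beyond the partition and the slice.

// Minimal sketch of a Partition-style interface; names and layout are
// illustrative stand-ins, not LLVM's actual data structures.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

struct Slice {
  uint64_t Begin, End;
  uint64_t beginOffset() const { return Begin; }
  uint64_t endOffset() const { return End; }
};

// Hypothetical stand-in for AllocaSlices::Partition.
class Partition {
  uint64_t Begin, End;
  std::vector<Slice> Slices;          // slices owned by this partition
  std::vector<const Slice *> Splits;  // slices split across its boundary
public:
  Partition(uint64_t B, uint64_t E) : Begin(B), End(E) {}
  uint64_t beginOffset() const { return Begin; }
  uint64_t endOffset() const { return End; }
  uint64_t size() const { return End - Begin; }
  void add(Slice S) { Slices.push_back(S); }
  void addSplit(const Slice *S) { Splits.push_back(S); }
  std::vector<Slice>::const_iterator begin() const { return Slices.begin(); }
  std::vector<Slice>::const_iterator end() const { return Slices.end(); }
  const std::vector<const Slice *> &splitSlices() const { return Splits; }
};

// Mirrors the new calling convention: the helper receives the Partition and a
// Slice, clamps the slice to the partition's own range, and checks that the
// clamped region lands on whole vector elements.
static bool sliceFitsElementGrid(const Partition &P, const Slice &S,
                                 uint64_t ElementSize, uint64_t NumElements) {
  uint64_t BeginOffset =
      std::max(S.beginOffset(), P.beginOffset()) - P.beginOffset();
  uint64_t BeginIndex = BeginOffset / ElementSize;
  if (BeginIndex * ElementSize != BeginOffset || BeginIndex >= NumElements)
    return false;
  uint64_t EndOffset =
      std::min(S.endOffset(), P.endOffset()) - P.beginOffset();
  uint64_t EndIndex = EndOffset / ElementSize;
  return EndIndex * ElementSize == EndOffset && EndIndex <= NumElements;
}

int main() {
  Partition P(/*B=*/0, /*E=*/16);  // e.g. a partition the size of <4 x i32>
  P.add({0, 4});                   // element 0
  P.add({4, 12});                  // elements 1 and 2
  Slice Straddler{8, 32};          // extends past the partition's end
  P.addSplit(&Straddler);

  bool Viable = true;
  for (const Slice &S : P)
    Viable &= sliceFitsElementGrid(P, S, /*ElementSize=*/4, /*NumElements=*/4);
  for (const Slice *S : P.splitSlices())
    Viable &= sliceFitsElementGrid(P, *S, /*ElementSize=*/4, /*NumElements=*/4);
  std::cout << (Viable ? "viable\n" : "not viable\n");
}

The clamping with std::max/std::min mirrors the logic in isVectorPromotionViableForSlice above, and the two loops mirror how the rewritten isVectorPromotionViable and isIntegerWideningViable walk P and P.splitSlices() without any extra offset parameters.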

