author    Stanislav Mekhanoshin <Stanislav.Mekhanoshin@amd.com>    2019-08-02 04:03:37 +0000
committer Stanislav Mekhanoshin <Stanislav.Mekhanoshin@amd.com>    2019-08-02 04:03:37 +0000
commit    6fe00a21f2436490f1c05247c612866dd59c03a5 (patch)
tree      cc80b151fc23c6763c9f5a0c455f1895315f8941
parent    fec7da8285b1e41fcf524c42f389f1cd87a2690f (diff)
Handle casts changing pointer size in the vectorizer
Added code to truncate or extend offsets so that the base pointer search
can continue when the pointer size has changed along the way.

Differential Revision: https://reviews.llvm.org/D65612

llvm-svn: 367646
 llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp                         | 21 ++++++++++++++++-----
 llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/vect-ptr-ptr-size-mismatch.ll | 39 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 55 insertions(+), 5 deletions(-)
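
Background for the diff below: AMDGPU uses differently sized pointers per address space, so stripping a cast/gep chain can land on a pointer narrower than the one the search started from. A minimal standalone sketch of that mismatch, assuming LLVM headers of this era and a simplified layout string (not the full AMDGPU layout, and not code from the patch):

    // Sketch only: the store-size mismatch that the new code has to handle.
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/LLVMContext.h"
    #include <cassert>

    using namespace llvm;

    int main() {
      LLVMContext Ctx;
      // Simplified: generic (flat) pointers are 64-bit, addrspace(5) is 32-bit.
      DataLayout DL("e-p:64:64-p5:32:32");
      Type *Flat = PointerType::get(Type::getInt8Ty(Ctx), /*AddressSpace=*/0);
      Type *Priv = PointerType::get(Type::getInt8Ty(Ctx), /*AddressSpace=*/5);
      assert(DL.getTypeStoreSizeInBits(Flat) == 64);
      assert(DL.getTypeStoreSizeInBits(Priv) == 32); // width changed along the chain
      return 0;
    }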
diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
index 19afe4157dc..a5b862bf930 100644
--- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -147,7 +147,7 @@ private:
   static const unsigned MaxDepth = 3;
 
   bool isConsecutiveAccess(Value *A, Value *B);
-  bool areConsecutivePointers(Value *PtrA, Value *PtrB, const APInt &PtrDelta,
+  bool areConsecutivePointers(Value *PtrA, Value *PtrB, APInt PtrDelta,
                               unsigned Depth = 0) const;
   bool lookThroughComplexAddresses(Value *PtrA, Value *PtrB, APInt PtrDelta,
                                    unsigned Depth) const;
@@ -336,18 +336,29 @@ bool Vectorizer::isConsecutiveAccess(Value *A, Value *B) {
 }
 
 bool Vectorizer::areConsecutivePointers(Value *PtrA, Value *PtrB,
-                                        const APInt &PtrDelta,
-                                        unsigned Depth) const {
+                                        APInt PtrDelta, unsigned Depth) const {
   unsigned PtrBitWidth = DL.getPointerTypeSizeInBits(PtrA->getType());
   APInt OffsetA(PtrBitWidth, 0);
   APInt OffsetB(PtrBitWidth, 0);
   PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
   PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);
-  if (DL.getTypeStoreSizeInBits(PtrA->getType()) != PtrBitWidth ||
-      DL.getTypeStoreSizeInBits(PtrB->getType()) != PtrBitWidth)
+  unsigned NewPtrBitWidth = DL.getTypeStoreSizeInBits(PtrA->getType());
+
+  if (NewPtrBitWidth != DL.getTypeStoreSizeInBits(PtrB->getType()))
     return false;
 
+  // In case we have to shrink the pointer,
+  // stripAndAccumulateInBoundsConstantOffsets should properly handle a
+  // possible overflow, and the value should fit into the smallest data
+  // type used in the cast/gep chain.
+  assert(OffsetA.getMinSignedBits() <= NewPtrBitWidth &&
+         OffsetB.getMinSignedBits() <= NewPtrBitWidth);
+
+  OffsetA = OffsetA.sextOrTrunc(NewPtrBitWidth);
+  OffsetB = OffsetB.sextOrTrunc(NewPtrBitWidth);
+  PtrDelta = PtrDelta.sextOrTrunc(NewPtrBitWidth);
+
   APInt OffsetDelta = OffsetB - OffsetA;
 
   // Check if they are based on the same pointer. That makes the offsets
   // sufficient.
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/vect-ptr-ptr-size-mismatch.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/vect-ptr-ptr-size-mismatch.ll
index 1e9ffdba341..1cb8d14f177 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/vect-ptr-ptr-size-mismatch.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/vect-ptr-ptr-size-mismatch.ll
@@ -54,4 +54,43 @@ entry:
   ret void
 }
 
+; CHECK-LABEL: @ext_ptr
+; CHECK: load <2 x i32>
+define void @ext_ptr(i32 addrspace(5)* %p) {
+entry:
+  %gep1 = getelementptr inbounds i32, i32 addrspace(5)* %p, i64 0
+  %gep2 = getelementptr inbounds i32, i32 addrspace(5)* %p, i64 1
+  %a.ascast = addrspacecast i32 addrspace(5)* %gep1 to i32*
+  %b.ascast = addrspacecast i32 addrspace(5)* %gep2 to i32*
+  %tmp1 = load i32, i32* %a.ascast, align 8
+  %tmp2 = load i32, i32* %b.ascast, align 8
+  unreachable
+}
+
+; CHECK-LABEL: @shrink_ptr
+; CHECK: load <2 x i32>
+define void @shrink_ptr(i32* %p) {
+entry:
+  %gep1 = getelementptr inbounds i32, i32* %p, i64 0
+  %gep2 = getelementptr inbounds i32, i32* %p, i64 1
+  %a.ascast = addrspacecast i32* %gep1 to i32 addrspace(5)*
+  %b.ascast = addrspacecast i32* %gep2 to i32 addrspace(5)*
+  %tmp1 = load i32, i32 addrspace(5)* %a.ascast, align 8
+  %tmp2 = load i32, i32 addrspace(5)* %b.ascast, align 8
+  unreachable
+}
+
+; CHECK-LABEL: @ext_ptr_wrap
+; CHECK: load <2 x i8>
+define void @ext_ptr_wrap(i8 addrspace(5)* %p) {
+entry:
+  %gep1 = getelementptr inbounds i8, i8 addrspace(5)* %p, i64 0
+  %gep2 = getelementptr inbounds i8, i8 addrspace(5)* %p, i64 4294967295
+  %a.ascast = addrspacecast i8 addrspace(5)* %gep1 to i8*
+  %b.ascast = addrspacecast i8 addrspace(5)* %gep2 to i8*
+  %tmp1 = load i8, i8* %a.ascast, align 1
+  %tmp2 = load i8, i8* %b.ascast, align 1
+  unreachable
+}
+
 !0 = !{}
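
In @ext_ptr_wrap, the i64 index 4294967295 wraps once it is truncated to the 32-bit index width of addrspace(5), so the second load sits one byte below the first and the pair is still consecutive, just in reverse order. A sketch of only that arithmetic, assuming the 32-bit index width (not part of the test itself):

    // Sketch only: why an i64 index of 4294967295 means "p - 1" in addrspace(5).
    #include "llvm/ADT/APInt.h"
    #include <cassert>

    int main() {
      llvm::APInt Idx(64, 4294967295ULL);
      llvm::APInt Wrapped = Idx.trunc(32);  // GEP indices are truncated to index width
      assert(Wrapped.getSExtValue() == -1); // offsets 0 and -1: adjacent i8 accesses
      return 0;
    }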