path: root/llvm/test/Transforms/LoadStoreVectorizer
author    Matt Arsenault <Matthew.Arsenault@amd.com>  2018-09-18 02:05:44 +0000
committer Matt Arsenault <Matthew.Arsenault@amd.com>  2018-09-18 02:05:44 +0000
commit    c64079859767f30319bd3ceb93e36f35e2dc0b17 (patch)
tree      e9199d63d81c006f2aa19f186377dee6f629e7cb /llvm/test/Transforms/LoadStoreVectorizer
parent    f1b0b47b2d04d9c4d5f0ed0f9970181ef52b0241 (diff)
LSV: Fix adjust alloca alignment trick for AMDGPU
This was checking the hardcoded address space 0 for the stack. Additionally,
this should be checking for legality with the adjusted alignment, so defer
the alignment check. Also try to split if the unaligned access isn't allowed.

llvm-svn: 342442
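The functional change lives in the LoadStoreVectorizer pass itself; this page only
shows the test updates (the diffstat below is limited to the test directory). As a
rough illustration of the behaviour the message describes, here is a minimal C++
sketch, not the actual patch: the helper name canUseAdjustedAllocaAlignment and the
StackAdjustedAlign parameter are hypothetical, while DataLayout::getAllocaAddrSpace
and TargetTransformInfo::allowsMisalignedMemoryAccesses are real LLVM APIs.

    // Sketch of the idea only: query the target's stack (alloca) address space
    // instead of hardcoding addrspace(0), and check legality with the alignment
    // the pass could raise the alloca to, not the original access alignment.
    #include "llvm/Analysis/TargetTransformInfo.h"
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    static bool canUseAdjustedAllocaAlignment(LoadInst *LI, const DataLayout &DL,
                                              const TargetTransformInfo &TTI,
                                              unsigned StackAdjustedAlign) {
      // The access must be on the stack: compare against the DataLayout's alloca
      // address space (5 on AMDGPU), not a hardcoded 0.
      if (LI->getPointerAddressSpace() != DL.getAllocaAddrSpace())
        return false;

      // Defer the alignment check: ask whether the access would be legal at the
      // alignment the underlying alloca can be raised to.
      bool Fast = false;
      bool Allows = TTI.allowsMisalignedMemoryAccesses(
          LI->getContext(), DL.getTypeSizeInBits(LI->getType()),
          LI->getPointerAddressSpace(), StackAdjustedAlign, &Fast);

      // If even the adjusted alignment is not allowed, the caller is expected to
      // fall back to splitting the chain rather than giving up on vectorization.
      return Allows && Fast;
    }

In the updated test, this is why the ALIGNED run line now expects a single
load <2 x i32> ... align 4 from the align-16 alloca instead of two scalar i32 loads.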
Diffstat (limited to 'llvm/test/Transforms/LoadStoreVectorizer')
-rw-r--r--  llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll | 86
1 file changed, 81 insertions(+), 5 deletions(-)
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll
index df48da0619d..fb704efb1e9 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll
@@ -64,10 +64,7 @@ define amdgpu_kernel void @load_unknown_offset_align1_i32(i32 addrspace(1)* noal
; ALL: alloca [128 x i32], align 16
; UNALIGNED: load <2 x i32>, <2 x i32> addrspace(5)* %{{[0-9]+}}, align 1{{$}}
-
-; FIXME: Should change alignment
-; ALIGNED: load i32
-; ALIGNED: load i32
+; ALIGNED: load <2 x i32>, <2 x i32> addrspace(5)* %{{[0-9]+}}, align 4{{$}}
define amdgpu_kernel void @load_alloca16_unknown_offset_align1_i32(i32 addrspace(1)* noalias %out, i32 %offset) #0 {
%alloca = alloca [128 x i32], align 16, addrspace(5)
%ptr0 = getelementptr inbounds [128 x i32], [128 x i32] addrspace(5)* %alloca, i32 0, i32 %offset
@@ -128,5 +125,84 @@ define amdgpu_kernel void @store_unknown_offset_align1_i32(i32 addrspace(1)* noa
ret void
}
-attributes #0 = { nounwind }
+; ALL-LABEL: @merge_private_store_4_vector_elts_loads_v4i32(
+; ALIGNED: %alloca = alloca [8 x i32], align 4, addrspace(5)
+; ALIGNED: store <4 x i32> <i32 9, i32 1, i32 23, i32 19>, <4 x i32> addrspace(5)* %1, align 4
+
+; UNALIGNED: %alloca = alloca [8 x i32], align 1, addrspace(5)
+; UNALIGNED: store <4 x i32> <i32 9, i32 1, i32 23, i32 19>, <4 x i32> addrspace(5)* %1, align 1
+define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v4i32() {
+ %alloca = alloca [8 x i32], align 1, addrspace(5)
+ %out = bitcast [8 x i32] addrspace(5)* %alloca to i32 addrspace(5)*
+ %out.gep.1 = getelementptr i32, i32 addrspace(5)* %out, i32 1
+ %out.gep.2 = getelementptr i32, i32 addrspace(5)* %out, i32 2
+ %out.gep.3 = getelementptr i32, i32 addrspace(5)* %out, i32 3
+
+ store i32 9, i32 addrspace(5)* %out, align 1
+ store i32 1, i32 addrspace(5)* %out.gep.1, align 1
+ store i32 23, i32 addrspace(5)* %out.gep.2, align 1
+ store i32 19, i32 addrspace(5)* %out.gep.3, align 1
+ ret void
+}
+
+; ALL-LABEL: @merge_private_store_4_vector_elts_loads_v4i8(
+; ALIGNED: %alloca = alloca [8 x i8], align 4, addrspace(5)
+; ALIGNED: store <4 x i8> <i8 9, i8 1, i8 23, i8 19>, <4 x i8> addrspace(5)* %1, align 4
+
+; UNALIGNED: %alloca = alloca [8 x i8], align 1, addrspace(5)
+; UNALIGNED: store <4 x i8> <i8 9, i8 1, i8 23, i8 19>, <4 x i8> addrspace(5)* %1, align 1
+define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v4i8() {
+ %alloca = alloca [8 x i8], align 1, addrspace(5)
+ %out = bitcast [8 x i8] addrspace(5)* %alloca to i8 addrspace(5)*
+ %out.gep.1 = getelementptr i8, i8 addrspace(5)* %out, i8 1
+ %out.gep.2 = getelementptr i8, i8 addrspace(5)* %out, i8 2
+ %out.gep.3 = getelementptr i8, i8 addrspace(5)* %out, i8 3
+
+ store i8 9, i8 addrspace(5)* %out, align 1
+ store i8 1, i8 addrspace(5)* %out.gep.1, align 1
+ store i8 23, i8 addrspace(5)* %out.gep.2, align 1
+ store i8 19, i8 addrspace(5)* %out.gep.3, align 1
+ ret void
+}
+; ALL-LABEL: @merge_private_load_4_vector_elts_loads_v4i32(
+; ALIGNED: %alloca = alloca [8 x i32], align 4, addrspace(5)
+; ALIGNED: load <4 x i32>, <4 x i32> addrspace(5)* %1, align 4
+
+; UNALIGNED: %alloca = alloca [8 x i32], align 1, addrspace(5)
+; UNALIGNED: load <4 x i32>, <4 x i32> addrspace(5)* %1, align 1
+define amdgpu_kernel void @merge_private_load_4_vector_elts_loads_v4i32() {
+ %alloca = alloca [8 x i32], align 1, addrspace(5)
+ %out = bitcast [8 x i32] addrspace(5)* %alloca to i32 addrspace(5)*
+ %out.gep.1 = getelementptr i32, i32 addrspace(5)* %out, i32 1
+ %out.gep.2 = getelementptr i32, i32 addrspace(5)* %out, i32 2
+ %out.gep.3 = getelementptr i32, i32 addrspace(5)* %out, i32 3
+
+ %load0 = load i32, i32 addrspace(5)* %out, align 1
+ %load1 = load i32, i32 addrspace(5)* %out.gep.1, align 1
+ %load2 = load i32, i32 addrspace(5)* %out.gep.2, align 1
+ %load3 = load i32, i32 addrspace(5)* %out.gep.3, align 1
+ ret void
+}
+
+; ALL-LABEL: @merge_private_load_4_vector_elts_loads_v4i8(
+; ALIGNED: %alloca = alloca [8 x i8], align 4, addrspace(5)
+; ALIGNED: load <4 x i8>, <4 x i8> addrspace(5)* %1, align 4
+
+; UNALIGNED: %alloca = alloca [8 x i8], align 1, addrspace(5)
+; UNALIGNED: load <4 x i8>, <4 x i8> addrspace(5)* %1, align 1
+define amdgpu_kernel void @merge_private_load_4_vector_elts_loads_v4i8() {
+ %alloca = alloca [8 x i8], align 1, addrspace(5)
+ %out = bitcast [8 x i8] addrspace(5)* %alloca to i8 addrspace(5)*
+ %out.gep.1 = getelementptr i8, i8 addrspace(5)* %out, i8 1
+ %out.gep.2 = getelementptr i8, i8 addrspace(5)* %out, i8 2
+ %out.gep.3 = getelementptr i8, i8 addrspace(5)* %out, i8 3
+
+ %load0 = load i8, i8 addrspace(5)* %out, align 1
+ %load1 = load i8, i8 addrspace(5)* %out.gep.1, align 1
+ %load2 = load i8, i8 addrspace(5)* %out.gep.2, align 1
+ %load3 = load i8, i8 addrspace(5)* %out.gep.3, align 1
+ ret void
+}
+
+attributes #0 = { nounwind }