author Yaxun Liu <Yaxun.Liu@amd.com> 2018-02-02 16:07:16 +0000
committer Yaxun Liu <Yaxun.Liu@amd.com> 2018-02-02 16:07:16 +0000
commit 2a22c5deff3830d50fbc3f877ab30af9f42792f9 (patch)
tree 25b57e509727b39c0a06715cccf5dbab3e1ea67e /llvm/test/Transforms/LoadStoreVectorizer/AMDGPU
parent a43e9653bbb388d7fe3d58541bdf13612705cc8f (diff)
[AMDGPU] Switch to the new addr space mapping by default

This requires a corresponding clang change.

Differential Revision: https://reviews.llvm.org/D40955

llvm-svn: 324101
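The essence of the change, as reflected in the updated tests below: private (scratch) memory now uses addrspace(5), so allocas and the pointers derived from them carry an explicit address-space qualifier, and the RUN lines pass -data-layout=A5 instead of spelling out a full target datalayout string. A minimal sketch of IR under the new mapping (the function and value names here are illustrative, not taken from the patch):

target datalayout = "A5"
target triple = "amdgcn--"

; With the A5 data layout, private allocas live in addrspace(5), and every
; pointer derived from them keeps that qualifier.
define amdgpu_kernel void @private_example(i32 addrspace(1)* %out) {
  %buf = alloca [4 x i32], align 4, addrspace(5)
  %gep = getelementptr inbounds [4 x i32], [4 x i32] addrspace(5)* %buf, i32 0, i32 0
  store i32 42, i32 addrspace(5)* %gep, align 4
  %val = load i32, i32 addrspace(5)* %gep, align 4
  store i32 %val, i32 addrspace(1)* %out
  ret void
}

Under the old mapping these tests used generic (addrspace 0) pointers for private memory, which is why each alloca, getelementptr, load, and store in the diff gains an addrspace(5) qualifier.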
Diffstat (limited to 'llvm/test/Transforms/LoadStoreVectorizer/AMDGPU')
-rw-r--r-- llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll | 115
-rw-r--r-- llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores-private.ll | 189
2 files changed, 151 insertions(+), 153 deletions(-)
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll
index 368dc6ab361..87acb1057af 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll
@@ -1,38 +1,37 @@
-; RUN: opt -S -load-store-vectorizer -mattr=-unaligned-buffer-access,+max-private-element-size-16 < %s | FileCheck -check-prefix=ALIGNED -check-prefix=ALL %s
-; RUN: opt -S -load-store-vectorizer -mattr=+unaligned-buffer-access,+unaligned-scratch-access,+max-private-element-size-16 < %s | FileCheck -check-prefix=UNALIGNED -check-prefix=ALL %s
+; RUN: opt -data-layout=A5 -S -load-store-vectorizer -mattr=-unaligned-buffer-access,+max-private-element-size-16 < %s | FileCheck -check-prefix=ALIGNED -check-prefix=ALL %s
+; RUN: opt -data-layout=A5 -S -load-store-vectorizer -mattr=+unaligned-buffer-access,+unaligned-scratch-access,+max-private-element-size-16 < %s | FileCheck -check-prefix=UNALIGNED -check-prefix=ALL %s
-target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
target triple = "amdgcn--"
; ALL-LABEL: @load_unknown_offset_align1_i8(
; ALL: alloca [128 x i8], align 1
-; UNALIGNED: load <2 x i8>, <2 x i8>* %{{[0-9]+}}, align 1{{$}}
+; UNALIGNED: load <2 x i8>, <2 x i8> addrspace(5)* %{{[0-9]+}}, align 1{{$}}
-; ALIGNED: load i8, i8* %ptr0, align 1{{$}}
-; ALIGNED: load i8, i8* %ptr1, align 1{{$}}
+; ALIGNED: load i8, i8 addrspace(5)* %ptr0, align 1{{$}}
+; ALIGNED: load i8, i8 addrspace(5)* %ptr1, align 1{{$}}
define amdgpu_kernel void @load_unknown_offset_align1_i8(i8 addrspace(1)* noalias %out, i32 %offset) #0 {
- %alloca = alloca [128 x i8], align 1
- %ptr0 = getelementptr inbounds [128 x i8], [128 x i8]* %alloca, i32 0, i32 %offset
- %val0 = load i8, i8* %ptr0, align 1
- %ptr1 = getelementptr inbounds i8, i8* %ptr0, i32 1
- %val1 = load i8, i8* %ptr1, align 1
+ %alloca = alloca [128 x i8], align 1, addrspace(5)
+ %ptr0 = getelementptr inbounds [128 x i8], [128 x i8] addrspace(5)* %alloca, i32 0, i32 %offset
+ %val0 = load i8, i8 addrspace(5)* %ptr0, align 1
+ %ptr1 = getelementptr inbounds i8, i8 addrspace(5)* %ptr0, i32 1
+ %val1 = load i8, i8 addrspace(5)* %ptr1, align 1
%add = add i8 %val0, %val1
store i8 %add, i8 addrspace(1)* %out
ret void
}
; ALL-LABEL: @load_unknown_offset_align1_i16(
-; ALL: alloca [128 x i16], align 1{{$}}
-; UNALIGNED: load <2 x i16>, <2 x i16>* %{{[0-9]+}}, align 1{{$}}
+; ALL: alloca [128 x i16], align 1, addrspace(5){{$}}
+; UNALIGNED: load <2 x i16>, <2 x i16> addrspace(5)* %{{[0-9]+}}, align 1{{$}}
-; ALIGNED: load i16, i16* %ptr0, align 1{{$}}
-; ALIGNED: load i16, i16* %ptr1, align 1{{$}}
+; ALIGNED: load i16, i16 addrspace(5)* %ptr0, align 1{{$}}
+; ALIGNED: load i16, i16 addrspace(5)* %ptr1, align 1{{$}}
define amdgpu_kernel void @load_unknown_offset_align1_i16(i16 addrspace(1)* noalias %out, i32 %offset) #0 {
- %alloca = alloca [128 x i16], align 1
- %ptr0 = getelementptr inbounds [128 x i16], [128 x i16]* %alloca, i32 0, i32 %offset
- %val0 = load i16, i16* %ptr0, align 1
- %ptr1 = getelementptr inbounds i16, i16* %ptr0, i32 1
- %val1 = load i16, i16* %ptr1, align 1
+ %alloca = alloca [128 x i16], align 1, addrspace(5)
+ %ptr0 = getelementptr inbounds [128 x i16], [128 x i16] addrspace(5)* %alloca, i32 0, i32 %offset
+ %val0 = load i16, i16 addrspace(5)* %ptr0, align 1
+ %ptr1 = getelementptr inbounds i16, i16 addrspace(5)* %ptr0, i32 1
+ %val1 = load i16, i16 addrspace(5)* %ptr1, align 1
%add = add i16 %val0, %val1
store i16 %add, i16 addrspace(1)* %out
ret void
@@ -43,16 +42,16 @@ define amdgpu_kernel void @load_unknown_offset_align1_i16(i16 addrspace(1)* noal
; ALL-LABEL: @load_unknown_offset_align1_i32(
; ALL: alloca [128 x i32], align 1
-; UNALIGNED: load <2 x i32>, <2 x i32>* %{{[0-9]+}}, align 1{{$}}
+; UNALIGNED: load <2 x i32>, <2 x i32> addrspace(5)* %{{[0-9]+}}, align 1{{$}}
-; ALIGNED: load i32, i32* %ptr0, align 1
-; ALIGNED: load i32, i32* %ptr1, align 1
+; ALIGNED: load i32, i32 addrspace(5)* %ptr0, align 1
+; ALIGNED: load i32, i32 addrspace(5)* %ptr1, align 1
define amdgpu_kernel void @load_unknown_offset_align1_i32(i32 addrspace(1)* noalias %out, i32 %offset) #0 {
- %alloca = alloca [128 x i32], align 1
- %ptr0 = getelementptr inbounds [128 x i32], [128 x i32]* %alloca, i32 0, i32 %offset
- %val0 = load i32, i32* %ptr0, align 1
- %ptr1 = getelementptr inbounds i32, i32* %ptr0, i32 1
- %val1 = load i32, i32* %ptr1, align 1
+ %alloca = alloca [128 x i32], align 1, addrspace(5)
+ %ptr0 = getelementptr inbounds [128 x i32], [128 x i32] addrspace(5)* %alloca, i32 0, i32 %offset
+ %val0 = load i32, i32 addrspace(5)* %ptr0, align 1
+ %ptr1 = getelementptr inbounds i32, i32 addrspace(5)* %ptr0, i32 1
+ %val1 = load i32, i32 addrspace(5)* %ptr1, align 1
%add = add i32 %val0, %val1
store i32 %add, i32 addrspace(1)* %out
ret void
@@ -63,17 +62,17 @@ define amdgpu_kernel void @load_unknown_offset_align1_i32(i32 addrspace(1)* noal
; ALL-LABEL: @load_alloca16_unknown_offset_align1_i32(
; ALL: alloca [128 x i32], align 16
-; UNALIGNED: load <2 x i32>, <2 x i32>* %{{[0-9]+}}, align 1{{$}}
+; UNALIGNED: load <2 x i32>, <2 x i32> addrspace(5)* %{{[0-9]+}}, align 1{{$}}
; FIXME: Should change alignment
; ALIGNED: load i32
; ALIGNED: load i32
define amdgpu_kernel void @load_alloca16_unknown_offset_align1_i32(i32 addrspace(1)* noalias %out, i32 %offset) #0 {
- %alloca = alloca [128 x i32], align 16
- %ptr0 = getelementptr inbounds [128 x i32], [128 x i32]* %alloca, i32 0, i32 %offset
- %val0 = load i32, i32* %ptr0, align 1
- %ptr1 = getelementptr inbounds i32, i32* %ptr0, i32 1
- %val1 = load i32, i32* %ptr1, align 1
+ %alloca = alloca [128 x i32], align 16, addrspace(5)
+ %ptr0 = getelementptr inbounds [128 x i32], [128 x i32] addrspace(5)* %alloca, i32 0, i32 %offset
+ %val0 = load i32, i32 addrspace(5)* %ptr0, align 1
+ %ptr1 = getelementptr inbounds i32, i32 addrspace(5)* %ptr0, i32 1
+ %val1 = load i32, i32 addrspace(5)* %ptr1, align 1
%add = add i32 %val0, %val1
store i32 %add, i32 addrspace(1)* %out
ret void
@@ -81,31 +80,31 @@ define amdgpu_kernel void @load_alloca16_unknown_offset_align1_i32(i32 addrspace
; ALL-LABEL: @store_unknown_offset_align1_i8(
; ALL: alloca [128 x i8], align 1
-; UNALIGNED: store <2 x i8> <i8 9, i8 10>, <2 x i8>* %{{[0-9]+}}, align 1{{$}}
+; UNALIGNED: store <2 x i8> <i8 9, i8 10>, <2 x i8> addrspace(5)* %{{[0-9]+}}, align 1{{$}}
-; ALIGNED: store i8 9, i8* %ptr0, align 1{{$}}
-; ALIGNED: store i8 10, i8* %ptr1, align 1{{$}}
+; ALIGNED: store i8 9, i8 addrspace(5)* %ptr0, align 1{{$}}
+; ALIGNED: store i8 10, i8 addrspace(5)* %ptr1, align 1{{$}}
define amdgpu_kernel void @store_unknown_offset_align1_i8(i8 addrspace(1)* noalias %out, i32 %offset) #0 {
- %alloca = alloca [128 x i8], align 1
- %ptr0 = getelementptr inbounds [128 x i8], [128 x i8]* %alloca, i32 0, i32 %offset
- store i8 9, i8* %ptr0, align 1
- %ptr1 = getelementptr inbounds i8, i8* %ptr0, i32 1
- store i8 10, i8* %ptr1, align 1
+ %alloca = alloca [128 x i8], align 1, addrspace(5)
+ %ptr0 = getelementptr inbounds [128 x i8], [128 x i8] addrspace(5)* %alloca, i32 0, i32 %offset
+ store i8 9, i8 addrspace(5)* %ptr0, align 1
+ %ptr1 = getelementptr inbounds i8, i8 addrspace(5)* %ptr0, i32 1
+ store i8 10, i8 addrspace(5)* %ptr1, align 1
ret void
}
; ALL-LABEL: @store_unknown_offset_align1_i16(
; ALL: alloca [128 x i16], align 1
-; UNALIGNED: store <2 x i16> <i16 9, i16 10>, <2 x i16>* %{{[0-9]+}}, align 1{{$}}
+; UNALIGNED: store <2 x i16> <i16 9, i16 10>, <2 x i16> addrspace(5)* %{{[0-9]+}}, align 1{{$}}
-; ALIGNED: store i16 9, i16* %ptr0, align 1{{$}}
-; ALIGNED: store i16 10, i16* %ptr1, align 1{{$}}
+; ALIGNED: store i16 9, i16 addrspace(5)* %ptr0, align 1{{$}}
+; ALIGNED: store i16 10, i16 addrspace(5)* %ptr1, align 1{{$}}
define amdgpu_kernel void @store_unknown_offset_align1_i16(i16 addrspace(1)* noalias %out, i32 %offset) #0 {
- %alloca = alloca [128 x i16], align 1
- %ptr0 = getelementptr inbounds [128 x i16], [128 x i16]* %alloca, i32 0, i32 %offset
- store i16 9, i16* %ptr0, align 1
- %ptr1 = getelementptr inbounds i16, i16* %ptr0, i32 1
- store i16 10, i16* %ptr1, align 1
+ %alloca = alloca [128 x i16], align 1, addrspace(5)
+ %ptr0 = getelementptr inbounds [128 x i16], [128 x i16] addrspace(5)* %alloca, i32 0, i32 %offset
+ store i16 9, i16 addrspace(5)* %ptr0, align 1
+ %ptr1 = getelementptr inbounds i16, i16 addrspace(5)* %ptr0, i32 1
+ store i16 10, i16 addrspace(5)* %ptr1, align 1
ret void
}
@@ -115,16 +114,16 @@ define amdgpu_kernel void @store_unknown_offset_align1_i16(i16 addrspace(1)* noa
; ALL-LABEL: @store_unknown_offset_align1_i32(
; ALL: alloca [128 x i32], align 1
-; UNALIGNED: store <2 x i32> <i32 9, i32 10>, <2 x i32>* %{{[0-9]+}}, align 1{{$}}
+; UNALIGNED: store <2 x i32> <i32 9, i32 10>, <2 x i32> addrspace(5)* %{{[0-9]+}}, align 1{{$}}
-; ALIGNED: store i32 9, i32* %ptr0, align 1
-; ALIGNED: store i32 10, i32* %ptr1, align 1
+; ALIGNED: store i32 9, i32 addrspace(5)* %ptr0, align 1
+; ALIGNED: store i32 10, i32 addrspace(5)* %ptr1, align 1
define amdgpu_kernel void @store_unknown_offset_align1_i32(i32 addrspace(1)* noalias %out, i32 %offset) #0 {
- %alloca = alloca [128 x i32], align 1
- %ptr0 = getelementptr inbounds [128 x i32], [128 x i32]* %alloca, i32 0, i32 %offset
- store i32 9, i32* %ptr0, align 1
- %ptr1 = getelementptr inbounds i32, i32* %ptr0, i32 1
- store i32 10, i32* %ptr1, align 1
+ %alloca = alloca [128 x i32], align 1, addrspace(5)
+ %ptr0 = getelementptr inbounds [128 x i32], [128 x i32] addrspace(5)* %alloca, i32 0, i32 %offset
+ store i32 9, i32 addrspace(5)* %ptr0, align 1
+ %ptr1 = getelementptr inbounds i32, i32 addrspace(5)* %ptr0, i32 1
+ store i32 10, i32 addrspace(5)* %ptr1, align 1
ret void
}
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores-private.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores-private.ll
index 0fcdc7b9083..43352783d10 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores-private.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores-private.ll
@@ -5,7 +5,6 @@
; RUN: opt -mtriple=amdgcn-amd-amdhsa -mattr=+max-private-element-size-8,+unaligned-scratch-access -load-store-vectorizer -S -o - %s | FileCheck -check-prefixes=ELT8,ELT8-UNALIGNED,UNALIGNED,ALL %s
; RUN: opt -mtriple=amdgcn-amd-amdhsa -mattr=+max-private-element-size-16,+unaligned-scratch-access -load-store-vectorizer -S -o - %s | FileCheck -check-prefixes=ELT16,ELT16-UNALIGNED,UNALIGNED,ALL %s
-target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
; ALL-LABEL: @merge_private_store_4_vector_elts_loads_v4i32
; ALIGNED: store i32
@@ -17,52 +16,52 @@ target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:
; ELT8-UNALIGNED: store <2 x i32>
; ELT16-UNALIGNED: store <4 x i32>
-define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v4i32(i32* %out) #0 {
- %out.gep.1 = getelementptr i32, i32* %out, i32 1
- %out.gep.2 = getelementptr i32, i32* %out, i32 2
- %out.gep.3 = getelementptr i32, i32* %out, i32 3
-
- store i32 9, i32* %out
- store i32 1, i32* %out.gep.1
- store i32 23, i32* %out.gep.2
- store i32 19, i32* %out.gep.3
+define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v4i32(i32 addrspace(5)* %out) #0 {
+ %out.gep.1 = getelementptr i32, i32 addrspace(5)* %out, i32 1
+ %out.gep.2 = getelementptr i32, i32 addrspace(5)* %out, i32 2
+ %out.gep.3 = getelementptr i32, i32 addrspace(5)* %out, i32 3
+
+ store i32 9, i32 addrspace(5)* %out
+ store i32 1, i32 addrspace(5)* %out.gep.1
+ store i32 23, i32 addrspace(5)* %out.gep.2
+ store i32 19, i32 addrspace(5)* %out.gep.3
ret void
}
; ALL-LABEL: @merge_private_store_4_vector_elts_loads_v4i32_align1(
-; ALIGNED: store i32 9, i32* %out, align 1
-; ALIGNED: store i32 1, i32* %out.gep.1, align 1
-; ALIGNED: store i32 23, i32* %out.gep.2, align 1
-; ALIGNED: store i32 19, i32* %out.gep.3, align 1
+; ALIGNED: store i32 9, i32 addrspace(5)* %out, align 1
+; ALIGNED: store i32 1, i32 addrspace(5)* %out.gep.1, align 1
+; ALIGNED: store i32 23, i32 addrspace(5)* %out.gep.2, align 1
+; ALIGNED: store i32 19, i32 addrspace(5)* %out.gep.3, align 1
-; ELT16-UNALIGNED: store <4 x i32> <i32 9, i32 1, i32 23, i32 19>, <4 x i32>* %1, align 1
+; ELT16-UNALIGNED: store <4 x i32> <i32 9, i32 1, i32 23, i32 19>, <4 x i32> addrspace(5)* %1, align 1
-; ELT8-UNALIGNED: store <2 x i32> <i32 9, i32 1>, <2 x i32>* %1, align 1
-; ELT8-UNALIGNED: store <2 x i32> <i32 23, i32 19>, <2 x i32>* %2, align 1
+; ELT8-UNALIGNED: store <2 x i32> <i32 9, i32 1>, <2 x i32> addrspace(5)* %1, align 1
+; ELT8-UNALIGNED: store <2 x i32> <i32 23, i32 19>, <2 x i32> addrspace(5)* %2, align 1
; ELT4-UNALIGNED: store i32
; ELT4-UNALIGNED: store i32
; ELT4-UNALIGNED: store i32
; ELT4-UNALIGNED: store i32
-define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v4i32_align1(i32* %out) #0 {
- %out.gep.1 = getelementptr i32, i32* %out, i32 1
- %out.gep.2 = getelementptr i32, i32* %out, i32 2
- %out.gep.3 = getelementptr i32, i32* %out, i32 3
-
- store i32 9, i32* %out, align 1
- store i32 1, i32* %out.gep.1, align 1
- store i32 23, i32* %out.gep.2, align 1
- store i32 19, i32* %out.gep.3, align 1
+define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v4i32_align1(i32 addrspace(5)* %out) #0 {
+ %out.gep.1 = getelementptr i32, i32 addrspace(5)* %out, i32 1
+ %out.gep.2 = getelementptr i32, i32 addrspace(5)* %out, i32 2
+ %out.gep.3 = getelementptr i32, i32 addrspace(5)* %out, i32 3
+
+ store i32 9, i32 addrspace(5)* %out, align 1
+ store i32 1, i32 addrspace(5)* %out.gep.1, align 1
+ store i32 23, i32 addrspace(5)* %out.gep.2, align 1
+ store i32 19, i32 addrspace(5)* %out.gep.3, align 1
ret void
}
; ALL-LABEL: @merge_private_store_4_vector_elts_loads_v4i32_align2(
-; ALIGNED: store i32 9, i32* %out, align 2
-; ALIGNED: store i32 1, i32* %out.gep.1, align 2
-; ALIGNED: store i32 23, i32* %out.gep.2, align 2
-; ALIGNED: store i32 19, i32* %out.gep.3, align 2
+; ALIGNED: store i32 9, i32 addrspace(5)* %out, align 2
+; ALIGNED: store i32 1, i32 addrspace(5)* %out.gep.1, align 2
+; ALIGNED: store i32 23, i32 addrspace(5)* %out.gep.2, align 2
+; ALIGNED: store i32 19, i32 addrspace(5)* %out.gep.3, align 2
-; ELT16-UNALIGNED: store <4 x i32> <i32 9, i32 1, i32 23, i32 19>, <4 x i32>* %1, align 2
+; ELT16-UNALIGNED: store <4 x i32> <i32 9, i32 1, i32 23, i32 19>, <4 x i32> addrspace(5)* %1, align 2
; ELT8-UNALIGNED: store <2 x i32>
; ELT8-UNALIGNED: store <2 x i32>
@@ -71,29 +70,29 @@ define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v4i32_align1(
; ELT4-UNALIGNED: store i32
; ELT4-UNALIGNED: store i32
; ELT4-UNALIGNED: store i32
-define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v4i32_align2(i32* %out) #0 {
- %out.gep.1 = getelementptr i32, i32* %out, i32 1
- %out.gep.2 = getelementptr i32, i32* %out, i32 2
- %out.gep.3 = getelementptr i32, i32* %out, i32 3
-
- store i32 9, i32* %out, align 2
- store i32 1, i32* %out.gep.1, align 2
- store i32 23, i32* %out.gep.2, align 2
- store i32 19, i32* %out.gep.3, align 2
+define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v4i32_align2(i32 addrspace(5)* %out) #0 {
+ %out.gep.1 = getelementptr i32, i32 addrspace(5)* %out, i32 1
+ %out.gep.2 = getelementptr i32, i32 addrspace(5)* %out, i32 2
+ %out.gep.3 = getelementptr i32, i32 addrspace(5)* %out, i32 3
+
+ store i32 9, i32 addrspace(5)* %out, align 2
+ store i32 1, i32 addrspace(5)* %out.gep.1, align 2
+ store i32 23, i32 addrspace(5)* %out.gep.2, align 2
+ store i32 19, i32 addrspace(5)* %out.gep.3, align 2
ret void
}
; ALL-LABEL: @merge_private_store_4_vector_elts_loads_v4i8(
; ALL: store <4 x i8>
-define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v4i8(i8* %out) #0 {
- %out.gep.1 = getelementptr i8, i8* %out, i32 1
- %out.gep.2 = getelementptr i8, i8* %out, i32 2
- %out.gep.3 = getelementptr i8, i8* %out, i32 3
-
- store i8 9, i8* %out, align 4
- store i8 1, i8* %out.gep.1
- store i8 23, i8* %out.gep.2
- store i8 19, i8* %out.gep.3
+define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v4i8(i8 addrspace(5)* %out) #0 {
+ %out.gep.1 = getelementptr i8, i8 addrspace(5)* %out, i32 1
+ %out.gep.2 = getelementptr i8, i8 addrspace(5)* %out, i32 2
+ %out.gep.3 = getelementptr i8, i8 addrspace(5)* %out, i32 3
+
+ store i8 9, i8 addrspace(5)* %out, align 4
+ store i8 1, i8 addrspace(5)* %out.gep.1
+ store i8 23, i8 addrspace(5)* %out.gep.2
+ store i8 19, i8 addrspace(5)* %out.gep.3
ret void
}
@@ -103,26 +102,26 @@ define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v4i8(i8* %out
; ALIGNED: store i8
; ALIGNED: store i8
-; UNALIGNED: store <4 x i8> <i8 9, i8 1, i8 23, i8 19>, <4 x i8>* %1, align 1
-define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v4i8_align1(i8* %out) #0 {
- %out.gep.1 = getelementptr i8, i8* %out, i32 1
- %out.gep.2 = getelementptr i8, i8* %out, i32 2
- %out.gep.3 = getelementptr i8, i8* %out, i32 3
+; UNALIGNED: store <4 x i8> <i8 9, i8 1, i8 23, i8 19>, <4 x i8> addrspace(5)* %1, align 1
+define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v4i8_align1(i8 addrspace(5)* %out) #0 {
+ %out.gep.1 = getelementptr i8, i8 addrspace(5)* %out, i32 1
+ %out.gep.2 = getelementptr i8, i8 addrspace(5)* %out, i32 2
+ %out.gep.3 = getelementptr i8, i8 addrspace(5)* %out, i32 3
- store i8 9, i8* %out, align 1
- store i8 1, i8* %out.gep.1, align 1
- store i8 23, i8* %out.gep.2, align 1
- store i8 19, i8* %out.gep.3, align 1
+ store i8 9, i8 addrspace(5)* %out, align 1
+ store i8 1, i8 addrspace(5)* %out.gep.1, align 1
+ store i8 23, i8 addrspace(5)* %out.gep.2, align 1
+ store i8 19, i8 addrspace(5)* %out.gep.3, align 1
ret void
}
; ALL-LABEL: @merge_private_store_4_vector_elts_loads_v2i16(
; ALL: store <2 x i16>
-define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v2i16(i16* %out) #0 {
- %out.gep.1 = getelementptr i16, i16* %out, i32 1
+define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v2i16(i16 addrspace(5)* %out) #0 {
+ %out.gep.1 = getelementptr i16, i16 addrspace(5)* %out, i32 1
- store i16 9, i16* %out, align 4
- store i16 12, i16* %out.gep.1
+ store i16 9, i16 addrspace(5)* %out, align 4
+ store i16 12, i16 addrspace(5)* %out.gep.1
ret void
}
@@ -130,12 +129,12 @@ define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v2i16(i16* %o
; ALIGNED: store i16
; ALIGNED: store i16
-; UNALIGNED: store <2 x i16> <i16 9, i16 12>, <2 x i16>* %1, align 2
-define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v2i16_align2(i16* %out) #0 {
- %out.gep.1 = getelementptr i16, i16* %out, i32 1
+; UNALIGNED: store <2 x i16> <i16 9, i16 12>, <2 x i16> addrspace(5)* %1, align 2
+define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v2i16_align2(i16 addrspace(5)* %out) #0 {
+ %out.gep.1 = getelementptr i16, i16 addrspace(5)* %out, i32 1
- store i16 9, i16* %out, align 2
- store i16 12, i16* %out.gep.1, align 2
+ store i16 9, i16 addrspace(5)* %out, align 2
+ store i16 12, i16 addrspace(5)* %out.gep.1, align 2
ret void
}
@@ -143,22 +142,22 @@ define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v2i16_align2(
; ALIGNED: store i16
; ALIGNED: store i16
-; UNALIGNED: store <2 x i16> <i16 9, i16 12>, <2 x i16>* %1, align 1
-define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v2i16_align1(i16* %out) #0 {
- %out.gep.1 = getelementptr i16, i16* %out, i32 1
+; UNALIGNED: store <2 x i16> <i16 9, i16 12>, <2 x i16> addrspace(5)* %1, align 1
+define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v2i16_align1(i16 addrspace(5)* %out) #0 {
+ %out.gep.1 = getelementptr i16, i16 addrspace(5)* %out, i32 1
- store i16 9, i16* %out, align 1
- store i16 12, i16* %out.gep.1, align 1
+ store i16 9, i16 addrspace(5)* %out, align 1
+ store i16 12, i16 addrspace(5)* %out.gep.1, align 1
ret void
}
; ALL-LABEL: @merge_private_store_4_vector_elts_loads_v2i16_align8(
-; ALL: store <2 x i16> <i16 9, i16 12>, <2 x i16>* %1, align 8
-define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v2i16_align8(i16* %out) #0 {
- %out.gep.1 = getelementptr i16, i16* %out, i32 1
+; ALL: store <2 x i16> <i16 9, i16 12>, <2 x i16> addrspace(5)* %1, align 8
+define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v2i16_align8(i16 addrspace(5)* %out) #0 {
+ %out.gep.1 = getelementptr i16, i16 addrspace(5)* %out, i32 1
- store i16 9, i16* %out, align 8
- store i16 12, i16* %out.gep.1, align 2
+ store i16 9, i16 addrspace(5)* %out, align 8
+ store i16 12, i16 addrspace(5)* %out.gep.1, align 2
ret void
}
@@ -179,13 +178,13 @@ define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v2i16_align8(
; ELT16-ALIGNED: store i32
; ELT16-UNALIGNED: store <3 x i32>
-define amdgpu_kernel void @merge_private_store_3_vector_elts_loads_v4i32(i32* %out) #0 {
- %out.gep.1 = getelementptr i32, i32* %out, i32 1
- %out.gep.2 = getelementptr i32, i32* %out, i32 2
+define amdgpu_kernel void @merge_private_store_3_vector_elts_loads_v4i32(i32 addrspace(5)* %out) #0 {
+ %out.gep.1 = getelementptr i32, i32 addrspace(5)* %out, i32 1
+ %out.gep.2 = getelementptr i32, i32 addrspace(5)* %out, i32 2
- store i32 9, i32* %out
- store i32 1, i32* %out.gep.1
- store i32 23, i32* %out.gep.2
+ store i32 9, i32 addrspace(5)* %out
+ store i32 1, i32 addrspace(5)* %out.gep.1
+ store i32 23, i32 addrspace(5)* %out.gep.2
ret void
}
@@ -202,13 +201,13 @@ define amdgpu_kernel void @merge_private_store_3_vector_elts_loads_v4i32(i32* %o
; ELT8-UNALIGNED: store i32
; ELT16-UNALIGNED: store <3 x i32>
-define amdgpu_kernel void @merge_private_store_3_vector_elts_loads_v4i32_align1(i32* %out) #0 {
- %out.gep.1 = getelementptr i32, i32* %out, i32 1
- %out.gep.2 = getelementptr i32, i32* %out, i32 2
+define amdgpu_kernel void @merge_private_store_3_vector_elts_loads_v4i32_align1(i32 addrspace(5)* %out) #0 {
+ %out.gep.1 = getelementptr i32, i32 addrspace(5)* %out, i32 1
+ %out.gep.2 = getelementptr i32, i32 addrspace(5)* %out, i32 2
- store i32 9, i32* %out, align 1
- store i32 1, i32* %out.gep.1, align 1
- store i32 23, i32* %out.gep.2, align 1
+ store i32 9, i32 addrspace(5)* %out, align 1
+ store i32 1, i32 addrspace(5)* %out.gep.1, align 1
+ store i32 23, i32 addrspace(5)* %out.gep.2, align 1
ret void
}
@@ -218,13 +217,13 @@ define amdgpu_kernel void @merge_private_store_3_vector_elts_loads_v4i32_align1(
; ALIGNED: store i8
; UNALIGNED: store <3 x i8>
-define amdgpu_kernel void @merge_private_store_3_vector_elts_loads_v4i8_align1(i8* %out) #0 {
- %out.gep.1 = getelementptr i8, i8* %out, i8 1
- %out.gep.2 = getelementptr i8, i8* %out, i8 2
+define amdgpu_kernel void @merge_private_store_3_vector_elts_loads_v4i8_align1(i8 addrspace(5)* %out) #0 {
+ %out.gep.1 = getelementptr i8, i8 addrspace(5)* %out, i8 1
+ %out.gep.2 = getelementptr i8, i8 addrspace(5)* %out, i8 2
- store i8 9, i8* %out, align 1
- store i8 1, i8* %out.gep.1, align 1
- store i8 23, i8* %out.gep.2, align 1
+ store i8 9, i8 addrspace(5)* %out, align 1
+ store i8 1, i8 addrspace(5)* %out.gep.1, align 1
+ store i8 23, i8 addrspace(5)* %out.gep.2, align 1
ret void
}