author    | Yaxun Liu <Yaxun.Liu@amd.com> | 2018-02-02 16:07:16 +0000
committer | Yaxun Liu <Yaxun.Liu@amd.com> | 2018-02-02 16:07:16 +0000
commit    | 2a22c5deff3830d50fbc3f877ab30af9f42792f9 (patch)
tree      | 25b57e509727b39c0a06715cccf5dbab3e1ea67e /llvm/test/CodeGen/AMDGPU/mubuf-offset-private.ll
parent    | a43e9653bbb388d7fe3d58541bdf13612705cc8f (diff)
download  | bcm5719-llvm-2a22c5deff3830d50fbc3f877ab30af9f42792f9.tar.gz bcm5719-llvm-2a22c5deff3830d50fbc3f877ab30af9f42792f9.zip
[AMDGPU] Switch to the new addr space mapping by default
This requires a corresponding clang change.
Differential Revision: https://reviews.llvm.org/D40955
llvm-svn: 324101
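For context, a minimal LLVM IR sketch (illustrative only, not part of this commit; the kernel name and constants are made up) of what the test updates below amount to: under the new AMDGPU mapping, private (scratch) allocas and pointers carry addrspace(5), while the default address space 0 is left for generic (flat) pointers, which is what lets the backend select MUBUF scratch accesses with immediate offsets as the CHECK lines expect.

; Illustrative sketch: private (scratch) memory is now addrspace(5)
; rather than the default address space 0.
define amdgpu_kernel void @example_private_store() {
  ; an alloca must now carry the private address space explicitly
  %buf = alloca [4 x i32], align 4, addrspace(5)
  %p = getelementptr inbounds [4 x i32], [4 x i32] addrspace(5)* %buf, i32 0, i32 2
  ; a store through an addrspace(5) pointer lowers to a MUBUF scratch store
  store volatile i32 42, i32 addrspace(5)* %p
  ret void
}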
Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/mubuf-offset-private.ll')
-rw-r--r-- | llvm/test/CodeGen/AMDGPU/mubuf-offset-private.ll | 40
1 file changed, 20 insertions, 20 deletions
diff --git a/llvm/test/CodeGen/AMDGPU/mubuf-offset-private.ll b/llvm/test/CodeGen/AMDGPU/mubuf-offset-private.ll
index 23bd2e4bc82..5a0d87f5186 100644
--- a/llvm/test/CodeGen/AMDGPU/mubuf-offset-private.ll
+++ b/llvm/test/CodeGen/AMDGPU/mubuf-offset-private.ll
@@ -7,49 +7,49 @@
 ; GCN-LABEL: {{^}}store_private_offset_i8:
 ; GCN: buffer_store_byte v{{[0-9]+}}, off, s[4:7], s2 offset:8
 define amdgpu_kernel void @store_private_offset_i8() #0 {
-  store volatile i8 5, i8* inttoptr (i32 8 to i8*)
+  store volatile i8 5, i8 addrspace(5)* inttoptr (i32 8 to i8 addrspace(5)*)
   ret void
 }
 
 ; GCN-LABEL: {{^}}store_private_offset_i16:
 ; GCN: buffer_store_short v{{[0-9]+}}, off, s[4:7], s2 offset:8
 define amdgpu_kernel void @store_private_offset_i16() #0 {
-  store volatile i16 5, i16* inttoptr (i32 8 to i16*)
+  store volatile i16 5, i16 addrspace(5)* inttoptr (i32 8 to i16 addrspace(5)*)
   ret void
 }
 
 ; GCN-LABEL: {{^}}store_private_offset_i32:
 ; GCN: buffer_store_dword v{{[0-9]+}}, off, s[4:7], s2 offset:8
 define amdgpu_kernel void @store_private_offset_i32() #0 {
-  store volatile i32 5, i32* inttoptr (i32 8 to i32*)
+  store volatile i32 5, i32 addrspace(5)* inttoptr (i32 8 to i32 addrspace(5)*)
   ret void
 }
 
 ; GCN-LABEL: {{^}}store_private_offset_v2i32:
 ; GCN: buffer_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s2 offset:8
 define amdgpu_kernel void @store_private_offset_v2i32() #0 {
-  store volatile <2 x i32> <i32 5, i32 10>, <2 x i32>* inttoptr (i32 8 to <2 x i32>*)
+  store volatile <2 x i32> <i32 5, i32 10>, <2 x i32> addrspace(5)* inttoptr (i32 8 to <2 x i32> addrspace(5)*)
   ret void
 }
 
 ; GCN-LABEL: {{^}}store_private_offset_v4i32:
 ; GCN: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s2 offset:8
 define amdgpu_kernel void @store_private_offset_v4i32() #0 {
-  store volatile <4 x i32> <i32 5, i32 10, i32 15, i32 0>, <4 x i32>* inttoptr (i32 8 to <4 x i32>*)
+  store volatile <4 x i32> <i32 5, i32 10, i32 15, i32 0>, <4 x i32> addrspace(5)* inttoptr (i32 8 to <4 x i32> addrspace(5)*)
   ret void
 }
 
 ; GCN-LABEL: {{^}}load_private_offset_i8:
 ; GCN: buffer_load_ubyte v{{[0-9]+}}, off, s[4:7], s2 offset:8
 define amdgpu_kernel void @load_private_offset_i8() #0 {
-  %load = load volatile i8, i8* inttoptr (i32 8 to i8*)
+  %load = load volatile i8, i8 addrspace(5)* inttoptr (i32 8 to i8 addrspace(5)*)
   ret void
 }
 
 ; GCN-LABEL: {{^}}sextload_private_offset_i8:
 ; GCN: buffer_load_sbyte v{{[0-9]+}}, off, s[4:7], s8 offset:8
 define amdgpu_kernel void @sextload_private_offset_i8(i32 addrspace(1)* %out) #0 {
-  %load = load volatile i8, i8* inttoptr (i32 8 to i8*)
+  %load = load volatile i8, i8 addrspace(5)* inttoptr (i32 8 to i8 addrspace(5)*)
   %sextload = sext i8 %load to i32
   store i32 %sextload, i32 addrspace(1)* undef
   ret void
@@ -58,7 +58,7 @@ define amdgpu_kernel void @sextload_private_offset_i8(i32 addrspace(1)* %out) #0
 ; GCN-LABEL: {{^}}zextload_private_offset_i8:
 ; GCN: buffer_load_ubyte v{{[0-9]+}}, off, s[4:7], s8 offset:8
 define amdgpu_kernel void @zextload_private_offset_i8(i32 addrspace(1)* %out) #0 {
-  %load = load volatile i8, i8* inttoptr (i32 8 to i8*)
+  %load = load volatile i8, i8 addrspace(5)* inttoptr (i32 8 to i8 addrspace(5)*)
   %zextload = zext i8 %load to i32
   store i32 %zextload, i32 addrspace(1)* undef
   ret void
@@ -67,14 +67,14 @@ define amdgpu_kernel void @zextload_private_offset_i8(i32 addrspace(1)* %out) #0
 ; GCN-LABEL: {{^}}load_private_offset_i16:
 ; GCN: buffer_load_ushort v{{[0-9]+}}, off, s[4:7], s2 offset:8
 define amdgpu_kernel void @load_private_offset_i16() #0 {
-  %load = load volatile i16, i16* inttoptr (i32 8 to i16*)
+  %load = load volatile i16, i16 addrspace(5)* inttoptr (i32 8 to i16 addrspace(5)*)
   ret void
 }
 
 ; GCN-LABEL: {{^}}sextload_private_offset_i16:
 ; GCN: buffer_load_sshort v{{[0-9]+}}, off, s[4:7], s8 offset:8
 define amdgpu_kernel void @sextload_private_offset_i16(i32 addrspace(1)* %out) #0 {
-  %load = load volatile i16, i16* inttoptr (i32 8 to i16*)
+  %load = load volatile i16, i16 addrspace(5)* inttoptr (i32 8 to i16 addrspace(5)*)
   %sextload = sext i16 %load to i32
   store i32 %sextload, i32 addrspace(1)* undef
   ret void
@@ -83,7 +83,7 @@ define amdgpu_kernel void @sextload_private_offset_i16(i32 addrspace(1)* %out) #
 ; GCN-LABEL: {{^}}zextload_private_offset_i16:
 ; GCN: buffer_load_ushort v{{[0-9]+}}, off, s[4:7], s8 offset:8
 define amdgpu_kernel void @zextload_private_offset_i16(i32 addrspace(1)* %out) #0 {
-  %load = load volatile i16, i16* inttoptr (i32 8 to i16*)
+  %load = load volatile i16, i16 addrspace(5)* inttoptr (i32 8 to i16 addrspace(5)*)
   %zextload = zext i16 %load to i32
   store i32 %zextload, i32 addrspace(1)* undef
   ret void
@@ -92,28 +92,28 @@ define amdgpu_kernel void @zextload_private_offset_i16(i32 addrspace(1)* %out) #
 ; GCN-LABEL: {{^}}load_private_offset_i32:
 ; GCN: buffer_load_dword v{{[0-9]+}}, off, s[4:7], s2 offset:8
 define amdgpu_kernel void @load_private_offset_i32() #0 {
-  %load = load volatile i32, i32* inttoptr (i32 8 to i32*)
+  %load = load volatile i32, i32 addrspace(5)* inttoptr (i32 8 to i32 addrspace(5)*)
   ret void
 }
 
 ; GCN-LABEL: {{^}}load_private_offset_v2i32:
 ; GCN: buffer_load_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s2 offset:8
 define amdgpu_kernel void @load_private_offset_v2i32() #0 {
-  %load = load volatile <2 x i32>, <2 x i32>* inttoptr (i32 8 to <2 x i32>*)
+  %load = load volatile <2 x i32>, <2 x i32> addrspace(5)* inttoptr (i32 8 to <2 x i32> addrspace(5)*)
   ret void
 }
 
 ; GCN-LABEL: {{^}}load_private_offset_v4i32:
 ; GCN: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s2 offset:8
 define amdgpu_kernel void @load_private_offset_v4i32() #0 {
-  %load = load volatile <4 x i32>, <4 x i32>* inttoptr (i32 8 to <4 x i32>*)
+  %load = load volatile <4 x i32>, <4 x i32> addrspace(5)* inttoptr (i32 8 to <4 x i32> addrspace(5)*)
   ret void
 }
 
 ; GCN-LABEL: {{^}}store_private_offset_i8_max_offset:
 ; GCN: buffer_store_byte v{{[0-9]+}}, off, s[4:7], s2 offset:4095
 define amdgpu_kernel void @store_private_offset_i8_max_offset() #0 {
-  store volatile i8 5, i8* inttoptr (i32 4095 to i8*)
+  store volatile i8 5, i8 addrspace(5)* inttoptr (i32 4095 to i8 addrspace(5)*)
   ret void
 }
 
@@ -121,7 +121,7 @@ define amdgpu_kernel void @store_private_offset_i8_max_offset() #0 {
 ; GCN: v_mov_b32_e32 [[OFFSET:v[0-9]+]], 0x1000
 ; GCN: buffer_store_byte v{{[0-9]+}}, [[OFFSET]], s[4:7], s2 offen{{$}}
 define amdgpu_kernel void @store_private_offset_i8_max_offset_plus1() #0 {
-  store volatile i8 5, i8* inttoptr (i32 4096 to i8*)
+  store volatile i8 5, i8 addrspace(5)* inttoptr (i32 4096 to i8 addrspace(5)*)
   ret void
 }
 
@@ -129,7 +129,7 @@ define amdgpu_kernel void @store_private_offset_i8_max_offset_plus1() #0 {
 ; GCN: v_mov_b32_e32 [[OFFSET:v[0-9]+]], 0x1000
 ; GCN: buffer_store_byte v{{[0-9]+}}, [[OFFSET]], s[4:7], s2 offen offset:1{{$}}
 define amdgpu_kernel void @store_private_offset_i8_max_offset_plus2() #0 {
-  store volatile i8 5, i8* inttoptr (i32 4097 to i8*)
+  store volatile i8 5, i8 addrspace(5)* inttoptr (i32 4097 to i8 addrspace(5)*)
   ret void
 }
 
@@ -144,11 +144,11 @@ define amdgpu_kernel void @store_private_offset_i8_max_offset_plus2() #0 {
 ; GFX9: v_add_u32_e32 [[ADDR:v[0-9]+]], 4,
 ; GFX9: buffer_store_dword v{{[0-9]+}}, [[ADDR]], s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen offset:32
 define amdgpu_kernel void @store_private_unknown_bits_vaddr() #0 {
-  %alloca = alloca [16 x i32], align 4
+  %alloca = alloca [16 x i32], align 4, addrspace(5)
   %vaddr = load volatile i32, i32 addrspace(1)* undef
   %vaddr.off = add i32 %vaddr, 8
-  %gep = getelementptr inbounds [16 x i32], [16 x i32]* %alloca, i32 0, i32 %vaddr.off
-  store volatile i32 9, i32* %gep
+  %gep = getelementptr inbounds [16 x i32], [16 x i32] addrspace(5)* %alloca, i32 0, i32 %vaddr.off
+  store volatile i32 9, i32 addrspace(5)* %gep
   ret void
 }