Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/trunc.ll')
-rw-r--r-- | llvm/test/CodeGen/AMDGPU/trunc.ll | 18
1 file changed, 9 insertions, 9 deletions
diff --git a/llvm/test/CodeGen/AMDGPU/trunc.ll b/llvm/test/CodeGen/AMDGPU/trunc.ll
index a8f10cc5a9c..0c91d52df0c 100644
--- a/llvm/test/CodeGen/AMDGPU/trunc.ll
+++ b/llvm/test/CodeGen/AMDGPU/trunc.ll
@@ -4,7 +4,7 @@
 declare i32 @llvm.r600.read.tidig.x() nounwind readnone
-define void @trunc_i64_to_i32_store(i32 addrspace(1)* %out, i64 %in) {
+define amdgpu_kernel void @trunc_i64_to_i32_store(i32 addrspace(1)* %out, i64 %in) {
 ; GCN-LABEL: {{^}}trunc_i64_to_i32_store:
 ; GCN: s_load_dword [[SLOAD:s[0-9]+]], s[0:1],
 ; GCN: v_mov_b32_e32 [[VLOAD:v[0-9]+]], [[SLOAD]]
@@ -28,7 +28,7 @@ define void @trunc_i64_to_i32_store(i32 addrspace(1)* %out, i64 %in) {
 ; SI: buffer_store_dword [[VSHL]]
 ; VI: flat_store_dword v[{{[0-9:]+}}], [[VSHL]]
-define void @trunc_load_shl_i64(i32 addrspace(1)* %out, i64 %a) {
+define amdgpu_kernel void @trunc_load_shl_i64(i32 addrspace(1)* %out, i64 %a) {
   %b = shl i64 %a, 2
   %result = trunc i64 %b to i32
   store i32 %result, i32 addrspace(1)* %out, align 4
@@ -46,7 +46,7 @@ define void @trunc_load_shl_i64(i32 addrspace(1)* %out, i64 %a) {
 ; VI: flat_store_dword v[{{[0-9:]+}}], v[[LO_VREG]]
 ; GCN: v_mov_b32_e32
 ; GCN: v_mov_b32_e32
-define void @trunc_shl_i64(i64 addrspace(1)* %out2, i32 addrspace(1)* %out, i64 %a) {
+define amdgpu_kernel void @trunc_shl_i64(i64 addrspace(1)* %out2, i32 addrspace(1)* %out, i64 %a) {
   %aa = add i64 %a, 234 ; Prevent shrinking store.
   %b = shl i64 %aa, 2
   %result = trunc i64 %b to i32
@@ -57,7 +57,7 @@ define void @trunc_shl_i64(i64 addrspace(1)* %out2, i32 addrspace(1)* %out, i64
 ; GCN-LABEL: {{^}}trunc_i32_to_i1:
 ; GCN: v_and_b32_e32 [[VREG:v[0-9]+]], 1, v{{[0-9]+}}
-define void @trunc_i32_to_i1(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) {
+define amdgpu_kernel void @trunc_i32_to_i1(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) {
   %a = load i32, i32 addrspace(1)* %ptr, align 4
   %trunc = trunc i32 %a to i1
   %result = select i1 %trunc, i32 1, i32 0
@@ -67,7 +67,7 @@ define void @trunc_i32_to_i1(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) {
 ; GCN-LABEL: {{^}}trunc_i8_to_i1:
 ; GCN: v_and_b32_e32 [[VREG:v[0-9]+]], 1, v{{[0-9]+}}
-define void @trunc_i8_to_i1(i8 addrspace(1)* %out, i8 addrspace(1)* %ptr) {
+define amdgpu_kernel void @trunc_i8_to_i1(i8 addrspace(1)* %out, i8 addrspace(1)* %ptr) {
   %a = load i8, i8 addrspace(1)* %ptr, align 4
   %trunc = trunc i8 %a to i1
   %result = select i1 %trunc, i8 1, i8 0
@@ -77,7 +77,7 @@ define void @trunc_i8_to_i1(i8 addrspace(1)* %out, i8 addrspace(1)* %ptr) {
 ; GCN-LABEL: {{^}}sgpr_trunc_i16_to_i1:
 ; GCN: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 1
-define void @sgpr_trunc_i16_to_i1(i16 addrspace(1)* %out, i16 %a) {
+define amdgpu_kernel void @sgpr_trunc_i16_to_i1(i16 addrspace(1)* %out, i16 %a) {
   %trunc = trunc i16 %a to i1
   %result = select i1 %trunc, i16 1, i16 0
   store i16 %result, i16 addrspace(1)* %out, align 4
@@ -86,7 +86,7 @@ define void @sgpr_trunc_i16_to_i1(i16 addrspace(1)* %out, i16 %a) {
 ; GCN-LABEL: {{^}}sgpr_trunc_i32_to_i1:
 ; GCN: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 1
-define void @sgpr_trunc_i32_to_i1(i32 addrspace(1)* %out, i32 %a) {
+define amdgpu_kernel void @sgpr_trunc_i32_to_i1(i32 addrspace(1)* %out, i32 %a) {
   %trunc = trunc i32 %a to i1
   %result = select i1 %trunc, i32 1, i32 0
   store i32 %result, i32 addrspace(1)* %out, align 4
@@ -99,7 +99,7 @@ define void @sgpr_trunc_i32_to_i1(i32 addrspace(1)* %out, i32 %a) {
 ; GCN: s_and_b32 [[MASKED:s[0-9]+]], 1, s[[SLO]]
 ; GCN: v_cmp_eq_u32_e64 s{{\[}}[[VLO:[0-9]+]]:[[VHI:[0-9]+]]], [[MASKED]], 1{{$}}
 ; GCN: v_cndmask_b32_e64 {{v[0-9]+}}, -12, 63, s{{\[}}[[VLO]]:[[VHI]]]
-define void @s_trunc_i64_to_i1(i32 addrspace(1)* %out, i64 %x) {
+define amdgpu_kernel void @s_trunc_i64_to_i1(i32 addrspace(1)* %out, i64 %x) {
   %trunc = trunc i64 %x to i1
   %sel = select i1 %trunc, i32 63, i32 -12
   store i32 %sel, i32 addrspace(1)* %out
@@ -112,7 +112,7 @@ define void @s_trunc_i64_to_i1(i32 addrspace(1)* %out, i64 %x) {
 ; GCN: v_and_b32_e32 [[MASKED:v[0-9]+]], 1, v[[VLO]]
 ; GCN: v_cmp_eq_u32_e32 vcc, 1, [[MASKED]]
 ; GCN: v_cndmask_b32_e64 {{v[0-9]+}}, -12, 63, vcc
-define void @v_trunc_i64_to_i1(i32 addrspace(1)* %out, i64 addrspace(1)* %in) {
+define amdgpu_kernel void @v_trunc_i64_to_i1(i32 addrspace(1)* %out, i64 addrspace(1)* %in) {
   %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
   %gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
   %out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
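Every hunk above makes the same one-line change: test entry points previously declared with the default calling convention (define void @...) are now declared with the amdgpu_kernel calling convention. As a minimal illustrative sketch of that before/after shape (the function name and body here are hypothetical, not taken from this test file):

  ; Before: entry point uses the default calling convention.
  ;   define void @example_kernel(i32 addrspace(1)* %out, i32 %in) { ... }

  ; After: entry point is explicitly marked as an AMDGPU kernel.
  define amdgpu_kernel void @example_kernel(i32 addrspace(1)* %out, i32 %in) {
    %lo = trunc i32 %in to i8                    ; illustrative body only
    %ext = zext i8 %lo to i32
    store i32 %ext, i32 addrspace(1)* %out, align 4
    ret void
  }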