Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll')
-rw-r--r-- | llvm/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll | 26
1 file changed, 13 insertions, 13 deletions
diff --git a/llvm/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll b/llvm/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll
index 004d36f00e5..59fdc8be5ce 100644
--- a/llvm/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll
+++ b/llvm/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll
@@ -6,7 +6,7 @@ declare i32 @llvm.amdgcn.workitem.id.y() nounwind readnone
 
 ; FUNC-LABEL: {{^}}test_umul24_i32:
 ; GCN: v_mul_u32_u24
-define void @test_umul24_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+define amdgpu_kernel void @test_umul24_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
 entry:
   %0 = shl i32 %a, 8
   %a_24 = lshr i32 %0, 8
@@ -22,7 +22,7 @@ entry:
 ; SI: v_bfe_i32 v{{[0-9]}}, [[VI_MUL]], 0, 16
 ; VI: s_mul_i32 [[SI_MUL:s[0-9]]], s{{[0-9]}}, s{{[0-9]}}
 ; VI: s_sext_i32_i16 s{{[0-9]}}, [[SI_MUL]]
-define void @test_umul24_i16_sext(i32 addrspace(1)* %out, i16 %a, i16 %b) {
+define amdgpu_kernel void @test_umul24_i16_sext(i32 addrspace(1)* %out, i16 %a, i16 %b) {
 entry:
   %mul = mul i16 %a, %b
   %ext = sext i16 %mul to i32
@@ -34,7 +34,7 @@ entry:
 ; SI: v_mul_u32_u24_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
 ; VI: v_mul_lo_u16_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
 ; GCN: v_bfe_i32 v{{[0-9]}}, [[MUL]], 0, 16
-define void @test_umul24_i16_vgpr_sext(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
+define amdgpu_kernel void @test_umul24_i16_vgpr_sext(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
   %tid.x = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.y = call i32 @llvm.amdgcn.workitem.id.y()
   %ptr_a = getelementptr i16, i16 addrspace(1)* %in, i32 %tid.x
@@ -54,7 +54,7 @@ define void @test_umul24_i16_vgpr_sext(i32 addrspace(1)* %out, i16 addrspace(1)*
 ; VI: s_mul_i32
 ; VI: s_and_b32
 ; VI: v_mov_b32_e32
-define void @test_umul24_i16(i32 addrspace(1)* %out, i16 %a, i16 %b) {
+define amdgpu_kernel void @test_umul24_i16(i32 addrspace(1)* %out, i16 %a, i16 %b) {
 entry:
   %mul = mul i16 %a, %b
   %ext = zext i16 %mul to i32
@@ -66,7 +66,7 @@ entry:
 ; SI: v_mul_u32_u24_e32
 ; SI: v_and_b32_e32
 ; VI: v_mul_lo_u16
-define void @test_umul24_i16_vgpr(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
+define amdgpu_kernel void @test_umul24_i16_vgpr(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
   %tid.x = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.y = call i32 @llvm.amdgcn.workitem.id.y()
   %ptr_a = getelementptr i16, i16 addrspace(1)* %in, i32 %tid.x
@@ -83,7 +83,7 @@ define void @test_umul24_i16_vgpr(i32 addrspace(1)* %out, i16 addrspace(1)* %in)
 ; SI: v_mul_u32_u24_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
 ; VI: v_mul_lo_u16_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
 ; GCN: v_bfe_i32 v{{[0-9]}}, [[MUL]], 0, 8
-define void @test_umul24_i8_vgpr(i32 addrspace(1)* %out, i8 addrspace(1)* %a, i8 addrspace(1)* %b) {
+define amdgpu_kernel void @test_umul24_i8_vgpr(i32 addrspace(1)* %out, i8 addrspace(1)* %a, i8 addrspace(1)* %b) {
 entry:
   %tid.x = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.y = call i32 @llvm.amdgcn.workitem.id.y()
@@ -101,7 +101,7 @@ entry:
 ; GCN-NOT: and
 ; GCN: v_mul_hi_u32_u24_e32 [[RESULT:v[0-9]+]],
 ; GCN-NEXT: buffer_store_dword [[RESULT]]
-define void @test_umulhi24_i32_i64(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+define amdgpu_kernel void @test_umulhi24_i32_i64(i32 addrspace(1)* %out, i32 %a, i32 %b) {
 entry:
   %a.24 = and i32 %a, 16777215
   %b.24 = and i32 %b, 16777215
@@ -118,7 +118,7 @@ entry:
 ; GCN-NOT: and
 ; GCN: v_mul_hi_u32_u24_e32 [[RESULT:v[0-9]+]],
 ; GCN-NEXT: buffer_store_dword [[RESULT]]
-define void @test_umulhi24(i32 addrspace(1)* %out, i64 %a, i64 %b) {
+define amdgpu_kernel void @test_umulhi24(i32 addrspace(1)* %out, i64 %a, i64 %b) {
 entry:
   %a.24 = and i64 %a, 16777215
   %b.24 = and i64 %b, 16777215
@@ -136,7 +136,7 @@ entry:
 ; GCN-DAG: v_mul_u32_u24_e32
 ; GCN-DAG: v_mul_hi_u32_u24_e32
 ; GCN: buffer_store_dwordx2
-define void @test_umul24_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
+define amdgpu_kernel void @test_umul24_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
 entry:
   %tmp0 = shl i64 %a, 40
   %a_24 = lshr i64 %tmp0, 40
@@ -152,7 +152,7 @@ entry:
 ; GCN-NOT: s_and_b32
 ; GCN-DAG: v_mul_hi_u32_u24_e64 v{{[0-9]+}}, [[A]], [[A]]
 ; GCN-DAG: v_mul_u32_u24_e64 v{{[0-9]+}}, [[A]], [[A]]
-define void @test_umul24_i64_square(i64 addrspace(1)* %out, i64 %a) {
+define amdgpu_kernel void @test_umul24_i64_square(i64 addrspace(1)* %out, i64 %a) {
 entry:
   %tmp0 = shl i64 %a, 40
   %a.24 = lshr i64 %tmp0, 40
@@ -166,7 +166,7 @@ entry:
 ; GCN: s_and_b32
 ; GCN: v_mul_u32_u24_e32 [[MUL24:v[0-9]+]]
 ; GCN: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, [[MUL24]]
-define void @test_umulhi16_i32(i16 addrspace(1)* %out, i32 %a, i32 %b) {
+define amdgpu_kernel void @test_umulhi16_i32(i16 addrspace(1)* %out, i32 %a, i32 %b) {
 entry:
   %a.16 = and i32 %a, 65535
   %b.16 = and i32 %b, 65535
@@ -186,7 +186,7 @@ entry:
 ; GCN-DAG: v_mul_hi_u32_u24_e32 v[[MUL_HI:[0-9]+]],
 ; GCN-DAG: v_and_b32_e32 v[[HI:[0-9]+]], 1, v[[MUL_HI]]
 ; GCN: buffer_store_dwordx2 v{{\[}}[[MUL_LO]]:[[HI]]{{\]}}
-define void @test_umul24_i33(i64 addrspace(1)* %out, i33 %a, i33 %b) {
+define amdgpu_kernel void @test_umul24_i33(i64 addrspace(1)* %out, i33 %a, i33 %b) {
 entry:
   %tmp0 = shl i33 %a, 9
   %a_24 = lshr i33 %tmp0, 9
@@ -206,7 +206,7 @@ entry:
 ; GCN: v_mul_hi_u32_u24_e32 v[[MUL_HI:[0-9]+]],
 ; GCN-NEXT: v_and_b32_e32 v[[HI:[0-9]+]], 1, v[[MUL_HI]]
 ; GCN-NEXT: buffer_store_dword v[[HI]]
-define void @test_umulhi24_i33(i32 addrspace(1)* %out, i33 %a, i33 %b) {
+define amdgpu_kernel void @test_umulhi24_i33(i32 addrspace(1)* %out, i33 %a, i33 %b) {
 entry:
   %tmp0 = shl i33 %a, 9
   %a_24 = lshr i33 %tmp0, 9
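
Every hunk above makes the same mechanical change: the test entry points gain the amdgpu_kernel calling convention in place of the default C calling convention, marking them as GPU kernel entry points rather than ordinary callable functions. A minimal reduced test in the same style is sketched below; the kernel name, the completed function body, and the RUN comment are illustrative reconstructions under the file's conventions, not verbatim copies.

; Hypothetical reduced test; run with something like:
;   llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
; GCN: v_mul_u32_u24
define amdgpu_kernel void @umul24_example(i32 addrspace(1)* %out, i32 %a, i32 %b) {
entry:
  ; Clearing the top 8 bits with a shl/lshr pair proves the operands fit in
  ; 24 bits, so the backend can select the 24-bit multiply (v_mul_u32_u24).
  %a.shl = shl i32 %a, 8
  %a.24 = lshr i32 %a.shl, 8
  %b.shl = shl i32 %b, 8
  %b.24 = lshr i32 %b.shl, 8
  %mul = mul i32 %a.24, %b.24
  store i32 %mul, i32 addrspace(1)* %out
  ret void
}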