Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/shl.ll')
-rw-r--r-- | llvm/test/CodeGen/AMDGPU/shl.ll | 66
1 file changed, 33 insertions, 33 deletions
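The change below is purely mechanical: each test entry point is switched from the default calling convention to amdgpu_kernel, the LLVM IR calling convention that marks a function as an AMDGPU kernel entry point. A minimal sketch of the pattern (the function here is hypothetical, not one from the test file):

; Before: entry point uses the default calling convention.
define void @example_kernel(i32 addrspace(1)* %out) {
  store i32 0, i32 addrspace(1)* %out
  ret void
}

; After: entry point is explicitly an AMDGPU kernel.
define amdgpu_kernel void @example_kernel(i32 addrspace(1)* %out) {
  store i32 0, i32 addrspace(1)* %out
  ret void
}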
diff --git a/llvm/test/CodeGen/AMDGPU/shl.ll b/llvm/test/CodeGen/AMDGPU/shl.ll
index 8844273ecc3..f6520eeb4fd 100644
--- a/llvm/test/CodeGen/AMDGPU/shl.ll
+++ b/llvm/test/CodeGen/AMDGPU/shl.ll
@@ -17,7 +17,7 @@ declare i32 @llvm.r600.read.tidig.x() #0
 ;VI: v_lshlrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
 ;VI: v_lshlrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-define void @shl_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @shl_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
   %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
   %a = load <2 x i32>, <2 x i32> addrspace(1)* %in
   %b = load <2 x i32>, <2 x i32> addrspace(1)* %b_ptr
@@ -44,7 +44,7 @@ define void @shl_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in
 ;VI: v_lshlrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
 ;VI: v_lshlrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-define void @shl_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+define amdgpu_kernel void @shl_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
   %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
   %a = load <4 x i32>, <4 x i32> addrspace(1)* %in
   %b = load <4 x i32>, <4 x i32> addrspace(1)* %b_ptr
@@ -57,7 +57,7 @@ define void @shl_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in
 ; SI: v_lshlrev_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
 ; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @shl_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
+define amdgpu_kernel void @shl_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
   %b_ptr = getelementptr i16, i16 addrspace(1)* %in, i16 1
   %a = load i16, i16 addrspace(1)* %in
   %b = load i16, i16 addrspace(1)* %b_ptr
@@ -70,7 +70,7 @@ define void @shl_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
 ; VI: v_lshlrev_b32_e64 v{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
 ; VI: v_lshlrev_b16_e64 v{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
-define void @shl_i16_v_s(i16 addrspace(1)* %out, i16 addrspace(1)* %in, i16 %b) {
+define amdgpu_kernel void @shl_i16_v_s(i16 addrspace(1)* %out, i16 addrspace(1)* %in, i16 %b) {
   %a = load i16, i16 addrspace(1)* %in
   %result = shl i16 %a, %b
   store i16 %result, i16 addrspace(1)* %out
@@ -81,7 +81,7 @@ define void @shl_i16_v_s(i16 addrspace(1)* %out, i16 addrspace(1)* %in, i16 %b)
 ; SI: v_lshlrev_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
 ; VI: v_lshlrev_b16_e64 v{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
-define void @shl_i16_v_compute_s(i16 addrspace(1)* %out, i16 addrspace(1)* %in, i16 %b) {
+define amdgpu_kernel void @shl_i16_v_compute_s(i16 addrspace(1)* %out, i16 addrspace(1)* %in, i16 %b) {
   %a = load i16, i16 addrspace(1)* %in
   %b.add = add i16 %b, 3
   %result = shl i16 %a, %b.add
@@ -92,7 +92,7 @@ define void @shl_i16_v_compute_s(i16 addrspace(1)* %out, i16 addrspace(1)* %in,
 ; GCN-LABEL: {{^}}shl_i16_computed_amount:
 ; VI: v_add_u16_e32 [[ADD:v[0-9]+]], 3, v{{[0-9]+}}
 ; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, [[ADD]], v{{[0-9]+}}
-define void @shl_i16_computed_amount(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
+define amdgpu_kernel void @shl_i16_computed_amount(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
   %tid = call i32 @llvm.r600.read.tidig.x() #0
   %gep = getelementptr inbounds i16, i16 addrspace(1)* %in, i32 %tid
   %gep.out = getelementptr inbounds i16, i16 addrspace(1)* %out, i32 %tid
@@ -107,7 +107,7 @@ define void @shl_i16_computed_amount(i16 addrspace(1)* %out, i16 addrspace(1)* %
 ; GCN-LABEL: {{^}}shl_i16_i_s:
 ; GCN: s_lshl_b32 s{{[0-9]+}}, s{{[0-9]+}}, 12
-define void @shl_i16_i_s(i16 addrspace(1)* %out, i16 zeroext %a) {
+define amdgpu_kernel void @shl_i16_i_s(i16 addrspace(1)* %out, i16 zeroext %a) {
   %result = shl i16 %a, 12
   store i16 %result, i16 addrspace(1)* %out
   ret void
@@ -116,7 +116,7 @@ define void @shl_i16_i_s(i16 addrspace(1)* %out, i16 zeroext %a) {
 ; GCN-LABEL: {{^}}shl_v2i16:
 ; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
 ; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @shl_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) {
+define amdgpu_kernel void @shl_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) {
   %tid = call i32 @llvm.r600.read.tidig.x() #0
   %gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i32 %tid
   %gep.out = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i32 %tid
@@ -133,7 +133,7 @@ define void @shl_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in
 ; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
 ; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
 ; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-define void @shl_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) {
+define amdgpu_kernel void @shl_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) {
   %tid = call i32 @llvm.r600.read.tidig.x() #0
   %gep = getelementptr inbounds <4 x i16>, <4 x i16> addrspace(1)* %in, i32 %tid
   %gep.out = getelementptr inbounds <4 x i16>, <4 x i16> addrspace(1)* %out, i32 %tid
@@ -160,7 +160,7 @@ define void @shl_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %in
 ; GCN-LABEL: {{^}}shl_i64:
 ; SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
 ; VI: v_lshlrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
-define void @shl_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
+define amdgpu_kernel void @shl_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
   %b_ptr = getelementptr i64, i64 addrspace(1)* %in, i64 1
   %a = load i64, i64 addrspace(1)* %in
   %b = load i64, i64 addrspace(1)* %b_ptr
@@ -199,7 +199,7 @@ define void @shl_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
 ;VI: v_lshlrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
 ;VI: v_lshlrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
-define void @shl_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
+define amdgpu_kernel void @shl_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
   %b_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %in, i64 1
   %a = load <2 x i64>, <2 x i64> addrspace(1)* %in
   %b = load <2 x i64>, <2 x i64> addrspace(1)* %b_ptr
@@ -262,7 +262,7 @@ define void @shl_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in
 ;VI: v_lshlrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
 ;VI: v_lshlrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
-define void @shl_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
+define amdgpu_kernel void @shl_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
   %b_ptr = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i64 1
   %a = load <4 x i64>, <4 x i64> addrspace(1)* %in
   %b = load <4 x i64>, <4 x i64> addrspace(1)* %b_ptr
@@ -277,7 +277,7 @@ define void @shl_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in
 ; GCN-DAG: v_mov_b32_e32 v[[VLO:[0-9]+]], 0{{$}}
 ; GCN-DAG: v_mov_b32_e32 v[[VHI:[0-9]+]], [[LO_A]]
 ; GCN: buffer_store_dwordx2 v{{\[}}[[VLO]]:[[VHI]]{{\]}}
-define void @s_shl_32_i64(i64 addrspace(1)* %out, i64 %a) {
+define amdgpu_kernel void @s_shl_32_i64(i64 addrspace(1)* %out, i64 %a) {
   %result = shl i64 %a, 32
   store i64 %result, i64 addrspace(1)* %out
   ret void
@@ -287,7 +287,7 @@ define void @s_shl_32_i64(i64 addrspace(1)* %out, i64 %a) {
 ; GCN-DAG: buffer_load_dword v[[LO_A:[0-9]+]],
 ; GCN-DAG: v_mov_b32_e32 v[[VLO:[0-9]+]], 0{{$}}
 ; GCN: buffer_store_dwordx2 v{{\[}}[[VLO]]:[[LO_A]]{{\]}}
-define void @v_shl_32_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
+define amdgpu_kernel void @v_shl_32_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
   %tid = call i32 @llvm.r600.read.tidig.x() #0
   %gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
   %gep.out = getelementptr i64, i64 addrspace(1)* %out, i32 %tid
@@ -299,7 +299,7 @@ define void @v_shl_32_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
 ; FUNC-LABEL: {{^}}s_shl_constant_i64
 ; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
-define void @s_shl_constant_i64(i64 addrspace(1)* %out, i64 %a) {
+define amdgpu_kernel void @s_shl_constant_i64(i64 addrspace(1)* %out, i64 %a) {
   %shl = shl i64 281474976710655, %a
   store i64 %shl, i64 addrspace(1)* %out, align 8
   ret void
@@ -311,7 +311,7 @@ define void @s_shl_constant_i64(i64 addrspace(1)* %out, i64 %a) {
 ; SI-DAG: s_movk_i32 s[[KHI:[0-9]+]], 0x11e{{$}}
 ; SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\]}}, s{{\[}}[[KLO]]:[[KHI]]{{\]}}, [[VAL]]
 ; SI: buffer_store_dwordx2
-define void @v_shl_constant_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
+define amdgpu_kernel void @v_shl_constant_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
   %a = load i64, i64 addrspace(1)* %aptr, align 8
   %shl = shl i64 1231231234567, %a
   store i64 %shl, i64 addrspace(1)* %out, align 8
@@ -323,7 +323,7 @@ define void @v_shl_constant_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr)
 ; SI-DAG: s_mov_b32 s[[KLO:[0-9]+]], 0x12d687{{$}}
 ; SI-DAG: s_mov_b32 s[[KHI:[0-9]+]], 0{{$}}
 ; SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\]}}, s{{\[}}[[KLO]]:[[KHI]]{{\]}}, [[VAL]]
-define void @v_shl_i64_32_bit_constant(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
+define amdgpu_kernel void @v_shl_i64_32_bit_constant(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
   %a = load i64, i64 addrspace(1)* %aptr, align 8
   %shl = shl i64 1234567, %a
   store i64 %shl, i64 addrspace(1)* %out, align 8
@@ -332,7 +332,7 @@ define void @v_shl_i64_32_bit_constant(i64 addrspace(1)* %out, i64 addrspace(1)*
 ; FUNC-LABEL: {{^}}v_shl_inline_imm_64_i64:
 ; SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\]}}, 64, {{v[0-9]+}}
-define void @v_shl_inline_imm_64_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
+define amdgpu_kernel void @v_shl_inline_imm_64_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
   %a = load i64, i64 addrspace(1)* %aptr, align 8
   %shl = shl i64 64, %a
   store i64 %shl, i64 addrspace(1)* %out, align 8
@@ -341,7 +341,7 @@ define void @v_shl_inline_imm_64_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %
 ; FUNC-LABEL: {{^}}s_shl_inline_imm_64_i64:
 ; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, 64, s{{[0-9]+}}
-define void @s_shl_inline_imm_64_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_shl_inline_imm_64_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
   %shl = shl i64 64, %a
   store i64 %shl, i64 addrspace(1)* %out, align 8
   ret void
@@ -349,7 +349,7 @@ define void @s_shl_inline_imm_64_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %
 ; FUNC-LABEL: {{^}}s_shl_inline_imm_1_i64:
 ; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, 1, s{{[0-9]+}}
-define void @s_shl_inline_imm_1_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_shl_inline_imm_1_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
   %shl = shl i64 1, %a
   store i64 %shl, i64 addrspace(1)* %out, align 8
   ret void
@@ -357,7 +357,7 @@ define void @s_shl_inline_imm_1_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %a
 ; FUNC-LABEL: {{^}}s_shl_inline_imm_1.0_i64:
 ; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, 1.0, s{{[0-9]+}}
-define void @s_shl_inline_imm_1.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_shl_inline_imm_1.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
   %shl = shl i64 4607182418800017408, %a
   store i64 %shl, i64 addrspace(1)* %out, align 8
   ret void
@@ -365,7 +365,7 @@ define void @s_shl_inline_imm_1.0_i64(i64 addrspace(1)*
 ; FUNC-LABEL: {{^}}s_shl_inline_imm_neg_1.0_i64:
 ; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, -1.0, s{{[0-9]+}}
-define void @s_shl_inline_imm_neg_1.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_shl_inline_imm_neg_1.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
   %shl = shl i64 13830554455654793216, %a
   store i64 %shl, i64 addrspace(1)* %out, align 8
   ret void
@@ -373,7 +373,7 @@ define void @s_shl_inline_imm_neg_1.0_i64(i64 addrspace(
 ; FUNC-LABEL: {{^}}s_shl_inline_imm_0.5_i64:
 ; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, 0.5, s{{[0-9]+}}
-define void @s_shl_inline_imm_0.5_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_shl_inline_imm_0.5_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
   %shl = shl i64 4602678819172646912, %a
   store i64 %shl, i64 addrspace(1)* %out, align 8
   ret void
@@ -381,7 +381,7 @@ define void @s_shl_inline_imm_0.5_i64(i64 addrspace(1)*
 ; FUNC-LABEL: {{^}}s_shl_inline_imm_neg_0.5_i64:
 ; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, -0.5, s{{[0-9]+}}
-define void @s_shl_inline_imm_neg_0.5_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_shl_inline_imm_neg_0.5_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
   %shl = shl i64 13826050856027422720, %a
   store i64 %shl, i64 addrspace(1)* %out, align 8
   ret void
@@ -389,7 +389,7 @@ define void @s_shl_inline_imm_neg_0.5_i64(i64 addrspace(
 ; FUNC-LABEL: {{^}}s_shl_inline_imm_2.0_i64:
 ; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, 2.0, s{{[0-9]+}}
-define void @s_shl_inline_imm_2.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_shl_inline_imm_2.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
   %shl = shl i64 4611686018427387904, %a
   store i64 %shl, i64 addrspace(1)* %out, align 8
   ret void
@@ -397,7 +397,7 @@ define void @s_shl_inline_imm_2.0_i64(i64 addrspace(1)*
 ; FUNC-LABEL: {{^}}s_shl_inline_imm_neg_2.0_i64:
 ; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, -2.0, s{{[0-9]+}}
-define void @s_shl_inline_imm_neg_2.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_shl_inline_imm_neg_2.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
   %shl = shl i64 13835058055282163712, %a
   store i64 %shl, i64 addrspace(1)* %out, align 8
   ret void
@@ -405,7 +405,7 @@ define void @s_shl_inline_imm_neg_2.0_i64(i64 addrspace(
 ; FUNC-LABEL: {{^}}s_shl_inline_imm_4.0_i64:
 ; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, 4.0, s{{[0-9]+}}
-define void @s_shl_inline_imm_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_shl_inline_imm_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
   %shl = shl i64 4616189618054758400, %a
   store i64 %shl, i64 addrspace(1)* %out, align 8
   ret void
@@ -413,7 +413,7 @@ define void @s_shl_inline_imm_4.0_i64(i64 addrspace(1)*
 ; FUNC-LABEL: {{^}}s_shl_inline_imm_neg_4.0_i64:
 ; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, -4.0, s{{[0-9]+}}
-define void @s_shl_inline_imm_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_shl_inline_imm_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
   %shl = shl i64 13839561654909534208, %a
   store i64 %shl, i64 addrspace(1)* %out, align 8
   ret void
@@ -427,7 +427,7 @@ define void @s_shl_inline_imm_neg_4.0_i64(i64 addrspace(
 ; SI-DAG: s_mov_b32 s[[K_LO:[0-9]+]], 4.0
 ; SI-DAG: s_mov_b32 s[[K_HI:[0-9]+]], 0{{$}}
 ; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI]]{{\]}}, s{{[0-9]+}}
-define void @s_shl_inline_imm_f32_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_shl_inline_imm_f32_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
   %shl = shl i64 1082130432, %a
   store i64 %shl, i64 addrspace(1)* %out, align 8
   ret void
@@ -439,7 +439,7 @@ define void @s_shl_inline_imm_f32_4.0_i64(i64 addrspace(
 ; SI-DAG: s_mov_b32 s[[K_HI:[0-9]+]], -1{{$}}
 ; SI-DAG: s_mov_b32 s[[K_HI_COPY:[0-9]+]], s[[K_HI]]
 ; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI_COPY]]{{\]}}, s{{[0-9]+}}
-define void @s_shl_inline_imm_f32_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_shl_inline_imm_f32_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
   %shl = shl i64 -1065353216, %a
   store i64 %shl, i64 addrspace(1)* %out, align 8
   ret void
@@ -450,7 +450,7 @@ define void @s_shl_inline_imm_f32_neg_4.0_i64(i64 addrsp
 ; SI-DAG: s_mov_b32 s[[K_HI:[0-9]+]], 4.0
 ; SI-DAG: s_mov_b32 s[[K_LO:[0-9]+]], 0{{$}}
 ; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI]]{{\]}}, s{{[0-9]+}}
-define void @s_shl_inline_high_imm_f32_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_shl_inline_high_imm_f32_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
   %shl = shl i64 4647714815446351872, %a
   store i64 %shl, i64 addrspace(1)* %out, align 8
   ret void
@@ -460,7 +460,7 @@ define void @s_shl_inline_high_imm_f32_4.0_i64(i64 addrs
 ; SI-DAG: s_mov_b32 s[[K_HI:[0-9]+]], -4.0
 ; SI-DAG: s_mov_b32 s[[K_LO:[0-9]+]], 0{{$}}
 ; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI]]{{\]}}, s{{[0-9]+}}
-define void @s_shl_inline_high_imm_f32_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
+define amdgpu_kernel void @s_shl_inline_high_imm_f32_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
   %shl = shl i64 13871086852301127680, %a
   store i64 %shl, i64 addrspace(1)* %out, align 8
   ret void
@@ -468,7 +468,7 @@ define void @s_shl_inline_high_imm_f32_neg_4.0_i64(i64 a
 ; FUNC-LABEL: {{^}}test_mul2:
 ; GCN: s_lshl_b32 s{{[0-9]}}, s{{[0-9]}}, 1
-define void @test_mul2(i32 %p) {
+define amdgpu_kernel void @test_mul2(i32 %p) {
   %i = mul i32 %p, 2
   store volatile i32 %i, i32 addrspace(1)* undef
   ret void
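For context, the SI, VI, GCN, and FUNC check prefixes exercised above come from llc RUN lines at the top of the test file, which lie outside the hunks shown here. They look roughly like the following sketch (the exact flags and prefix groupings in the real file may differ):

; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI,FUNC %s
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI,FUNC %s

Since only the calling convention on the define lines changes, the expected instruction patterns in these checks stay the same; the 33 insertions and 33 deletions in the diffstat are exactly the 33 converted define lines.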