Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/llvm.AMDGPU.bfe.i32.ll')
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.AMDGPU.bfe.i32.ll | 74
1 file changed, 37 insertions, 37 deletions
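
This change converts every test entry point in the file from "define void" to "define amdgpu_kernel void", giving each function the amdgpu_kernel calling convention used for GPU kernel entry points. A minimal sketch of the pattern, using a hypothetical kernel name that does not appear in the test file:

; Hypothetical illustration of the calling-convention update applied throughout this diff.
; The body is a placeholder; the amdgpu_kernel marker on the define is the point.
define amdgpu_kernel void @example_kernel(i32 addrspace(1)* %out) nounwind {
  store i32 0, i32 addrspace(1)* %out, align 4
  ret void
}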
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.AMDGPU.bfe.i32.ll b/llvm/test/CodeGen/AMDGPU/llvm.AMDGPU.bfe.i32.ll
index 77dd4b13498..64f0d65463b 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.AMDGPU.bfe.i32.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.AMDGPU.bfe.i32.ll
@@ -8,7 +8,7 @@ declare i32 @llvm.AMDGPU.bfe.i32(i32, i32, i32) nounwind readnone
; SI: v_bfe_i32
; EG: BFE_INT
; EG: encoding: [{{[x0-9a-f]+,[x0-9a-f]+,[x0-9a-f]+,[x0-9a-f]+,[x0-9a-f]+}},0xac
-define void @bfe_i32_arg_arg_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src1, i32 %src2) nounwind {
+define amdgpu_kernel void @bfe_i32_arg_arg_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src1, i32 %src2) nounwind {
%bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 %src0, i32 %src1, i32 %src1) nounwind readnone
store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
ret void
@@ -17,7 +17,7 @@ define void @bfe_i32_arg_arg_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src1, i
; FUNC-LABEL: {{^}}bfe_i32_arg_arg_imm:
; SI: v_bfe_i32
; EG: BFE_INT
-define void @bfe_i32_arg_arg_imm(i32 addrspace(1)* %out, i32 %src0, i32 %src1) nounwind {
+define amdgpu_kernel void @bfe_i32_arg_arg_imm(i32 addrspace(1)* %out, i32 %src0, i32 %src1) nounwind {
%bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 %src0, i32 %src1, i32 123) nounwind readnone
store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
ret void
@@ -26,7 +26,7 @@ define void @bfe_i32_arg_arg_imm(i32 addrspace(1)* %out, i32 %src0, i32 %src1) n
; FUNC-LABEL: {{^}}bfe_i32_arg_imm_arg:
; SI: v_bfe_i32
; EG: BFE_INT
-define void @bfe_i32_arg_imm_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src2) nounwind {
+define amdgpu_kernel void @bfe_i32_arg_imm_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src2) nounwind {
%bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 %src0, i32 123, i32 %src2) nounwind readnone
store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
ret void
@@ -35,7 +35,7 @@ define void @bfe_i32_arg_imm_arg(i32 addrspace(1)* %out, i32 %src0, i32 %src2) n
; FUNC-LABEL: {{^}}bfe_i32_imm_arg_arg:
; SI: v_bfe_i32
; EG: BFE_INT
-define void @bfe_i32_imm_arg_arg(i32 addrspace(1)* %out, i32 %src1, i32 %src2) nounwind {
+define amdgpu_kernel void @bfe_i32_imm_arg_arg(i32 addrspace(1)* %out, i32 %src1, i32 %src2) nounwind {
%bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 123, i32 %src1, i32 %src2) nounwind readnone
store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
ret void
@@ -43,7 +43,7 @@ define void @bfe_i32_imm_arg_arg(i32 addrspace(1)* %out, i32 %src1, i32 %src2) n
; FUNC-LABEL: {{^}}v_bfe_print_arg:
; SI: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 2, 8
-define void @v_bfe_print_arg(i32 addrspace(1)* %out, i32 addrspace(1)* %src0) nounwind {
+define amdgpu_kernel void @v_bfe_print_arg(i32 addrspace(1)* %out, i32 addrspace(1)* %src0) nounwind {
%load = load i32, i32 addrspace(1)* %src0, align 4
%bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 %load, i32 2, i32 8) nounwind readnone
store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
@@ -54,7 +54,7 @@ define void @v_bfe_print_arg(i32 addrspace(1)* %out, i32 addrspace(1)* %src0) no
; SI-NOT: {{[^@]}}bfe
; SI: s_endpgm
; EG-NOT: BFE
-define void @bfe_i32_arg_0_width_reg_offset(i32 addrspace(1)* %out, i32 %src0, i32 %src1) nounwind {
+define amdgpu_kernel void @bfe_i32_arg_0_width_reg_offset(i32 addrspace(1)* %out, i32 %src0, i32 %src1) nounwind {
%bfe_u32 = call i32 @llvm.AMDGPU.bfe.i32(i32 %src0, i32 %src1, i32 0) nounwind readnone
store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
ret void
@@ -64,7 +64,7 @@ define void @bfe_i32_arg_0_width_reg_offset(i32 addrspace(1)* %out, i32 %src0, i
; SI-NOT: {{[^@]}}bfe
; SI: s_endpgm
; EG-NOT: BFE
-define void @bfe_i32_arg_0_width_imm_offset(i32 addrspace(1)* %out, i32 %src0, i32 %src1) nounwind {
+define amdgpu_kernel void @bfe_i32_arg_0_width_imm_offset(i32 addrspace(1)* %out, i32 %src0, i32 %src1) nounwind {
%bfe_u32 = call i32 @llvm.AMDGPU.bfe.i32(i32 %src0, i32 8, i32 0) nounwind readnone
store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
ret void
@@ -74,7 +74,7 @@ define void @bfe_i32_arg_0_width_imm_offset(i32 addrspace(1)* %out, i32 %src0, i
; SI: v_lshlrev_b32_e32 v{{[0-9]+}}, 31, v{{[0-9]+}}
; SI: v_ashrrev_i32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}
; SI: s_endpgm
-define void @bfe_i32_test_6(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @bfe_i32_test_6(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
%x = load i32, i32 addrspace(1)* %in, align 4
%shl = shl i32 %x, 31
%bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %shl, i32 1, i32 31)
@@ -88,7 +88,7 @@ define void @bfe_i32_test_6(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounw
; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], 0
; SI: buffer_store_dword [[VREG]],
; SI: s_endpgm
-define void @bfe_i32_test_7(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @bfe_i32_test_7(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
%x = load i32, i32 addrspace(1)* %in, align 4
%shl = shl i32 %x, 31
%bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %shl, i32 0, i32 31)
@@ -100,7 +100,7 @@ define void @bfe_i32_test_7(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounw
; SI: buffer_load_dword
; SI: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 1
; SI: s_endpgm
-define void @bfe_i32_test_8(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @bfe_i32_test_8(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
%x = load i32, i32 addrspace(1)* %in, align 4
%shl = shl i32 %x, 31
%bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %shl, i32 31, i32 1)
@@ -113,7 +113,7 @@ define void @bfe_i32_test_8(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounw
; SI: v_ashrrev_i32_e32 v{{[0-9]+}}, 31, v{{[0-9]+}}
; SI-NOT: {{[^@]}}bfe
; SI: s_endpgm
-define void @bfe_i32_test_9(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @bfe_i32_test_9(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
%x = load i32, i32 addrspace(1)* %in, align 4
%bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %x, i32 31, i32 1)
store i32 %bfe, i32 addrspace(1)* %out, align 4
@@ -125,7 +125,7 @@ define void @bfe_i32_test_9(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounw
; SI: v_ashrrev_i32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}
; SI-NOT: {{[^@]}}bfe
; SI: s_endpgm
-define void @bfe_i32_test_10(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @bfe_i32_test_10(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
%x = load i32, i32 addrspace(1)* %in, align 4
%bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %x, i32 1, i32 31)
store i32 %bfe, i32 addrspace(1)* %out, align 4
@@ -137,7 +137,7 @@ define void @bfe_i32_test_10(i32 addrspace(1)* %out, i32 addrspace(1)* %in) noun
; SI: v_ashrrev_i32_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
; SI-NOT: {{[^@]}}bfe
; SI: s_endpgm
-define void @bfe_i32_test_11(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @bfe_i32_test_11(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
%x = load i32, i32 addrspace(1)* %in, align 4
%bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %x, i32 8, i32 24)
store i32 %bfe, i32 addrspace(1)* %out, align 4
@@ -149,7 +149,7 @@ define void @bfe_i32_test_11(i32 addrspace(1)* %out, i32 addrspace(1)* %in) noun
; SI: v_ashrrev_i32_e32 v{{[0-9]+}}, 24, v{{[0-9]+}}
; SI-NOT: {{[^@]}}bfe
; SI: s_endpgm
-define void @bfe_i32_test_12(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @bfe_i32_test_12(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
%x = load i32, i32 addrspace(1)* %in, align 4
%bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %x, i32 24, i32 8)
store i32 %bfe, i32 addrspace(1)* %out, align 4
@@ -160,7 +160,7 @@ define void @bfe_i32_test_12(i32 addrspace(1)* %out, i32 addrspace(1)* %in) noun
; SI: v_ashrrev_i32_e32 {{v[0-9]+}}, 31, {{v[0-9]+}}
; SI-NOT: {{[^@]}}bfe
; SI: s_endpgm
-define void @bfe_i32_test_13(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @bfe_i32_test_13(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
%x = load i32, i32 addrspace(1)* %in, align 4
%shl = ashr i32 %x, 31
%bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %shl, i32 31, i32 1)
@@ -171,7 +171,7 @@ define void @bfe_i32_test_13(i32 addrspace(1)* %out, i32 addrspace(1)* %in) noun
; SI-NOT: lshr
; SI-NOT: {{[^@]}}bfe
; SI: s_endpgm
-define void @bfe_i32_test_14(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @bfe_i32_test_14(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
%x = load i32, i32 addrspace(1)* %in, align 4
%shl = lshr i32 %x, 31
%bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %shl, i32 31, i32 1)
@@ -184,7 +184,7 @@ define void @bfe_i32_test_14(i32 addrspace(1)* %out, i32 addrspace(1)* %in) noun
; SI: buffer_store_dword [[VREG]],
; SI: s_endpgm
; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_0(i32 addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @bfe_i32_constant_fold_test_0(i32 addrspace(1)* %out) nounwind {
%bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 0, i32 0, i32 0) nounwind readnone
store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
ret void
@@ -196,7 +196,7 @@ define void @bfe_i32_constant_fold_test_0(i32 addrspace(1)* %out) nounwind {
; SI: buffer_store_dword [[VREG]],
; SI: s_endpgm
; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_1(i32 addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @bfe_i32_constant_fold_test_1(i32 addrspace(1)* %out) nounwind {
%bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 12334, i32 0, i32 0) nounwind readnone
store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
ret void
@@ -208,7 +208,7 @@ define void @bfe_i32_constant_fold_test_1(i32 addrspace(1)* %out) nounwind {
; SI: buffer_store_dword [[VREG]],
; SI: s_endpgm
; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_2(i32 addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @bfe_i32_constant_fold_test_2(i32 addrspace(1)* %out) nounwind {
%bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 0, i32 0, i32 1) nounwind readnone
store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
ret void
@@ -220,7 +220,7 @@ define void @bfe_i32_constant_fold_test_2(i32 addrspace(1)* %out) nounwind {
; SI: buffer_store_dword [[VREG]],
; SI: s_endpgm
; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_3(i32 addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @bfe_i32_constant_fold_test_3(i32 addrspace(1)* %out) nounwind {
%bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 1, i32 0, i32 1) nounwind readnone
store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
ret void
@@ -232,7 +232,7 @@ define void @bfe_i32_constant_fold_test_3(i32 addrspace(1)* %out) nounwind {
; SI: buffer_store_dword [[VREG]],
; SI: s_endpgm
; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_4(i32 addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @bfe_i32_constant_fold_test_4(i32 addrspace(1)* %out) nounwind {
%bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 4294967295, i32 0, i32 1) nounwind readnone
store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
ret void
@@ -244,7 +244,7 @@ define void @bfe_i32_constant_fold_test_4(i32 addrspace(1)* %out) nounwind {
; SI: buffer_store_dword [[VREG]],
; SI: s_endpgm
; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_5(i32 addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @bfe_i32_constant_fold_test_5(i32 addrspace(1)* %out) nounwind {
%bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 128, i32 7, i32 1) nounwind readnone
store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
ret void
@@ -256,7 +256,7 @@ define void @bfe_i32_constant_fold_test_5(i32 addrspace(1)* %out) nounwind {
; SI: buffer_store_dword [[VREG]],
; SI: s_endpgm
; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_6(i32 addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @bfe_i32_constant_fold_test_6(i32 addrspace(1)* %out) nounwind {
%bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 128, i32 0, i32 8) nounwind readnone
store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
ret void
@@ -268,7 +268,7 @@ define void @bfe_i32_constant_fold_test_6(i32 addrspace(1)* %out) nounwind {
; SI: buffer_store_dword [[VREG]],
; SI: s_endpgm
; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_7(i32 addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @bfe_i32_constant_fold_test_7(i32 addrspace(1)* %out) nounwind {
%bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 127, i32 0, i32 8) nounwind readnone
store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
ret void
@@ -280,7 +280,7 @@ define void @bfe_i32_constant_fold_test_7(i32 addrspace(1)* %out) nounwind {
; SI: buffer_store_dword [[VREG]],
; SI: s_endpgm
; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_8(i32 addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @bfe_i32_constant_fold_test_8(i32 addrspace(1)* %out) nounwind {
%bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 127, i32 6, i32 8) nounwind readnone
store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
ret void
@@ -292,7 +292,7 @@ define void @bfe_i32_constant_fold_test_8(i32 addrspace(1)* %out) nounwind {
; SI: buffer_store_dword [[VREG]],
; SI: s_endpgm
; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_9(i32 addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @bfe_i32_constant_fold_test_9(i32 addrspace(1)* %out) nounwind {
%bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 65536, i32 16, i32 8) nounwind readnone
store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
ret void
@@ -304,7 +304,7 @@ define void @bfe_i32_constant_fold_test_9(i32 addrspace(1)* %out) nounwind {
; SI: buffer_store_dword [[VREG]],
; SI: s_endpgm
; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_10(i32 addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @bfe_i32_constant_fold_test_10(i32 addrspace(1)* %out) nounwind {
%bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 65535, i32 16, i32 16) nounwind readnone
store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
ret void
@@ -316,7 +316,7 @@ define void @bfe_i32_constant_fold_test_10(i32 addrspace(1)* %out) nounwind {
; SI: buffer_store_dword [[VREG]],
; SI: s_endpgm
; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_11(i32 addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @bfe_i32_constant_fold_test_11(i32 addrspace(1)* %out) nounwind {
%bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 160, i32 4, i32 4) nounwind readnone
store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
ret void
@@ -328,7 +328,7 @@ define void @bfe_i32_constant_fold_test_11(i32 addrspace(1)* %out) nounwind {
; SI: buffer_store_dword [[VREG]],
; SI: s_endpgm
; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_12(i32 addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @bfe_i32_constant_fold_test_12(i32 addrspace(1)* %out) nounwind {
%bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 160, i32 31, i32 1) nounwind readnone
store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
ret void
@@ -340,7 +340,7 @@ define void @bfe_i32_constant_fold_test_12(i32 addrspace(1)* %out) nounwind {
; SI: buffer_store_dword [[VREG]],
; SI: s_endpgm
; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_13(i32 addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @bfe_i32_constant_fold_test_13(i32 addrspace(1)* %out) nounwind {
%bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 131070, i32 16, i32 16) nounwind readnone
store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
ret void
@@ -352,7 +352,7 @@ define void @bfe_i32_constant_fold_test_13(i32 addrspace(1)* %out) nounwind {
; SI: buffer_store_dword [[VREG]],
; SI: s_endpgm
; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_14(i32 addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @bfe_i32_constant_fold_test_14(i32 addrspace(1)* %out) nounwind {
%bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 160, i32 2, i32 30) nounwind readnone
store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
ret void
@@ -364,7 +364,7 @@ define void @bfe_i32_constant_fold_test_14(i32 addrspace(1)* %out) nounwind {
; SI: buffer_store_dword [[VREG]],
; SI: s_endpgm
; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_15(i32 addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @bfe_i32_constant_fold_test_15(i32 addrspace(1)* %out) nounwind {
%bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 160, i32 4, i32 28) nounwind readnone
store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
ret void
@@ -376,7 +376,7 @@ define void @bfe_i32_constant_fold_test_15(i32 addrspace(1)* %out) nounwind {
; SI: buffer_store_dword [[VREG]],
; SI: s_endpgm
; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_16(i32 addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @bfe_i32_constant_fold_test_16(i32 addrspace(1)* %out) nounwind {
%bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 4294967295, i32 1, i32 7) nounwind readnone
store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
ret void
@@ -388,7 +388,7 @@ define void @bfe_i32_constant_fold_test_16(i32 addrspace(1)* %out) nounwind {
; SI: buffer_store_dword [[VREG]],
; SI: s_endpgm
; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_17(i32 addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @bfe_i32_constant_fold_test_17(i32 addrspace(1)* %out) nounwind {
%bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 255, i32 1, i32 31) nounwind readnone
store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
ret void
@@ -400,7 +400,7 @@ define void @bfe_i32_constant_fold_test_17(i32 addrspace(1)* %out) nounwind {
; SI: buffer_store_dword [[VREG]],
; SI: s_endpgm
; EG-NOT: BFE
-define void @bfe_i32_constant_fold_test_18(i32 addrspace(1)* %out) nounwind {
+define amdgpu_kernel void @bfe_i32_constant_fold_test_18(i32 addrspace(1)* %out) nounwind {
%bfe_i32 = call i32 @llvm.AMDGPU.bfe.i32(i32 255, i32 31, i32 1) nounwind readnone
store i32 %bfe_i32, i32 addrspace(1)* %out, align 4
ret void
@@ -412,7 +412,7 @@ define void @bfe_i32_constant_fold_test_18(i32 addrspace(1)* %out) nounwind {
; SI-NOT: v_ashr
; SI: v_bfe_i32 [[BFE:v[0-9]+]], [[LOAD]], 0, 24
; SI: buffer_store_dword [[BFE]],
-define void @bfe_sext_in_reg_i24(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @bfe_sext_in_reg_i24(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
%x = load i32, i32 addrspace(1)* %in, align 4
%bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %x, i32 0, i32 24)
%shl = shl i32 %bfe, 8
@@ -428,7 +428,7 @@ define void @bfe_sext_in_reg_i24(i32 addrspace(1)* %out, i32 addrspace(1)* %in)
; SI: v_add_i32_e32 [[TMP1:v[0-9]+]], vcc, [[TMP0]], [[BFE]]
; SI: v_ashrrev_i32_e32 [[TMP2:v[0-9]+]], 1, [[TMP1]]
; SI: buffer_store_dword [[TMP2]]
-define void @simplify_demanded_bfe_sdiv(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+define amdgpu_kernel void @simplify_demanded_bfe_sdiv(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
%src = load i32, i32 addrspace(1)* %in, align 4
%bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %src, i32 1, i32 16) nounwind readnone
%div = sdiv i32 %bfe, 2