Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/fcanonicalize.f16.ll')
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fcanonicalize.f16.ll  |  84
1 file changed, 42 insertions(+), 42 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/fcanonicalize.f16.ll b/llvm/test/CodeGen/AMDGPU/fcanonicalize.f16.ll
index e1edc26d7ed..a82b310726a 100644
--- a/llvm/test/CodeGen/AMDGPU/fcanonicalize.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fcanonicalize.f16.ll
@@ -9,7 +9,7 @@ declare <2 x half> @llvm.canonicalize.v2f16(<2 x half>) #0
; GCN-LABEL: {{^}}v_test_canonicalize_var_f16:
; GCN: v_mul_f16_e32 [[REG:v[0-9]+]], 1.0, {{v[0-9]+}}
; GCN: buffer_store_short [[REG]]
-define void @v_test_canonicalize_var_f16(half addrspace(1)* %out) #1 {
+define amdgpu_kernel void @v_test_canonicalize_var_f16(half addrspace(1)* %out) #1 {
%val = load half, half addrspace(1)* %out
%canonicalized = call half @llvm.canonicalize.f16(half %val)
store half %canonicalized, half addrspace(1)* %out
@@ -19,7 +19,7 @@ define void @v_test_canonicalize_var_f16(half addrspace(1)* %out) #1 {
; GCN-LABEL: {{^}}s_test_canonicalize_var_f16:
; GCN: v_mul_f16_e64 [[REG:v[0-9]+]], 1.0, {{s[0-9]+}}
; GCN: buffer_store_short [[REG]]
-define void @s_test_canonicalize_var_f16(half addrspace(1)* %out, i16 zeroext %val.arg) #1 {
+define amdgpu_kernel void @s_test_canonicalize_var_f16(half addrspace(1)* %out, i16 zeroext %val.arg) #1 {
%val = bitcast i16 %val.arg to half
%canonicalized = call half @llvm.canonicalize.f16(half %val)
store half %canonicalized, half addrspace(1)* %out
@@ -29,7 +29,7 @@ define void @s_test_canonicalize_var_f16(half addrspace(1)* %out, i16 zeroext %v
; GCN-LABEL: {{^}}v_test_canonicalize_fabs_var_f16:
; GCN: v_mul_f16_e64 [[REG:v[0-9]+]], 1.0, |{{v[0-9]+}}|
; GCN: buffer_store_short [[REG]]
-define void @v_test_canonicalize_fabs_var_f16(half addrspace(1)* %out) #1 {
+define amdgpu_kernel void @v_test_canonicalize_fabs_var_f16(half addrspace(1)* %out) #1 {
%val = load half, half addrspace(1)* %out
%val.fabs = call half @llvm.fabs.f16(half %val)
%canonicalized = call half @llvm.canonicalize.f16(half %val.fabs)
@@ -40,7 +40,7 @@ define void @v_test_canonicalize_fabs_var_f16(half addrspace(1)* %out) #1 {
; GCN-LABEL: {{^}}v_test_canonicalize_fneg_fabs_var_f16:
; GCN: v_mul_f16_e64 [[REG:v[0-9]+]], 1.0, -|{{v[0-9]+}}|
; GCN: buffer_store_short [[REG]]
-define void @v_test_canonicalize_fneg_fabs_var_f16(half addrspace(1)* %out) #1 {
+define amdgpu_kernel void @v_test_canonicalize_fneg_fabs_var_f16(half addrspace(1)* %out) #1 {
%val = load half, half addrspace(1)* %out
%val.fabs = call half @llvm.fabs.f16(half %val)
%val.fabs.fneg = fsub half -0.0, %val.fabs
@@ -52,7 +52,7 @@ define void @v_test_canonicalize_fneg_fabs_var_f16(half addrspace(1)* %out) #1 {
; GCN-LABEL: {{^}}v_test_canonicalize_fneg_var_f16:
; GCN: v_mul_f16_e64 [[REG:v[0-9]+]], 1.0, -{{v[0-9]+}}
; GCN: buffer_store_short [[REG]]
-define void @v_test_canonicalize_fneg_var_f16(half addrspace(1)* %out) #1 {
+define amdgpu_kernel void @v_test_canonicalize_fneg_var_f16(half addrspace(1)* %out) #1 {
%val = load half, half addrspace(1)* %out
%val.fneg = fsub half -0.0, %val
%canonicalized = call half @llvm.canonicalize.f16(half %val.fneg)
@@ -63,7 +63,7 @@ define void @v_test_canonicalize_fneg_var_f16(half addrspace(1)* %out) #1 {
; GCN-LABEL: {{^}}test_fold_canonicalize_p0_f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0{{$}}
; GCN: buffer_store_short [[REG]]
-define void @test_fold_canonicalize_p0_f16(half addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_p0_f16(half addrspace(1)* %out) #1 {
%canonicalized = call half @llvm.canonicalize.f16(half 0.0)
store half %canonicalized, half addrspace(1)* %out
ret void
@@ -72,7 +72,7 @@ define void @test_fold_canonicalize_p0_f16(half addrspace(1)* %out) #1 {
; GCN-LABEL: {{^}}test_fold_canonicalize_n0_f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0xffff8000{{$}}
; GCN: buffer_store_short [[REG]]
-define void @test_fold_canonicalize_n0_f16(half addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_n0_f16(half addrspace(1)* %out) #1 {
%canonicalized = call half @llvm.canonicalize.f16(half -0.0)
store half %canonicalized, half addrspace(1)* %out
ret void
@@ -81,7 +81,7 @@ define void @test_fold_canonicalize_n0_f16(half addrspace(1)* %out) #1 {
; GCN-LABEL: {{^}}test_fold_canonicalize_p1_f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x3c00{{$}}
; GCN: buffer_store_short [[REG]]
-define void @test_fold_canonicalize_p1_f16(half addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_p1_f16(half addrspace(1)* %out) #1 {
%canonicalized = call half @llvm.canonicalize.f16(half 1.0)
store half %canonicalized, half addrspace(1)* %out
ret void
@@ -90,7 +90,7 @@ define void @test_fold_canonicalize_p1_f16(half addrspace(1)* %out) #1 {
; GCN-LABEL: {{^}}test_fold_canonicalize_n1_f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0xffffbc00{{$}}
; GCN: buffer_store_short [[REG]]
-define void @test_fold_canonicalize_n1_f16(half addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_n1_f16(half addrspace(1)* %out) #1 {
%canonicalized = call half @llvm.canonicalize.f16(half -1.0)
store half %canonicalized, half addrspace(1)* %out
ret void
@@ -99,7 +99,7 @@ define void @test_fold_canonicalize_n1_f16(half addrspace(1)* %out) #1 {
; GCN-LABEL: {{^}}test_fold_canonicalize_literal_f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x4c00{{$}}
; GCN: buffer_store_short [[REG]]
-define void @test_fold_canonicalize_literal_f16(half addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_literal_f16(half addrspace(1)* %out) #1 {
%canonicalized = call half @llvm.canonicalize.f16(half 16.0)
store half %canonicalized, half addrspace(1)* %out
ret void
@@ -108,7 +108,7 @@ define void @test_fold_canonicalize_literal_f16(half addrspace(1)* %out) #1 {
; GCN-LABEL: {{^}}test_default_denormals_fold_canonicalize_denormal0_f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x3ff{{$}}
; GCN: buffer_store_short [[REG]]
-define void @test_default_denormals_fold_canonicalize_denormal0_f16(half addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_default_denormals_fold_canonicalize_denormal0_f16(half addrspace(1)* %out) #1 {
%canonicalized = call half @llvm.canonicalize.f16(half 0xH03FF)
store half %canonicalized, half addrspace(1)* %out
ret void
@@ -117,7 +117,7 @@ define void @test_default_denormals_fold_canonicalize_denormal0_f16(half addrspa
; GCN-LABEL: {{^}}test_denormals_fold_canonicalize_denormal0_f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x3ff{{$}}
; GCN: buffer_store_short [[REG]]
-define void @test_denormals_fold_canonicalize_denormal0_f16(half addrspace(1)* %out) #3 {
+define amdgpu_kernel void @test_denormals_fold_canonicalize_denormal0_f16(half addrspace(1)* %out) #3 {
%canonicalized = call half @llvm.canonicalize.f16(half 0xH03FF)
store half %canonicalized, half addrspace(1)* %out
ret void
@@ -126,7 +126,7 @@ define void @test_denormals_fold_canonicalize_denormal0_f16(half addrspace(1)* %
; GCN-LABEL: {{^}}test_default_denormals_fold_canonicalize_denormal1_f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0xffff83ff{{$}}
; GCN: buffer_store_short [[REG]]
-define void @test_default_denormals_fold_canonicalize_denormal1_f16(half addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_default_denormals_fold_canonicalize_denormal1_f16(half addrspace(1)* %out) #1 {
%canonicalized = call half @llvm.canonicalize.f16(half 0xH83FF)
store half %canonicalized, half addrspace(1)* %out
ret void
@@ -135,7 +135,7 @@ define void @test_default_denormals_fold_canonicalize_denormal1_f16(half addrspa
; GCN-LABEL: {{^}}test_denormals_fold_canonicalize_denormal1_f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0xffff83ff{{$}}
; GCN: buffer_store_short [[REG]]
-define void @test_denormals_fold_canonicalize_denormal1_f16(half addrspace(1)* %out) #3 {
+define amdgpu_kernel void @test_denormals_fold_canonicalize_denormal1_f16(half addrspace(1)* %out) #3 {
%canonicalized = call half @llvm.canonicalize.f16(half 0xH83FF)
store half %canonicalized, half addrspace(1)* %out
ret void
@@ -144,7 +144,7 @@ define void @test_denormals_fold_canonicalize_denormal1_f16(half addrspace(1)* %
; GCN-LABEL: {{^}}test_fold_canonicalize_qnan_f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x7c00{{$}}
; GCN: buffer_store_short [[REG]]
-define void @test_fold_canonicalize_qnan_f16(half addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_qnan_f16(half addrspace(1)* %out) #1 {
%canonicalized = call half @llvm.canonicalize.f16(half 0xH7C00)
store half %canonicalized, half addrspace(1)* %out
ret void
@@ -153,7 +153,7 @@ define void @test_fold_canonicalize_qnan_f16(half addrspace(1)* %out) #1 {
; GCN-LABEL: {{^}}test_fold_canonicalize_qnan_value_neg1_f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x7e00{{$}}
; GCN: buffer_store_short [[REG]]
-define void @test_fold_canonicalize_qnan_value_neg1_f16(half addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_qnan_value_neg1_f16(half addrspace(1)* %out) #1 {
%canonicalized = call half @llvm.canonicalize.f16(half bitcast (i16 -1 to half))
store half %canonicalized, half addrspace(1)* %out
ret void
@@ -162,7 +162,7 @@ define void @test_fold_canonicalize_qnan_value_neg1_f16(half addrspace(1)* %out)
; GCN-LABEL: {{^}}test_fold_canonicalize_qnan_value_neg2_f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x7e00{{$}}
; GCN: buffer_store_short [[REG]]
-define void @test_fold_canonicalize_qnan_value_neg2_f16(half addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_qnan_value_neg2_f16(half addrspace(1)* %out) #1 {
%canonicalized = call half @llvm.canonicalize.f16(half bitcast (i16 -2 to half))
store half %canonicalized, half addrspace(1)* %out
ret void
@@ -171,7 +171,7 @@ define void @test_fold_canonicalize_qnan_value_neg2_f16(half addrspace(1)* %out)
; GCN-LABEL: {{^}}test_fold_canonicalize_snan0_value_f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x7e00{{$}}
; GCN: buffer_store_short [[REG]]
-define void @test_fold_canonicalize_snan0_value_f16(half addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_snan0_value_f16(half addrspace(1)* %out) #1 {
%canonicalized = call half @llvm.canonicalize.f16(half 0xH7C01)
store half %canonicalized, half addrspace(1)* %out
ret void
@@ -180,7 +180,7 @@ define void @test_fold_canonicalize_snan0_value_f16(half addrspace(1)* %out) #1
; GCN-LABEL: {{^}}test_fold_canonicalize_snan1_value_f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x7e00{{$}}
; GCN: buffer_store_short [[REG]]
-define void @test_fold_canonicalize_snan1_value_f16(half addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_snan1_value_f16(half addrspace(1)* %out) #1 {
%canonicalized = call half @llvm.canonicalize.f16(half 0xH7DFF)
store half %canonicalized, half addrspace(1)* %out
ret void
@@ -189,7 +189,7 @@ define void @test_fold_canonicalize_snan1_value_f16(half addrspace(1)* %out) #1
; GCN-LABEL: {{^}}test_fold_canonicalize_snan2_value_f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x7e00{{$}}
; GCN: buffer_store_short [[REG]]
-define void @test_fold_canonicalize_snan2_value_f16(half addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_snan2_value_f16(half addrspace(1)* %out) #1 {
%canonicalized = call half @llvm.canonicalize.f16(half 0xHFDFF)
store half %canonicalized, half addrspace(1)* %out
ret void
@@ -198,7 +198,7 @@ define void @test_fold_canonicalize_snan2_value_f16(half addrspace(1)* %out) #1
; GCN-LABEL: {{^}}test_fold_canonicalize_snan3_value_f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x7e00{{$}}
; GCN: buffer_store_short [[REG]]
-define void @test_fold_canonicalize_snan3_value_f16(half addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_snan3_value_f16(half addrspace(1)* %out) #1 {
%canonicalized = call half @llvm.canonicalize.f16(half 0xHFC01)
store half %canonicalized, half addrspace(1)* %out
ret void
@@ -211,7 +211,7 @@ define void @test_fold_canonicalize_snan3_value_f16(half addrspace(1)* %out) #1
; GFX9: v_pk_mul_f16 [[REG:v[0-9]+]], 1.0, {{v[0-9]+$}}
; GFX9: buffer_store_dword [[REG]]
-define void @v_test_canonicalize_var_v2f16(<2 x half> addrspace(1)* %out) #1 {
+define amdgpu_kernel void @v_test_canonicalize_var_v2f16(<2 x half> addrspace(1)* %out) #1 {
%val = load <2 x half>, <2 x half> addrspace(1)* %out
%canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %val)
store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
@@ -229,7 +229,7 @@ define void @v_test_canonicalize_var_v2f16(<2 x half> addrspace(1)* %out) #1 {
; GFX9: v_and_b32_e32 [[ABS:v[0-9]+]], 0x7fff7fff, v{{[0-9]+}}
; GFX9: v_pk_mul_f16 [[REG:v[0-9]+]], 1.0, [[ABS]]{{$}}
; GCN: buffer_store_dword
-define void @v_test_canonicalize_fabs_var_v2f16(<2 x half> addrspace(1)* %out) #1 {
+define amdgpu_kernel void @v_test_canonicalize_fabs_var_v2f16(<2 x half> addrspace(1)* %out) #1 {
%val = load <2 x half>, <2 x half> addrspace(1)* %out
%val.fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %val)
%canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %val.fabs)
@@ -246,7 +246,7 @@ define void @v_test_canonicalize_fabs_var_v2f16(<2 x half> addrspace(1)* %out) #
; GFX9: v_and_b32_e32 [[ABS:v[0-9]+]], 0x7fff7fff, v{{[0-9]+}}
; GFX9: v_pk_mul_f16 [[REG:v[0-9]+]], 1.0, [[ABS]] neg_lo:[0,1] neg_hi:[0,1]{{$}}
; GCN: buffer_store_dword
-define void @v_test_canonicalize_fneg_fabs_var_v2f16(<2 x half> addrspace(1)* %out) #1 {
+define amdgpu_kernel void @v_test_canonicalize_fneg_fabs_var_v2f16(<2 x half> addrspace(1)* %out) #1 {
%val = load <2 x half>, <2 x half> addrspace(1)* %out
%val.fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %val)
%val.fabs.fneg = fsub <2 x half> <half -0.0, half -0.0>, %val.fabs
@@ -265,7 +265,7 @@ define void @v_test_canonicalize_fneg_fabs_var_v2f16(<2 x half> addrspace(1)* %o
; GFX9: v_pk_mul_f16 [[REG:v[0-9]+]], 1.0, {{v[0-9]+}} neg_lo:[0,1] neg_hi:[0,1]{{$}}
; GFX9: buffer_store_dword [[REG]]
-define void @v_test_canonicalize_fneg_var_v2f16(<2 x half> addrspace(1)* %out) #1 {
+define amdgpu_kernel void @v_test_canonicalize_fneg_var_v2f16(<2 x half> addrspace(1)* %out) #1 {
%val = load <2 x half>, <2 x half> addrspace(1)* %out
%fneg.val = fsub <2 x half> <half -0.0, half -0.0>, %val
%canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %fneg.val)
@@ -280,7 +280,7 @@ define void @v_test_canonicalize_fneg_var_v2f16(<2 x half> addrspace(1)* %out) #
; GFX9: v_pk_mul_f16 [[REG:v[0-9]+]], 1.0, {{s[0-9]+$}}
; GFX9: buffer_store_dword [[REG]]
-define void @s_test_canonicalize_var_v2f16(<2 x half> addrspace(1)* %out, i32 zeroext %val.arg) #1 {
+define amdgpu_kernel void @s_test_canonicalize_var_v2f16(<2 x half> addrspace(1)* %out, i32 zeroext %val.arg) #1 {
%val = bitcast i32 %val.arg to <2 x half>
%canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %val)
store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
@@ -290,7 +290,7 @@ define void @s_test_canonicalize_var_v2f16(<2 x half> addrspace(1)* %out, i32 ze
; GCN-LABEL: {{^}}test_fold_canonicalize_p0_v2f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @test_fold_canonicalize_p0_v2f16(<2 x half> addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_p0_v2f16(<2 x half> addrspace(1)* %out) #1 {
%canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> zeroinitializer)
store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
ret void
@@ -299,7 +299,7 @@ define void @test_fold_canonicalize_p0_v2f16(<2 x half> addrspace(1)* %out) #1 {
; GCN-LABEL: {{^}}test_fold_canonicalize_n0_v2f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x80008000{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @test_fold_canonicalize_n0_v2f16(<2 x half> addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_n0_v2f16(<2 x half> addrspace(1)* %out) #1 {
%canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> <half -0.0, half -0.0>)
store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
ret void
@@ -308,7 +308,7 @@ define void @test_fold_canonicalize_n0_v2f16(<2 x half> addrspace(1)* %out) #1 {
; GCN-LABEL: {{^}}test_fold_canonicalize_p1_v2f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x3c003c00{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @test_fold_canonicalize_p1_v2f16(<2 x half> addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_p1_v2f16(<2 x half> addrspace(1)* %out) #1 {
%canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> <half 1.0, half 1.0>)
store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
ret void
@@ -317,7 +317,7 @@ define void @test_fold_canonicalize_p1_v2f16(<2 x half> addrspace(1)* %out) #1 {
; GCN-LABEL: {{^}}test_fold_canonicalize_n1_v2f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0xbc00bc00{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @test_fold_canonicalize_n1_v2f16(<2 x half> addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_n1_v2f16(<2 x half> addrspace(1)* %out) #1 {
%canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> <half -1.0, half -1.0>)
store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
ret void
@@ -326,7 +326,7 @@ define void @test_fold_canonicalize_n1_v2f16(<2 x half> addrspace(1)* %out) #1 {
; GCN-LABEL: {{^}}test_fold_canonicalize_literal_v2f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x4c004c00{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @test_fold_canonicalize_literal_v2f16(<2 x half> addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_literal_v2f16(<2 x half> addrspace(1)* %out) #1 {
%canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> <half 16.0, half 16.0>)
store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
ret void
@@ -335,7 +335,7 @@ define void @test_fold_canonicalize_literal_v2f16(<2 x half> addrspace(1)* %out)
; GCN-LABEL: {{^}}test_no_denormals_fold_canonicalize_denormal0_v2f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x3ff03ff{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @test_no_denormals_fold_canonicalize_denormal0_v2f16(<2 x half> addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_no_denormals_fold_canonicalize_denormal0_v2f16(<2 x half> addrspace(1)* %out) #1 {
%canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> <half 0xH03FF, half 0xH03FF>)
store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
ret void
@@ -344,7 +344,7 @@ define void @test_no_denormals_fold_canonicalize_denormal0_v2f16(<2 x half> addr
; GCN-LABEL: {{^}}test_denormals_fold_canonicalize_denormal0_v2f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x3ff03ff{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @test_denormals_fold_canonicalize_denormal0_v2f16(<2 x half> addrspace(1)* %out) #3 {
+define amdgpu_kernel void @test_denormals_fold_canonicalize_denormal0_v2f16(<2 x half> addrspace(1)* %out) #3 {
%canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> <half 0xH03FF, half 0xH03FF>)
store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
ret void
@@ -353,7 +353,7 @@ define void @test_denormals_fold_canonicalize_denormal0_v2f16(<2 x half> addrspa
; GCN-LABEL: {{^}}test_no_denormals_fold_canonicalize_denormal1_v2f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x83ff83ff{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @test_no_denormals_fold_canonicalize_denormal1_v2f16(<2 x half> addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_no_denormals_fold_canonicalize_denormal1_v2f16(<2 x half> addrspace(1)* %out) #1 {
%canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> <half 0xH83FF, half 0xH83FF>)
store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
ret void
@@ -362,7 +362,7 @@ define void @test_no_denormals_fold_canonicalize_denormal1_v2f16(<2 x half> addr
; GCN-LABEL: {{^}}test_denormals_fold_canonicalize_denormal1_v2f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x83ff83ff{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @test_denormals_fold_canonicalize_denormal1_v2f16(<2 x half> addrspace(1)* %out) #3 {
+define amdgpu_kernel void @test_denormals_fold_canonicalize_denormal1_v2f16(<2 x half> addrspace(1)* %out) #3 {
%canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> <half 0xH83FF, half 0xH83FF>)
store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
ret void
@@ -371,7 +371,7 @@ define void @test_denormals_fold_canonicalize_denormal1_v2f16(<2 x half> addrspa
; GCN-LABEL: {{^}}test_fold_canonicalize_qnan_v2f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x7c007c00{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @test_fold_canonicalize_qnan_v2f16(<2 x half> addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_qnan_v2f16(<2 x half> addrspace(1)* %out) #1 {
%canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> <half 0xH7C00, half 0xH7C00>)
store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
ret void
@@ -380,7 +380,7 @@ define void @test_fold_canonicalize_qnan_v2f16(<2 x half> addrspace(1)* %out) #1
; GCN-LABEL: {{^}}test_fold_canonicalize_qnan_value_neg1_v2f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x7e007e00{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @test_fold_canonicalize_qnan_value_neg1_v2f16(<2 x half> addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_qnan_value_neg1_v2f16(<2 x half> addrspace(1)* %out) #1 {
%canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> bitcast (i32 -1 to <2 x half>))
store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
ret void
@@ -389,7 +389,7 @@ define void @test_fold_canonicalize_qnan_value_neg1_v2f16(<2 x half> addrspace(1
; GCN-LABEL: {{^}}test_fold_canonicalize_qnan_value_neg2_v2f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x7e007e00{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @test_fold_canonicalize_qnan_value_neg2_v2f16(<2 x half> addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_qnan_value_neg2_v2f16(<2 x half> addrspace(1)* %out) #1 {
%canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> <half bitcast (i16 -2 to half), half bitcast (i16 -2 to half)>)
store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
ret void
@@ -398,7 +398,7 @@ define void @test_fold_canonicalize_qnan_value_neg2_v2f16(<2 x half> addrspace(1
; GCN-LABEL: {{^}}test_fold_canonicalize_snan0_value_v2f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x7e007e00{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @test_fold_canonicalize_snan0_value_v2f16(<2 x half> addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_snan0_value_v2f16(<2 x half> addrspace(1)* %out) #1 {
%canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> <half 0xH7C01, half 0xH7C01>)
store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
ret void
@@ -407,7 +407,7 @@ define void @test_fold_canonicalize_snan0_value_v2f16(<2 x half> addrspace(1)* %
; GCN-LABEL: {{^}}test_fold_canonicalize_snan1_value_v2f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x7e007e00{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @test_fold_canonicalize_snan1_value_v2f16(<2 x half> addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_snan1_value_v2f16(<2 x half> addrspace(1)* %out) #1 {
%canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> <half 0xH7DFF, half 0xH7DFF>)
store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
ret void
@@ -416,7 +416,7 @@ define void @test_fold_canonicalize_snan1_value_v2f16(<2 x half> addrspace(1)* %
; GCN-LABEL: {{^}}test_fold_canonicalize_snan2_value_v2f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x7e007e00{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @test_fold_canonicalize_snan2_value_v2f16(<2 x half> addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_snan2_value_v2f16(<2 x half> addrspace(1)* %out) #1 {
%canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> <half 0xHFDFF, half 0xHFDFF>)
store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
ret void
@@ -425,7 +425,7 @@ define void @test_fold_canonicalize_snan2_value_v2f16(<2 x half> addrspace(1)* %
; GCN-LABEL: {{^}}test_fold_canonicalize_snan3_value_v2f16:
; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x7e007e00{{$}}
; GCN: buffer_store_dword [[REG]]
-define void @test_fold_canonicalize_snan3_value_v2f16(<2 x half> addrspace(1)* %out) #1 {
+define amdgpu_kernel void @test_fold_canonicalize_snan3_value_v2f16(<2 x half> addrspace(1)* %out) #1 {
%canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> <half 0xHFC01, half 0xHFC01>)
store <2 x half> %canonicalized, <2 x half> addrspace(1)* %out
ret void