Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/fsqrt.ll')
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fsqrt.ll | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/fsqrt.ll b/llvm/test/CodeGen/AMDGPU/fsqrt.ll
index b6526b8e078..a0fd3411ca0 100644
--- a/llvm/test/CodeGen/AMDGPU/fsqrt.ll
+++ b/llvm/test/CodeGen/AMDGPU/fsqrt.ll
@@ -7,7 +7,7 @@
; FUNC-LABEL: {{^}}v_safe_fsqrt_f32:
; GCN: v_sqrt_f32_e32 {{v[0-9]+, v[0-9]+}}
-define void @v_safe_fsqrt_f32(float addrspace(1)* %out, float addrspace(1)* %in) #1 {
+define amdgpu_kernel void @v_safe_fsqrt_f32(float addrspace(1)* %out, float addrspace(1)* %in) #1 {
%r0 = load float, float addrspace(1)* %in
%r1 = call float @llvm.sqrt.f32(float %r0)
store float %r1, float addrspace(1)* %out
@@ -16,7 +16,7 @@ define void @v_safe_fsqrt_f32(float addrspace(1)* %out, float addrspace(1)* %in)
; FUNC-LABEL: {{^}}v_unsafe_fsqrt_f32:
; GCN: v_sqrt_f32_e32 {{v[0-9]+, v[0-9]+}}
-define void @v_unsafe_fsqrt_f32(float addrspace(1)* %out, float addrspace(1)* %in) #2 {
+define amdgpu_kernel void @v_unsafe_fsqrt_f32(float addrspace(1)* %out, float addrspace(1)* %in) #2 {
%r0 = load float, float addrspace(1)* %in
%r1 = call float @llvm.sqrt.f32(float %r0)
store float %r1, float addrspace(1)* %out
@@ -29,7 +29,7 @@ define void @v_unsafe_fsqrt_f32(float addrspace(1)* %out, float addrspace(1)* %i
; R600: RECIPSQRT_CLAMPED * T{{[0-9]\.[XYZW]}}, KC0[2].Z
; R600: MUL NON-IEEE T{{[0-9]\.[XYZW]}}, KC0[2].Z, PS
-define void @s_sqrt_f32(float addrspace(1)* %out, float %in) #1 {
+define amdgpu_kernel void @s_sqrt_f32(float addrspace(1)* %out, float %in) #1 {
entry:
%fdiv = call float @llvm.sqrt.f32(float %in)
store float %fdiv, float addrspace(1)* %out
@@ -44,7 +44,7 @@ entry:
; R600-DAG: MUL NON-IEEE T{{[0-9]\.[XYZW]}}, KC0[2].W, PS
; R600-DAG: RECIPSQRT_CLAMPED * T{{[0-9]\.[XYZW]}}, KC0[3].X
; R600-DAG: MUL NON-IEEE T{{[0-9]\.[XYZW]}}, KC0[3].X, PS
-define void @s_sqrt_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %in) #1 {
+define amdgpu_kernel void @s_sqrt_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %in) #1 {
entry:
%fdiv = call <2 x float> @llvm.sqrt.v2f32(<2 x float> %in)
store <2 x float> %fdiv, <2 x float> addrspace(1)* %out
@@ -65,7 +65,7 @@ entry:
; R600-DAG: MUL NON-IEEE T{{[0-9]\.[XYZW]}}, KC0[3].W, PS
; R600-DAG: RECIPSQRT_CLAMPED * T{{[0-9]\.[XYZW]}}, KC0[4].X
; R600-DAG: MUL NON-IEEE T{{[0-9]\.[XYZW]}}, KC0[4].X, PS
-define void @s_sqrt_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %in) #1 {
+define amdgpu_kernel void @s_sqrt_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %in) #1 {
entry:
%fdiv = call <4 x float> @llvm.sqrt.v4f32(<4 x float> %in)
store <4 x float> %fdiv, <4 x float> addrspace(1)* %out
@@ -75,7 +75,7 @@ entry:
; FUNC-LABEL: {{^}}elim_redun_check_neg0:
; GCN: v_sqrt_f32_e32
; GCN-NOT: v_cndmask
-define void @elim_redun_check_neg0(float addrspace(1)* %out, float %in) #1 {
+define amdgpu_kernel void @elim_redun_check_neg0(float addrspace(1)* %out, float %in) #1 {
entry:
%sqrt = call float @llvm.sqrt.f32(float %in)
%cmp = fcmp olt float %in, -0.000000e+00
@@ -87,7 +87,7 @@ entry:
; FUNC-LABEL: {{^}}elim_redun_check_pos0:
; GCN: v_sqrt_f32_e32
; GCN-NOT: v_cndmask
-define void @elim_redun_check_pos0(float addrspace(1)* %out, float %in) #1 {
+define amdgpu_kernel void @elim_redun_check_pos0(float addrspace(1)* %out, float %in) #1 {
entry:
%sqrt = call float @llvm.sqrt.f32(float %in)
%cmp = fcmp olt float %in, 0.000000e+00
@@ -99,7 +99,7 @@ entry:
; FUNC-LABEL: {{^}}elim_redun_check_ult:
; GCN: v_sqrt_f32_e32
; GCN-NOT: v_cndmask
-define void @elim_redun_check_ult(float addrspace(1)* %out, float %in) #1 {
+define amdgpu_kernel void @elim_redun_check_ult(float addrspace(1)* %out, float %in) #1 {
entry:
%sqrt = call float @llvm.sqrt.f32(float %in)
%cmp = fcmp ult float %in, -0.000000e+00
@@ -112,7 +112,7 @@ entry:
; GCN: v_sqrt_f32_e32
; GCN: v_sqrt_f32_e32
; GCN-NOT: v_cndmask
-define void @elim_redun_check_v2(<2 x float> addrspace(1)* %out, <2 x float> %in) #1 {
+define amdgpu_kernel void @elim_redun_check_v2(<2 x float> addrspace(1)* %out, <2 x float> %in) #1 {
entry:
%sqrt = call <2 x float> @llvm.sqrt.v2f32(<2 x float> %in)
%cmp = fcmp olt <2 x float> %in, <float -0.000000e+00, float -0.000000e+00>
@@ -125,7 +125,7 @@ entry:
; GCN: v_sqrt_f32_e32
; GCN: v_sqrt_f32_e32
; GCN-NOT: v_cndmask
-define void @elim_redun_check_v2_ult(<2 x float> addrspace(1)* %out, <2 x float> %in) #1 {
+define amdgpu_kernel void @elim_redun_check_v2_ult(<2 x float> addrspace(1)* %out, <2 x float> %in) #1 {
entry:
%sqrt = call <2 x float> @llvm.sqrt.v2f32(<2 x float> %in)
%cmp = fcmp ult <2 x float> %in, <float -0.000000e+00, float -0.000000e+00>