author     Matt Arsenault <Matthew.Arsenault@amd.com>    2017-03-21 21:39:51 +0000
committer  Matt Arsenault <Matthew.Arsenault@amd.com>    2017-03-21 21:39:51 +0000
commit     3dbeefa978fb7e7b231b249f9cd90c67b9e83277 (patch)
tree       d74bf7fe30e44588d573919f3625edacb2586112 /llvm/test/CodeGen/AMDGPU/fdiv.f64.ll
parent     f6021ecddc73d14c94ad70938250d58f330795be (diff)
AMDGPU: Mark all unspecified CC functions in tests as amdgpu_kernel
Currently the default C calling convention functions are treated the same as compute kernels. Make this explicit so the default calling convention can be changed to a non-kernel.

Converted with perl -pi -e 's/define void/define amdgpu_kernel void/' on the relevant test directories (and undoing in one place that actually wanted a non-kernel).

llvm-svn: 298444
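For reference, a minimal sketch of the mechanical rewrite described in the commit message, assuming it is run over one test directory at a time (the commit message does not list the directories, so the path below is illustrative):

    # Hypothetical invocation: give every unspecified-CC test entry point the
    # explicit amdgpu_kernel calling convention in one AMDGPU test directory.
    perl -pi -e 's/define void/define amdgpu_kernel void/' llvm/test/CodeGen/AMDGPU/*.ll

Any function that genuinely wants the default (non-kernel) calling convention, as noted above, then has to be reverted by hand.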
Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/fdiv.f64.ll')
-rw-r--r--    llvm/test/CodeGen/AMDGPU/fdiv.f64.ll    22
1 file changed, 11 insertions, 11 deletions
diff --git a/llvm/test/CodeGen/AMDGPU/fdiv.f64.ll b/llvm/test/CodeGen/AMDGPU/fdiv.f64.ll
index 04e1d5ac601..d16bdf43ee2 100644
--- a/llvm/test/CodeGen/AMDGPU/fdiv.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/fdiv.f64.ll
@@ -29,7 +29,7 @@
; GCN: v_div_fixup_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[FMAS]], [[DEN]], [[NUM]]
; GCN: buffer_store_dwordx2 [[RESULT]]
; GCN: s_endpgm
-define void @fdiv_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
+define amdgpu_kernel void @fdiv_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
%gep.1 = getelementptr double, double addrspace(1)* %in, i32 1
%num = load volatile double, double addrspace(1)* %in
%den = load volatile double, double addrspace(1)* %gep.1
@@ -39,7 +39,7 @@ define void @fdiv_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
}
; GCN-LABEL: {{^}}fdiv_f64_s_v:
-define void @fdiv_f64_s_v(double addrspace(1)* %out, double addrspace(1)* %in, double %num) #0 {
+define amdgpu_kernel void @fdiv_f64_s_v(double addrspace(1)* %out, double addrspace(1)* %in, double %num) #0 {
%den = load double, double addrspace(1)* %in
%result = fdiv double %num, %den
store double %result, double addrspace(1)* %out
@@ -47,7 +47,7 @@ define void @fdiv_f64_s_v(double addrspace(1)* %out, double addrspace(1)* %in, d
}
; GCN-LABEL: {{^}}fdiv_f64_v_s:
-define void @fdiv_f64_v_s(double addrspace(1)* %out, double addrspace(1)* %in, double %den) #0 {
+define amdgpu_kernel void @fdiv_f64_v_s(double addrspace(1)* %out, double addrspace(1)* %in, double %den) #0 {
%num = load double, double addrspace(1)* %in
%result = fdiv double %num, %den
store double %result, double addrspace(1)* %out
@@ -55,14 +55,14 @@ define void @fdiv_f64_v_s(double addrspace(1)* %out, double addrspace(1)* %in, d
}
; GCN-LABEL: {{^}}fdiv_f64_s_s:
-define void @fdiv_f64_s_s(double addrspace(1)* %out, double %num, double %den) #0 {
+define amdgpu_kernel void @fdiv_f64_s_s(double addrspace(1)* %out, double %num, double %den) #0 {
%result = fdiv double %num, %den
store double %result, double addrspace(1)* %out
ret void
}
; GCN-LABEL: {{^}}v_fdiv_v2f64:
-define void @v_fdiv_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @v_fdiv_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %in) #0 {
%gep.1 = getelementptr <2 x double>, <2 x double> addrspace(1)* %in, i32 1
%num = load <2 x double>, <2 x double> addrspace(1)* %in
%den = load <2 x double>, <2 x double> addrspace(1)* %gep.1
@@ -72,14 +72,14 @@ define void @v_fdiv_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspac
}
; GCN-LABEL: {{^}}s_fdiv_v2f64:
-define void @s_fdiv_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %num, <2 x double> %den) {
+define amdgpu_kernel void @s_fdiv_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %num, <2 x double> %den) {
%result = fdiv <2 x double> %num, %den
store <2 x double> %result, <2 x double> addrspace(1)* %out
ret void
}
; GCN-LABEL: {{^}}v_fdiv_v4f64:
-define void @v_fdiv_v4f64(<4 x double> addrspace(1)* %out, <4 x double> addrspace(1)* %in) #0 {
+define amdgpu_kernel void @v_fdiv_v4f64(<4 x double> addrspace(1)* %out, <4 x double> addrspace(1)* %in) #0 {
%gep.1 = getelementptr <4 x double>, <4 x double> addrspace(1)* %in, i32 1
%num = load <4 x double>, <4 x double> addrspace(1)* %in
%den = load <4 x double>, <4 x double> addrspace(1)* %gep.1
@@ -89,7 +89,7 @@ define void @v_fdiv_v4f64(<4 x double> addrspace(1)* %out, <4 x double> addrspac
}
; GCN-LABEL: {{^}}s_fdiv_v4f64:
-define void @s_fdiv_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %num, <4 x double> %den) #0 {
+define amdgpu_kernel void @s_fdiv_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %num, <4 x double> %den) #0 {
%result = fdiv <4 x double> %num, %den
store <4 x double> %result, <4 x double> addrspace(1)* %out
ret void
@@ -98,7 +98,7 @@ define void @s_fdiv_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %num, <4
; GCN-LABEL: {{^}}div_fast_2_x_pat_f64:
; GCN: v_mul_f64 [[MUL:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, 0.5
; GCN: buffer_store_dwordx2 [[MUL]]
-define void @div_fast_2_x_pat_f64(double addrspace(1)* %out) #1 {
+define amdgpu_kernel void @div_fast_2_x_pat_f64(double addrspace(1)* %out) #1 {
%x = load double, double addrspace(1)* undef
%rcp = fdiv fast double %x, 2.0
store double %rcp, double addrspace(1)* %out, align 4
@@ -110,7 +110,7 @@ define void @div_fast_2_x_pat_f64(double addrspace(1)* %out) #1 {
; GCN-DAG: s_mov_b32 s[[K_HI:[0-9]+]], 0x3fb99999
; GCN: v_mul_f64 [[MUL:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI]]{{\]}}
; GCN: buffer_store_dwordx2 [[MUL]]
-define void @div_fast_k_x_pat_f64(double addrspace(1)* %out) #1 {
+define amdgpu_kernel void @div_fast_k_x_pat_f64(double addrspace(1)* %out) #1 {
%x = load double, double addrspace(1)* undef
%rcp = fdiv fast double %x, 10.0
store double %rcp, double addrspace(1)* %out, align 4
@@ -122,7 +122,7 @@ define void @div_fast_k_x_pat_f64(double addrspace(1)* %out) #1 {
; GCN-DAG: s_mov_b32 s[[K_HI:[0-9]+]], 0xbfb99999
; GCN: v_mul_f64 [[MUL:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI]]{{\]}}
; GCN: buffer_store_dwordx2 [[MUL]]
-define void @div_fast_neg_k_x_pat_f64(double addrspace(1)* %out) #1 {
+define amdgpu_kernel void @div_fast_neg_k_x_pat_f64(double addrspace(1)* %out) #1 {
%x = load double, double addrspace(1)* undef
%rcp = fdiv fast double %x, -10.0
store double %rcp, double addrspace(1)* %out, align 4