author     Matt Arsenault <Matthew.Arsenault@amd.com>  2017-03-21 21:39:51 +0000
committer  Matt Arsenault <Matthew.Arsenault@amd.com>  2017-03-21 21:39:51 +0000
commit     3dbeefa978fb7e7b231b249f9cd90c67b9e83277
tree       d74bf7fe30e44588d573919f3625edacb2586112
parent     f6021ecddc73d14c94ad70938250d58f330795be
AMDGPU: Mark all unspecified CC functions in tests as amdgpu_kernel
Currently the default C calling convention functions are treated the same as compute kernels. Make this explicit so the default calling convention can be changed to a non-kernel.

Converted with perl -pi -e 's/define void/define amdgpu_kernel void/' on the relevant test directories (and undoing it in one place that actually wanted a non-kernel).

llvm-svn: 298444
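For concreteness, a minimal sketch of how that one-liner might have been applied. The perl command is quoted from the message above, but the glob is an illustrative assumption: the exact list of "relevant test directories" is not recorded here, and this file's directory is shown only as one of them.

  # Rewrite every default-CC "define void" into an explicit amdgpu_kernel
  # definition across one affected test directory (assumed path).
  perl -pi -e 's/define void/define amdgpu_kernel void/' \
      llvm/test/Analysis/CostModel/AMDGPU/*.ll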
Diffstat (limited to 'llvm/test/Analysis/CostModel/AMDGPU/fmul.ll')
-rw-r--r--  llvm/test/Analysis/CostModel/AMDGPU/fmul.ll | 18
1 file changed, 9 insertions(+), 9 deletions(-)
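The hunks below change only the "define" lines; the FileCheck directives are untouched. Each "estimated cost" comment is matched against the printed output of LLVM's cost-model analysis. The file's RUN lines sit above the first hunk and are outside this excerpt, so the invocation below is an assumption based on the conventions of the time, not the file's actual RUN lines:

  # Legacy pass-manager cost-model printer; a second invocation with a
  # different -mcpu would select the FASTF64 or SLOWF64 prefix instead.
  opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa fmul.ll \
      | FileCheck -check-prefixes=ALL,SLOWF64 fmul.ll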
diff --git a/llvm/test/Analysis/CostModel/AMDGPU/fmul.ll b/llvm/test/Analysis/CostModel/AMDGPU/fmul.ll
index 6303bb7988c..915c35a23b3 100644
--- a/llvm/test/Analysis/CostModel/AMDGPU/fmul.ll
+++ b/llvm/test/Analysis/CostModel/AMDGPU/fmul.ll
@@ -3,7 +3,7 @@
; ALL: 'fmul_f32'
; ALL: estimated cost of 1 for {{.*}} fmul float
-define void @fmul_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 {
+define amdgpu_kernel void @fmul_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 {
%vec = load float, float addrspace(1)* %vaddr
%add = fmul float %vec, %b
store float %add, float addrspace(1)* %out
@@ -12,7 +12,7 @@ define void @fmul_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, floa
; ALL: 'fmul_v2f32'
; ALL: estimated cost of 2 for {{.*}} fmul <2 x float>
-define void @fmul_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 {
+define amdgpu_kernel void @fmul_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 {
%vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
%add = fmul <2 x float> %vec, %b
store <2 x float> %add, <2 x float> addrspace(1)* %out
@@ -21,7 +21,7 @@ define void @fmul_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)
; ALL: 'fmul_v3f32'
; ALL: estimated cost of 3 for {{.*}} fmul <3 x float>
-define void @fmul_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 {
+define amdgpu_kernel void @fmul_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 {
%vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
%add = fmul <3 x float> %vec, %b
store <3 x float> %add, <3 x float> addrspace(1)* %out
@@ -31,7 +31,7 @@ define void @fmul_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)
; ALL: 'fmul_f64'
; FASTF64: estimated cost of 2 for {{.*}} fmul double
; SLOWF64: estimated cost of 3 for {{.*}} fmul double
-define void @fmul_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 {
+define amdgpu_kernel void @fmul_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 {
%vec = load double, double addrspace(1)* %vaddr
%add = fmul double %vec, %b
store double %add, double addrspace(1)* %out
@@ -41,7 +41,7 @@ define void @fmul_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, do
; ALL: 'fmul_v2f64'
; FASTF64: estimated cost of 4 for {{.*}} fmul <2 x double>
; SLOWF64: estimated cost of 6 for {{.*}} fmul <2 x double>
-define void @fmul_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) #0 {
+define amdgpu_kernel void @fmul_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) #0 {
%vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
%add = fmul <2 x double> %vec, %b
store <2 x double> %add, <2 x double> addrspace(1)* %out
@@ -51,7 +51,7 @@ define void @fmul_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(
; ALL: 'fmul_v3f64'
; FASTF64: estimated cost of 6 for {{.*}} fmul <3 x double>
; SLOWF64: estimated cost of 9 for {{.*}} fmul <3 x double>
-define void @fmul_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr, <3 x double> %b) #0 {
+define amdgpu_kernel void @fmul_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr, <3 x double> %b) #0 {
%vec = load <3 x double>, <3 x double> addrspace(1)* %vaddr
%add = fmul <3 x double> %vec, %b
store <3 x double> %add, <3 x double> addrspace(1)* %out
@@ -60,7 +60,7 @@ define void @fmul_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(
; ALL 'fmul_f16'
; ALL estimated cost of 1 for {{.*}} fmul half
-define void @fmul_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 {
+define amdgpu_kernel void @fmul_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 {
%vec = load half, half addrspace(1)* %vaddr
%add = fmul half %vec, %b
store half %add, half addrspace(1)* %out
@@ -69,7 +69,7 @@ define void @fmul_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %
; ALL 'fmul_v2f16'
; ALL estimated cost of 2 for {{.*}} fmul <2 x half>
-define void @fmul_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 {
+define amdgpu_kernel void @fmul_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 {
%vec = load <2 x half>, <2 x half> addrspace(1)* %vaddr
%add = fmul <2 x half> %vec, %b
store <2 x half> %add, <2 x half> addrspace(1)* %out
@@ -78,7 +78,7 @@ define void @fmul_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)*
; ALL 'fmul_v4f16'
; ALL estimated cost of 4 for {{.*}} fmul <4 x half>
-define void @fmul_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 {
+define amdgpu_kernel void @fmul_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 {
%vec = load <4 x half>, <4 x half> addrspace(1)* %vaddr
%add = fmul <4 x half> %vec, %b
store <4 x half> %add, <4 x half> addrspace(1)* %out