diff options
Diffstat (limited to 'llvm/test/Analysis/CostModel/AMDGPU/fsub.ll')
-rw-r--r-- | llvm/test/Analysis/CostModel/AMDGPU/fsub.ll | 18 |
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/llvm/test/Analysis/CostModel/AMDGPU/fsub.ll b/llvm/test/Analysis/CostModel/AMDGPU/fsub.ll index e0850be9867..cb89d292f71 100644 --- a/llvm/test/Analysis/CostModel/AMDGPU/fsub.ll +++ b/llvm/test/Analysis/CostModel/AMDGPU/fsub.ll @@ -3,7 +3,7 @@ ; ALL: 'fsub_f32' ; ALL: estimated cost of 1 for {{.*}} fsub float -define void @fsub_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 { +define amdgpu_kernel void @fsub_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 { %vec = load float, float addrspace(1)* %vaddr %add = fsub float %vec, %b store float %add, float addrspace(1)* %out @@ -12,7 +12,7 @@ define void @fsub_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, floa ; ALL: 'fsub_v2f32' ; ALL: estimated cost of 2 for {{.*}} fsub <2 x float> -define void @fsub_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 { +define amdgpu_kernel void @fsub_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 { %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr %add = fsub <2 x float> %vec, %b store <2 x float> %add, <2 x float> addrspace(1)* %out @@ -21,7 +21,7 @@ define void @fsub_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1) ; ALL: 'fsub_v3f32' ; ALL: estimated cost of 3 for {{.*}} fsub <3 x float> -define void @fsub_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 { +define amdgpu_kernel void @fsub_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 { %vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr %add = fsub <3 x float> %vec, %b store <3 x float> %add, <3 x float> addrspace(1)* %out @@ -31,7 +31,7 @@ define void @fsub_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1) ; ALL: 'fsub_f64' ; FASTF64: estimated cost of 2 for {{.*}} fsub double ; SLOWF64: estimated cost of 3 for {{.*}} fsub double -define void 
@fsub_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 { +define amdgpu_kernel void @fsub_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 { %vec = load double, double addrspace(1)* %vaddr %add = fsub double %vec, %b store double %add, double addrspace(1)* %out @@ -41,7 +41,7 @@ define void @fsub_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, do ; ALL: 'fsub_v2f64' ; FASTF64: estimated cost of 4 for {{.*}} fsub <2 x double> ; SLOWF64: estimated cost of 6 for {{.*}} fsub <2 x double> -define void @fsub_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) #0 { +define amdgpu_kernel void @fsub_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) #0 { %vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr %add = fsub <2 x double> %vec, %b store <2 x double> %add, <2 x double> addrspace(1)* %out @@ -51,7 +51,7 @@ define void @fsub_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace( ; ALL: 'fsub_v3f64' ; FASTF64: estimated cost of 6 for {{.*}} fsub <3 x double> ; SLOWF64: estimated cost of 9 for {{.*}} fsub <3 x double> -define void @fsub_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr, <3 x double> %b) #0 { +define amdgpu_kernel void @fsub_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr, <3 x double> %b) #0 { %vec = load <3 x double>, <3 x double> addrspace(1)* %vaddr %add = fsub <3 x double> %vec, %b store <3 x double> %add, <3 x double> addrspace(1)* %out @@ -60,7 +60,7 @@ define void @fsub_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace( ; ALL: 'fsub_f16' ; ALL: estimated cost of 1 for {{.*}} fsub half -define void @fsub_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 { +define amdgpu_kernel void @fsub_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 { %vec = load half, half addrspace(1)* %vaddr %add = fsub half 
%vec, %b store half %add, half addrspace(1)* %out @@ -69,7 +69,7 @@ define void @fsub_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half % ; ALL: 'fsub_v2f16' ; ALL: estimated cost of 2 for {{.*}} fsub <2 x half> -define void @fsub_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 { +define amdgpu_kernel void @fsub_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 { %vec = load <2 x half>, <2 x half> addrspace(1)* %vaddr %add = fsub <2 x half> %vec, %b store <2 x half> %add, <2 x half> addrspace(1)* %out @@ -78,7 +78,7 @@ define void @fsub_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* ; ALL: 'fsub_v4f16' ; ALL: estimated cost of 4 for {{.*}} fsub <4 x half> -define void @fsub_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 { +define amdgpu_kernel void @fsub_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 { %vec = load <4 x half>, <4 x half> addrspace(1)* %vaddr %add = fsub <4 x half> %vec, %b store <4 x half> %add, <4 x half> addrspace(1)* %out |