Diffstat (limited to 'llvm/test/Analysis/CostModel/AMDGPU/fabs.ll')
 llvm/test/Analysis/CostModel/AMDGPU/fabs.ll | 18
 1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/llvm/test/Analysis/CostModel/AMDGPU/fabs.ll b/llvm/test/Analysis/CostModel/AMDGPU/fabs.ll
index 9c551ec8afe..0d49e2967d2 100644
--- a/llvm/test/Analysis/CostModel/AMDGPU/fabs.ll
+++ b/llvm/test/Analysis/CostModel/AMDGPU/fabs.ll
@@ -2,7 +2,7 @@
; CHECK: 'fabs_f32'
; CHECK: estimated cost of 0 for {{.*}} call float @llvm.fabs.f32
-define void @fabs_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr) #0 {
+define amdgpu_kernel void @fabs_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr) #0 {
%vec = load float, float addrspace(1)* %vaddr
%fabs = call float @llvm.fabs.f32(float %vec) #1
store float %fabs, float addrspace(1)* %out
@@ -11,7 +11,7 @@ define void @fabs_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr) #0 {
; CHECK: 'fabs_v2f32'
; CHECK: estimated cost of 0 for {{.*}} call <2 x float> @llvm.fabs.v2f32
-define void @fabs_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr) #0 {
+define amdgpu_kernel void @fabs_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr) #0 {
%vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
%fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %vec) #1
store <2 x float> %fabs, <2 x float> addrspace(1)* %out
@@ -20,7 +20,7 @@ define void @fabs_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)
; CHECK: 'fabs_v3f32'
; CHECK: estimated cost of 0 for {{.*}} call <3 x float> @llvm.fabs.v3f32
-define void @fabs_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr) #0 {
+define amdgpu_kernel void @fabs_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr) #0 {
%vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
%fabs = call <3 x float> @llvm.fabs.v3f32(<3 x float> %vec) #1
store <3 x float> %fabs, <3 x float> addrspace(1)* %out
@@ -29,7 +29,7 @@ define void @fabs_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)
; CHECK: 'fabs_f64'
; CHECK: estimated cost of 0 for {{.*}} call double @llvm.fabs.f64
-define void @fabs_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr) #0 {
+define amdgpu_kernel void @fabs_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr) #0 {
%vec = load double, double addrspace(1)* %vaddr
%fabs = call double @llvm.fabs.f64(double %vec) #1
store double %fabs, double addrspace(1)* %out
@@ -38,7 +38,7 @@ define void @fabs_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr) #0
; CHECK: 'fabs_v2f64'
; CHECK: estimated cost of 0 for {{.*}} call <2 x double> @llvm.fabs.v2f64
-define void @fabs_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr) #0 {
+define amdgpu_kernel void @fabs_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr) #0 {
%vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
%fabs = call <2 x double> @llvm.fabs.v2f64(<2 x double> %vec) #1
store <2 x double> %fabs, <2 x double> addrspace(1)* %out
@@ -47,7 +47,7 @@ define void @fabs_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(
; CHECK: 'fabs_v3f64'
; CHECK: estimated cost of 0 for {{.*}} call <3 x double> @llvm.fabs.v3f64
-define void @fabs_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr) #0 {
+define amdgpu_kernel void @fabs_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr) #0 {
%vec = load <3 x double>, <3 x double> addrspace(1)* %vaddr
%fabs = call <3 x double> @llvm.fabs.v3f64(<3 x double> %vec) #1
store <3 x double> %fabs, <3 x double> addrspace(1)* %out
@@ -56,7 +56,7 @@ define void @fabs_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(
; CHECK: 'fabs_f16'
; CHECK: estimated cost of 0 for {{.*}} call half @llvm.fabs.f16
-define void @fabs_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr) #0 {
+define amdgpu_kernel void @fabs_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr) #0 {
%vec = load half, half addrspace(1)* %vaddr
%fabs = call half @llvm.fabs.f16(half %vec) #1
store half %fabs, half addrspace(1)* %out
@@ -65,7 +65,7 @@ define void @fabs_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr) #0 {
; CHECK: 'fabs_v2f16'
; CHECK: estimated cost of 0 for {{.*}} call <2 x half> @llvm.fabs.v2f16
-define void @fabs_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr) #0 {
+define amdgpu_kernel void @fabs_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr) #0 {
%vec = load <2 x half>, <2 x half> addrspace(1)* %vaddr
%fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %vec) #1
store <2 x half> %fabs, <2 x half> addrspace(1)* %out
@@ -74,7 +74,7 @@ define void @fabs_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)*
; CHECK: 'fabs_v3f16'
; CHECK: estimated cost of 0 for {{.*}} call <3 x half> @llvm.fabs.v3f16
-define void @fabs_v3f16(<3 x half> addrspace(1)* %out, <3 x half> addrspace(1)* %vaddr) #0 {
+define amdgpu_kernel void @fabs_v3f16(<3 x half> addrspace(1)* %out, <3 x half> addrspace(1)* %vaddr) #0 {
%vec = load <3 x half>, <3 x half> addrspace(1)* %vaddr
%fabs = call <3 x half> @llvm.fabs.v3f16(<3 x half> %vec) #1
store <3 x half> %fabs, <3 x half> addrspace(1)* %out
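
For context, every hunk above changes only the calling convention on the define line, converting each test function into an amdgpu_kernel entry point. A full test function after the change reads roughly as below; this is a minimal sketch, and the ret void, closing brace, intrinsic declaration, and attribute groups fall outside the displayed hunk context, so their exact form here is an assumption rather than part of the diff.

define amdgpu_kernel void @fabs_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr) #0 {
  %vec = load float, float addrspace(1)* %vaddr
  %fabs = call float @llvm.fabs.f32(float %vec) #1
  store float %fabs, float addrspace(1)* %out
  ret void                                  ; assumed: not shown in the hunk context
}

; assumed surrounding declarations and attribute groups, typical for this style of
; cost-model test but not visible in the diff:
declare float @llvm.fabs.f32(float) #1
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }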