diff options
author | Matt Arsenault <Matthew.Arsenault@amd.com> | 2016-03-25 01:00:32 +0000 |
---|---|---|
committer | Matt Arsenault <Matthew.Arsenault@amd.com> | 2016-03-25 01:00:32 +0000 |
commit | 9651813ee0d9633e12accb7ae673a8a3b944f35c (patch) | |
tree | 2101cca435e81795769c32e76b5c88c2202625c0 /llvm/test/Analysis | |
parent | efe16c8eb4fa5450fba4a1465e01620ba6c2c402 (diff) | |
download | bcm5719-llvm-9651813ee0d9633e12accb7ae673a8a3b944f35c.tar.gz bcm5719-llvm-9651813ee0d9633e12accb7ae673a8a3b944f35c.zip |
AMDGPU: Partially implement getArithmeticInstrCost for FP ops
llvm-svn: 264374
Diffstat (limited to 'llvm/test/Analysis')
-rw-r--r-- | llvm/test/Analysis/CostModel/AMDGPU/fadd.ll | 88 | ||||
-rw-r--r-- | llvm/test/Analysis/CostModel/AMDGPU/fdiv.ll | 96 | ||||
-rw-r--r-- | llvm/test/Analysis/CostModel/AMDGPU/fmul.ll | 88 | ||||
-rw-r--r-- | llvm/test/Analysis/CostModel/AMDGPU/fsub.ll | 86 |
4 files changed, 358 insertions, 0 deletions
diff --git a/llvm/test/Analysis/CostModel/AMDGPU/fadd.ll b/llvm/test/Analysis/CostModel/AMDGPU/fadd.ll
new file mode 100644
index 00000000000..00e91bd6223
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/AMDGPU/fadd.ll
@@ -0,0 +1,88 @@
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=+half-rate-64-ops < %s | FileCheck -check-prefix=FASTF64 -check-prefix=ALL %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=-half-rate-64-ops < %s | FileCheck -check-prefix=SLOWF64 -check-prefix=ALL %s
+
+; ALL: 'fadd_f32'
+; ALL: estimated cost of 1 for {{.*}} fadd float
+define void @fadd_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 {
+  %vec = load float, float addrspace(1)* %vaddr
+  %add = fadd float %vec, %b
+  store float %add, float addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fadd_v2f32'
+; ALL: estimated cost of 2 for {{.*}} fadd <2 x float>
+define void @fadd_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 {
+  %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
+  %add = fadd <2 x float> %vec, %b
+  store <2 x float> %add, <2 x float> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fadd_v3f32'
+; ALL: estimated cost of 3 for {{.*}} fadd <3 x float>
+define void @fadd_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 {
+  %vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
+  %add = fadd <3 x float> %vec, %b
+  store <3 x float> %add, <3 x float> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fadd_f64'
+; FASTF64: estimated cost of 2 for {{.*}} fadd double
+; SLOWF64: estimated cost of 3 for {{.*}} fadd double
+define void @fadd_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 {
+  %vec = load double, double addrspace(1)* %vaddr
+  %add = fadd double %vec, %b
+  store double %add, double addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fadd_v2f64'
+; FASTF64: estimated cost of 4 for {{.*}} fadd <2 x double>
+; SLOWF64: estimated cost of 6 for {{.*}} fadd <2 x double>
+define void @fadd_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) #0 {
+  %vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
+  %add = fadd <2 x double> %vec, %b
+  store <2 x double> %add, <2 x double> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fadd_v3f64'
+; FASTF64: estimated cost of 6 for {{.*}} fadd <3 x double>
+; SLOWF64: estimated cost of 9 for {{.*}} fadd <3 x double>
+define void @fadd_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr, <3 x double> %b) #0 {
+  %vec = load <3 x double>, <3 x double> addrspace(1)* %vaddr
+  %add = fadd <3 x double> %vec, %b
+  store <3 x double> %add, <3 x double> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fadd_f16'
+; ALL: estimated cost of 1 for {{.*}} fadd half
+define void @fadd_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 {
+  %vec = load half, half addrspace(1)* %vaddr
+  %add = fadd half %vec, %b
+  store half %add, half addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fadd_v2f16'
+; ALL: estimated cost of 2 for {{.*}} fadd <2 x half>
+define void @fadd_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 {
+  %vec = load <2 x half>, <2 x half> addrspace(1)* %vaddr
+  %add = fadd <2 x half> %vec, %b
+  store <2 x half> %add, <2 x half> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fadd_v4f16'
+; ALL: estimated cost of 4 for {{.*}} fadd <4 x half>
+define void @fadd_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 {
+  %vec = load <4 x half>, <4 x half> addrspace(1)* %vaddr
+  %add = fadd <4 x half> %vec, %b
+  store <4 x half> %add, <4 x half> addrspace(1)* %out
+  ret void
+}
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/Analysis/CostModel/AMDGPU/fdiv.ll b/llvm/test/Analysis/CostModel/AMDGPU/fdiv.ll
new file mode 100644
index 00000000000..3f374422ad9
--- /dev/null
+++ 
b/llvm/test/Analysis/CostModel/AMDGPU/fdiv.ll
@@ -0,0 +1,96 @@
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=hawaii -mattr=+half-rate-64-ops < %s | FileCheck -check-prefix=ALL -check-prefix=CIFASTF64 %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=kaveri -mattr=-half-rate-64-ops < %s | FileCheck -check-prefix=ALL -check-prefix=CISLOWF64 %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=tahiti -mattr=+half-rate-64-ops < %s | FileCheck -check-prefix=ALL -check-prefix=SIFASTF64 %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=verde -mattr=-half-rate-64-ops < %s | FileCheck -check-prefix=ALL -check-prefix=SISLOWF64 %s
+
+; ALL: 'fdiv_f32'
+; ALL: estimated cost of 10 for {{.*}} fdiv float
+define void @fdiv_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 {
+  %vec = load float, float addrspace(1)* %vaddr
+  %add = fdiv float %vec, %b
+  store float %add, float addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fdiv_v2f32'
+; ALL: estimated cost of 20 for {{.*}} fdiv <2 x float>
+define void @fdiv_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 {
+  %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
+  %add = fdiv <2 x float> %vec, %b
+  store <2 x float> %add, <2 x float> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fdiv_v3f32'
+; ALL: estimated cost of 30 for {{.*}} fdiv <3 x float>
+define void @fdiv_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 {
+  %vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
+  %add = fdiv <3 x float> %vec, %b
+  store <3 x float> %add, <3 x float> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fdiv_f64'
+; CIFASTF64: estimated cost of 29 for {{.*}} fdiv double
+; CISLOWF64: estimated cost of 33 for {{.*}} fdiv double
+; SIFASTF64: estimated cost of 32 for {{.*}} fdiv double
+; SISLOWF64: estimated cost of 36 for {{.*}} fdiv double
+define void @fdiv_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 {
+  %vec = load double, double addrspace(1)* %vaddr
+  %add = fdiv double %vec, %b
+  store double %add, double addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fdiv_v2f64'
+; CIFASTF64: estimated cost of 58 for {{.*}} fdiv <2 x double>
+; CISLOWF64: estimated cost of 66 for {{.*}} fdiv <2 x double>
+; SIFASTF64: estimated cost of 64 for {{.*}} fdiv <2 x double>
+; SISLOWF64: estimated cost of 72 for {{.*}} fdiv <2 x double>
+define void @fdiv_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) #0 {
+  %vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
+  %add = fdiv <2 x double> %vec, %b
+  store <2 x double> %add, <2 x double> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fdiv_v3f64'
+; CIFASTF64: estimated cost of 87 for {{.*}} fdiv <3 x double>
+; CISLOWF64: estimated cost of 99 for {{.*}} fdiv <3 x double>
+; SIFASTF64: estimated cost of 96 for {{.*}} fdiv <3 x double>
+; SISLOWF64: estimated cost of 108 for {{.*}} fdiv <3 x double>
+define void @fdiv_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr, <3 x double> %b) #0 {
+  %vec = load <3 x double>, <3 x double> addrspace(1)* %vaddr
+  %add = fdiv <3 x double> %vec, %b
+  store <3 x double> %add, <3 x double> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fdiv_f16'
+; ALL: estimated cost of 10 for {{.*}} fdiv half
+define void @fdiv_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 {
+  %vec = load half, half addrspace(1)* %vaddr
+  %add = fdiv half %vec, %b
+  store half %add, half addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fdiv_v2f16'
+; ALL: estimated cost of 20 for {{.*}} fdiv <2 x half>
+define void @fdiv_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 {
+  %vec = load <2 x half>, <2 x half> addrspace(1)* %vaddr
+  %add = fdiv <2 x half> %vec, %b
+  store <2 x half> %add, <2 x half> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fdiv_v4f16'
+; ALL: estimated cost of 40 for {{.*}} fdiv <4 x half>
+define void @fdiv_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 {
+  %vec = load <4 x half>, <4 x half> addrspace(1)* %vaddr
+  %add = fdiv <4 x half> %vec, %b
+  store <4 x half> %add, <4 x half> addrspace(1)* %out
+  ret void
+}
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/Analysis/CostModel/AMDGPU/fmul.ll b/llvm/test/Analysis/CostModel/AMDGPU/fmul.ll
new file mode 100644
index 00000000000..6303bb7988c
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/AMDGPU/fmul.ll
@@ -0,0 +1,88 @@
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=+half-rate-64-ops < %s | FileCheck -check-prefix=FASTF64 -check-prefix=ALL %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=-half-rate-64-ops < %s | FileCheck -check-prefix=SLOWF64 -check-prefix=ALL %s
+
+; ALL: 'fmul_f32'
+; ALL: estimated cost of 1 for {{.*}} fmul float
+define void @fmul_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 {
+  %vec = load float, float addrspace(1)* %vaddr
+  %add = fmul float %vec, %b
+  store float %add, float addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fmul_v2f32'
+; ALL: estimated cost of 2 for {{.*}} fmul <2 x float>
+define void @fmul_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 {
+  %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
+  %add = fmul <2 x float> %vec, %b
+  store <2 x float> %add, <2 x float> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fmul_v3f32'
+; ALL: estimated cost of 3 for {{.*}} fmul <3 x float>
+define void @fmul_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 {
+  %vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
+  %add = fmul <3 x float> %vec, %b
+  store <3 x float> %add, <3 x float> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fmul_f64'
+; FASTF64: estimated cost of 2 for {{.*}} fmul 
double
+; SLOWF64: estimated cost of 3 for {{.*}} fmul double
+define void @fmul_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 {
+  %vec = load double, double addrspace(1)* %vaddr
+  %add = fmul double %vec, %b
+  store double %add, double addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fmul_v2f64'
+; FASTF64: estimated cost of 4 for {{.*}} fmul <2 x double>
+; SLOWF64: estimated cost of 6 for {{.*}} fmul <2 x double>
+define void @fmul_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) #0 {
+  %vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
+  %add = fmul <2 x double> %vec, %b
+  store <2 x double> %add, <2 x double> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fmul_v3f64'
+; FASTF64: estimated cost of 6 for {{.*}} fmul <3 x double>
+; SLOWF64: estimated cost of 9 for {{.*}} fmul <3 x double>
+define void @fmul_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr, <3 x double> %b) #0 {
+  %vec = load <3 x double>, <3 x double> addrspace(1)* %vaddr
+  %add = fmul <3 x double> %vec, %b
+  store <3 x double> %add, <3 x double> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fmul_f16'
+; ALL: estimated cost of 1 for {{.*}} fmul half
+define void @fmul_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 {
+  %vec = load half, half addrspace(1)* %vaddr
+  %add = fmul half %vec, %b
+  store half %add, half addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fmul_v2f16'
+; ALL: estimated cost of 2 for {{.*}} fmul <2 x half>
+define void @fmul_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 {
+  %vec = load <2 x half>, <2 x half> addrspace(1)* %vaddr
+  %add = fmul <2 x half> %vec, %b
+  store <2 x half> %add, <2 x half> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fmul_v4f16'
+; ALL: estimated cost of 4 for {{.*}} fmul <4 x half>
+define void @fmul_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 {
+  %vec = load <4 x half>, <4 x half> addrspace(1)* %vaddr
+  %add = fmul <4 x half> %vec, %b
+  store <4 x half> %add, <4 x half> addrspace(1)* %out
+  ret void
+}
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/Analysis/CostModel/AMDGPU/fsub.ll b/llvm/test/Analysis/CostModel/AMDGPU/fsub.ll
new file mode 100644
index 00000000000..e0850be9867
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/AMDGPU/fsub.ll
@@ -0,0 +1,86 @@
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=+half-rate-64-ops < %s | FileCheck -check-prefix=FASTF64 -check-prefix=ALL %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=-half-rate-64-ops < %s | FileCheck -check-prefix=SLOWF64 -check-prefix=ALL %s
+
+; ALL: 'fsub_f32'
+; ALL: estimated cost of 1 for {{.*}} fsub float
+define void @fsub_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 {
+  %vec = load float, float addrspace(1)* %vaddr
+  %add = fsub float %vec, %b
+  store float %add, float addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fsub_v2f32'
+; ALL: estimated cost of 2 for {{.*}} fsub <2 x float>
+define void @fsub_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 {
+  %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
+  %add = fsub <2 x float> %vec, %b
+  store <2 x float> %add, <2 x float> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fsub_v3f32'
+; ALL: estimated cost of 3 for {{.*}} fsub <3 x float>
+define void @fsub_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 {
+  %vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
+  %add = fsub <3 x float> %vec, %b
+  store <3 x float> %add, <3 x float> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fsub_f64'
+; FASTF64: estimated cost of 2 for {{.*}} fsub double
+; SLOWF64: estimated cost of 3 for {{.*}} fsub double
+define void @fsub_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 {
+  %vec = load double, double addrspace(1)* %vaddr
+  %add = fsub double %vec, %b
+  store double %add, double addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fsub_v2f64'
+; FASTF64: estimated cost of 4 for {{.*}} fsub <2 x double>
+; SLOWF64: estimated cost of 6 for {{.*}} fsub <2 x double>
+define void @fsub_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) #0 {
+  %vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
+  %add = fsub <2 x double> %vec, %b
+  store <2 x double> %add, <2 x double> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fsub_v3f64'
+; FASTF64: estimated cost of 6 for {{.*}} fsub <3 x double>
+; SLOWF64: estimated cost of 9 for {{.*}} fsub <3 x double>
+define void @fsub_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr, <3 x double> %b) #0 {
+  %vec = load <3 x double>, <3 x double> addrspace(1)* %vaddr
+  %add = fsub <3 x double> %vec, %b
+  store <3 x double> %add, <3 x double> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fsub_f16'
+; ALL: estimated cost of 1 for {{.*}} fsub half
+define void @fsub_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 {
+  %vec = load half, half addrspace(1)* %vaddr
+  %add = fsub half %vec, %b
+  store half %add, half addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fsub_v2f16'
+; ALL: estimated cost of 2 for {{.*}} fsub <2 x half>
+define void @fsub_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 {
+  %vec = load <2 x half>, <2 x half> addrspace(1)* %vaddr
+  %add = fsub <2 x half> %vec, %b
+  store <2 x half> %add, <2 x half> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fsub_v4f16'
+; ALL: estimated cost of 4 for {{.*}} fsub <4 x half>
+define void @fsub_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 {
+  %vec = load <4 x half>, <4 x half> addrspace(1)* %vaddr
+  %add = fsub <4 x half> %vec, %b
+  store <4 x half> %add, <4 x half> addrspace(1)* %out
+  ret void
+}
+
+attributes #0 = { nounwind }