| author | Nemanja Ivanovic <nemanja.i.ibm@gmail.com> | 2015-05-21 19:32:49 +0000 |
|---|---|---|
| committer | Nemanja Ivanovic <nemanja.i.ibm@gmail.com> | 2015-05-21 19:32:49 +0000 |
| commit | f02def6cbc6c72d5e3433a6567161c88d7f21615 (patch) | |
| tree | 897fe578d16efe96a6fa4030d5e8e4baab9259e1 /llvm/test/CodeGen/PowerPC | |
| parent | 0709a7bd1a75191a13580ca2a505fa0f7c4326bd (diff) | |
Add support for VSX scalar single-precision arithmetic in the PPC target
http://reviews.llvm.org/D9891
Following up on the VSX single-precision loads and stores added earlier, this
adds support for elementary arithmetic operations on single-precision values
in VSX registers. These instructions use the new VSSRC register class.
Instructions added:
xsaddsp
xsdivsp
xsmulsp
xsresp
xsrsqrtesp
xssqrtsp
xssubsp
llvm-svn: 237937
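For reference, here is a minimal sketch of the kind of IR the new patterns handle (the function and global names here are illustrative, not taken from the tests below): compiled with `llc -mcpu=pwr8` for a powerpc64 triple, a single-precision `fadd` should now select `xsaddsp` instead of round-tripping through the double-precision form.

```llvm
; Illustrative only: a single-precision add that the new VSX patterns
; should lower to xsaddsp on a pwr8 (VSX-capable) target.
; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 | FileCheck %s
@x = global float 3.000000e+00, align 4
@y = global float 4.000000e+00, align 4

define float @add_sp() {
entry:
  %0 = load float, float* @x, align 4
  %1 = load float, float* @y, align 4
  %sum = fadd float %0, %1
  ret float %sum
; CHECK: xsaddsp
}
```

The reciprocal-estimate forms (xsresp, xsrsqrtesp) are only selected under fast-math; accordingly, the vsx-recip-est.ll test below passes -enable-unsafe-fp-math and uses `fdiv fast`.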
Diffstat (limited to 'llvm/test/CodeGen/PowerPC')
| -rw-r--r-- | llvm/test/CodeGen/PowerPC/vsx-elementary-arith.ll | 120 |
| -rw-r--r-- | llvm/test/CodeGen/PowerPC/vsx-recip-est.ll | 62 |
2 files changed, 182 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/PowerPC/vsx-elementary-arith.ll b/llvm/test/CodeGen/PowerPC/vsx-elementary-arith.ll
new file mode 100644
index 00000000000..d8f76bb989e
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/vsx-elementary-arith.ll
@@ -0,0 +1,120 @@
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 | FileCheck %s
+@a = global float 3.000000e+00, align 4
+@b = global float 4.000000e+00, align 4
+@c = global double 3.000000e+00, align 8
+@d = global double 4.000000e+00, align 8
+
+; Function Attrs: nounwind
+define float @emit_xsaddsp() {
+entry:
+  %0 = load float, float* @a, align 4
+  %1 = load float, float* @b, align 4
+  %add = fadd float %0, %1
+  ret float %add
+; CHECK-LABEL: @emit_xsaddsp
+; CHECK: xsaddsp {{[0-9]+}}
+}
+
+; Function Attrs: nounwind
+define float @emit_xssubsp() {
+entry:
+  %0 = load float, float* @a, align 4
+  %1 = load float, float* @b, align 4
+  %sub = fsub float %0, %1
+  ret float %sub
+; CHECK-LABEL: @emit_xssubsp
+; CHECK: xssubsp {{[0-9]+}}
+}
+
+; Function Attrs: nounwind
+define float @emit_xsdivsp() {
+entry:
+  %0 = load float, float* @a, align 4
+  %1 = load float, float* @b, align 4
+  %div = fdiv float %0, %1
+  ret float %div
+; CHECK-LABEL: @emit_xsdivsp
+; CHECK: xsdivsp {{[0-9]+}}
+}
+
+; Function Attrs: nounwind
+define float @emit_xsmulsp() {
+entry:
+  %0 = load float, float* @a, align 4
+  %1 = load float, float* @b, align 4
+  %mul = fmul float %0, %1
+  ret float %mul
+; CHECK-LABEL: @emit_xsmulsp
+; CHECK: xsmulsp {{[0-9]+}}
+}
+
+; Function Attrs: nounwind
+define float @emit_xssqrtsp() {
+entry:
+  %0 = load float, float* @b, align 4
+  %call = call float @sqrtf(float %0)
+  ret float %call
+; CHECK-LABEL: @emit_xssqrtsp
+; CHECK: xssqrtsp {{[0-9]+}}
+}
+
+; Function Attrs: nounwind
+declare float @sqrtf(float)
+
+; Function Attrs: nounwind
+define double @emit_xsadddp() {
+entry:
+  %0 = load double, double* @c, align 8
+  %1 = load double, double* @d, align 8
+  %add = fadd double %0, %1
+  ret double %add
+; CHECK-LABEL: @emit_xsadddp
+; CHECK: xsadddp {{[0-9]+}}
+}
+
+; Function Attrs: nounwind
+define double @emit_xssubdp() {
+entry:
+  %0 = load double, double* @c, align 8
+  %1 = load double, double* @d, align 8
+  %sub = fsub double %0, %1
+  ret double %sub
+; CHECK-LABEL: @emit_xssubdp
+; CHECK: xssubdp {{[0-9]+}}
+}
+
+; Function Attrs: nounwind
+define double @emit_xsdivdp() {
+entry:
+  %0 = load double, double* @c, align 8
+  %1 = load double, double* @d, align 8
+  %div = fdiv double %0, %1
+  ret double %div
+; CHECK-LABEL: @emit_xsdivdp
+; CHECK: xsdivdp {{[0-9]+}}
+}
+
+; Function Attrs: nounwind
+define double @emit_xsmuldp() {
+entry:
+  %0 = load double, double* @c, align 8
+  %1 = load double, double* @d, align 8
+  %mul = fmul double %0, %1
+  ret double %mul
+; CHECK-LABEL: @emit_xsmuldp
+; CHECK: xsmuldp {{[0-9]+}}
+}
+
+; Function Attrs: nounwind
+define double @emit_xssqrtdp() {
+entry:
+  %0 = load double, double* @d, align 8
+  %call = call double @sqrt(double %0)
+  ret double %call
+; CHECK-LABEL: @emit_xssqrtdp
+; CHECK: xssqrtdp {{[0-9]+}}
+}
+
+; Function Attrs: nounwind
+declare double @sqrt(double)
diff --git a/llvm/test/CodeGen/PowerPC/vsx-recip-est.ll b/llvm/test/CodeGen/PowerPC/vsx-recip-est.ll
new file mode 100644
index 00000000000..f589c6c103e
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/vsx-recip-est.ll
@@ -0,0 +1,62 @@
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 -enable-unsafe-fp-math | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 -enable-unsafe-fp-math | FileCheck %s
+@a = global float 3.000000e+00, align 4
+@b = global float 4.000000e+00, align 4
+@c = global double 3.000000e+00, align 8
+@d = global double 4.000000e+00, align 8
+
+; Function Attrs: nounwind
+define float @emit_xsresp() {
+entry:
+  %0 = load float, float* @a, align 4
+  %1 = load float, float* @b, align 4
+  %div = fdiv fast float %0, %1
+  ret float %div
+; CHECK-LABEL: @emit_xsresp
+; CHECK: xsresp {{[0-9]+}}
+}
+
+; Function Attrs: nounwind
+define float @emit_xsrsqrtesp(float %f) {
+entry:
+  %f.addr = alloca float, align 4
+  store float %f, float* %f.addr, align 4
+  %0 = load float, float* %f.addr, align 4
+  %1 = load float, float* @b, align 4
+  %2 = call float @llvm.sqrt.f32(float %1)
+  %div = fdiv fast float %0, %2
+  ret float %div
+; CHECK-LABEL: @emit_xsrsqrtesp
+; CHECK: xsrsqrtesp {{[0-9]+}}
+}
+
+; Function Attrs: nounwind readnone
+declare float @llvm.sqrt.f32(float)
+
+; Function Attrs: nounwind
+define double @emit_xsredp() {
+entry:
+  %0 = load double, double* @c, align 8
+  %1 = load double, double* @d, align 8
+  %div = fdiv fast double %0, %1
+  ret double %div
+; CHECK-LABEL: @emit_xsredp
+; CHECK: xsredp {{[0-9]+}}
+}
+
+; Function Attrs: nounwind
+define double @emit_xsrsqrtedp(double %f) {
+entry:
+  %f.addr = alloca double, align 8
+  store double %f, double* %f.addr, align 8
+  %0 = load double, double* %f.addr, align 8
+  %1 = load double, double* @d, align 8
+  %2 = call double @llvm.sqrt.f64(double %1)
+  %div = fdiv fast double %0, %2
+  ret double %div
+; CHECK-LABEL: @emit_xsrsqrtedp
+; CHECK: xsrsqrtedp {{[0-9]+}}
+}
+
+; Function Attrs: nounwind readnone
+declare double @llvm.sqrt.f64(double) #1