| author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2015-12-02 09:07:55 +0000 |
|---|---|---|
| committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2015-12-02 09:07:55 +0000 |
| commit | 3fc3454a0c84de7e7f5a19714680f9606e436c89 (patch) | |
| tree | 5fc50626f17bd9d0a0c659dcd753cf33087adc82 /llvm/test/CodeGen/X86/fma_patterns.ll | |
| parent | a1a40cce9fa06285247d79a7f1a63c209c3c4b9f (diff) | |
[X86][FMA] Optimize FNEG(FMUL) Patterns
On FMA targets, we can avoid having to load a sign-mask constant to negate a float/double multiply by instead using an FNMSUB, which computes -(X*Y)-0.
Fix for PR24366
Differential Revision: http://reviews.llvm.org/D14909
llvm-svn: 254495
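
The fold only fires when signed zeros may be ignored, which is why the new tests carry the `nsz` flag (or the `"unsafe-fp-math"="true"` attribute) and why `test_v4f64_fneg_fmul_no_nsz` checks that a plain `fmul` is left alone. A minimal sketch of the input pattern as a hypothetical standalone test (the function name `neg_mul` and the single RUN/CHECK pair are illustrative, adapted from the diff below):

```llvm
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+fma -fp-contract=fast | FileCheck %s

define double @neg_mul(double %x, double %y) #0 {
; CHECK-LABEL: neg_mul:
; CHECK:    vxorps %xmm2, %xmm2, %xmm2
; CHECK:    vfnmsub213sd %xmm2, %xmm1, %xmm0
  %m = fmul nsz double %x, %y ; m = x * y
  %n = fsub double -0.0, %m   ; IR's canonical fneg: n = -(x * y)
  ret double %n               ; lowered as -(x*y) - 0 via vfnmsub213sd
}

attributes #0 = { "unsafe-fp-math"="true" }
```

The `vxorps` zeroes the addend register in place of a constant-pool load; without the combine, the same IR still compiles to a multiply followed by an XOR with a -0.0 bit-mask loaded from memory, as the `no_nsz` variant in the diff demonstrates.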
Diffstat (limited to 'llvm/test/CodeGen/X86/fma_patterns.ll')
| -rw-r--r-- | llvm/test/CodeGen/X86/fma_patterns.ll | 85 |
1 file changed, 84 insertions, 1 deletion
```diff
diff --git a/llvm/test/CodeGen/X86/fma_patterns.ll b/llvm/test/CodeGen/X86/fma_patterns.ll
index e3295e45823..0f0dd20da04 100644
--- a/llvm/test/CodeGen/X86/fma_patterns.ll
+++ b/llvm/test/CodeGen/X86/fma_patterns.ll
@@ -2,7 +2,7 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+fma -fp-contract=fast | FileCheck %s --check-prefix=ALL --check-prefix=FMA
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+fma4,+fma -fp-contract=fast | FileCheck %s --check-prefix=ALL --check-prefix=FMA4
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+fma4 -fp-contract=fast | FileCheck %s --check-prefix=ALL --check-prefix=FMA4
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl -fp-contract=fast | FileCheck %s --check-prefix=ALL --check-prefix=AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512dq,+avx512vl -fp-contract=fast | FileCheck %s --check-prefix=ALL --check-prefix=AVX512
 ;
 ; Pattern: (fadd (fmul x, y), z) -> (fmadd x,y,z)
@@ -1109,4 +1109,87 @@ define <4 x float> @test_v4f32_fma_fmul_x_c1_c2_y(<4 x float> %x, <4 x float> %y
   ret <4 x float> %a
 }
 
+; Pattern: (fneg (fmul x, y)) -> (fnmsub x, y, 0)
+
+define double @test_f64_fneg_fmul(double %x, double %y) #0 {
+; FMA-LABEL: test_f64_fneg_fmul:
+; FMA:       # BB#0:
+; FMA-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; FMA-NEXT:    vfnmsub213sd %xmm2, %xmm1, %xmm0
+; FMA-NEXT:    retq
+;
+; FMA4-LABEL: test_f64_fneg_fmul:
+; FMA4:       # BB#0:
+; FMA4-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; FMA4-NEXT:    vfnmsubsd %xmm2, %xmm1, %xmm0, %xmm0
+; FMA4-NEXT:    retq
+;
+; AVX512-LABEL: test_f64_fneg_fmul:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; AVX512-NEXT:    vfnmsub213sd %xmm2, %xmm0, %xmm1
+; AVX512-NEXT:    vmovaps %zmm1, %zmm0
+; AVX512-NEXT:    retq
+  %m = fmul nsz double %x, %y
+  %n = fsub double -0.0, %m
+  ret double %n
+}
+
+define <4 x float> @test_v4f32_fneg_fmul(<4 x float> %x, <4 x float> %y) #0 {
+; FMA-LABEL: test_v4f32_fneg_fmul:
+; FMA:       # BB#0:
+; FMA-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; FMA-NEXT:    vfnmsub213ps %xmm2, %xmm1, %xmm0
+; FMA-NEXT:    retq
+;
+; FMA4-LABEL: test_v4f32_fneg_fmul:
+; FMA4:       # BB#0:
+; FMA4-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; FMA4-NEXT:    vfnmsubps %xmm2, %xmm1, %xmm0, %xmm0
+; FMA4-NEXT:    retq
+;
+; AVX512-LABEL: test_v4f32_fneg_fmul:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; AVX512-NEXT:    vfnmsub213ps %xmm2, %xmm1, %xmm0
+; AVX512-NEXT:    retq
+  %m = fmul nsz <4 x float> %x, %y
+  %n = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %m
+  ret <4 x float> %n
+}
+
+define <4 x double> @test_v4f64_fneg_fmul(<4 x double> %x, <4 x double> %y) #0 {
+; FMA-LABEL: test_v4f64_fneg_fmul:
+; FMA:       # BB#0:
+; FMA-NEXT:    vxorpd %ymm2, %ymm2, %ymm2
+; FMA-NEXT:    vfnmsub213pd %ymm2, %ymm1, %ymm0
+; FMA-NEXT:    retq
+;
+; FMA4-LABEL: test_v4f64_fneg_fmul:
+; FMA4:       # BB#0:
+; FMA4-NEXT:    vxorpd %ymm2, %ymm2, %ymm2
+; FMA4-NEXT:    vfnmsubpd %ymm2, %ymm1, %ymm0, %ymm0
+; FMA4-NEXT:    retq
+;
+; AVX512-LABEL: test_v4f64_fneg_fmul:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vxorps %ymm2, %ymm2, %ymm2
+; AVX512-NEXT:    vfnmsub213pd %ymm2, %ymm1, %ymm0
+; AVX512-NEXT:    retq
+  %m = fmul nsz <4 x double> %x, %y
+  %n = fsub <4 x double> <double -0.0, double -0.0, double -0.0, double -0.0>, %m
+  ret <4 x double> %n
+}
+
+define <4 x double> @test_v4f64_fneg_fmul_no_nsz(<4 x double> %x, <4 x double> %y) #0 {
+; ALL-LABEL: test_v4f64_fneg_fmul_no_nsz:
+; ALL:       # BB#0:
+; ALL-NEXT:    vmulpd %ymm1, %ymm0, %ymm0
+; ALL-NEXT:    vxorpd {{.*}}(%rip), %ymm0, %ymm0
+; ALL-NEXT:    retq
+  %m = fmul <4 x double> %x, %y
+  %n = fsub <4 x double> <double -0.0, double -0.0, double -0.0, double -0.0>, %m
+  ret <4 x double> %n
+}
+
 attributes #0 = { "unsafe-fp-math"="true" }
```
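
To check the FMA lowering by hand, the file's first RUN line can be executed directly (a sketch assuming `llc` and `FileCheck` from an LLVM build are on `PATH`, with the test path substituted for `%s`):

```sh
llc < llvm/test/CodeGen/X86/fma_patterns.ll \
    -mtriple=x86_64-unknown-unknown -mattr=+avx,+fma -fp-contract=fast \
  | FileCheck llvm/test/CodeGen/X86/fma_patterns.ll \
    --check-prefix=ALL --check-prefix=FMA
```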