author     Sanjay Patel <spatel@rotateright.com>    2015-07-09 22:48:54 +0000
committer  Sanjay Patel <spatel@rotateright.com>    2015-07-09 22:48:54 +0000
commit     ea81edf351c110c97407bde489a43c48b8cc3a9c (patch)
tree       0f2c1f3988f74b88d8f7ac4e592daef90bc42c54 /llvm/test/CodeGen
parent     549820b66aa30c74216c5d8b884193e3b9a8cbd0 (diff)
[x86] enable machine combiner reassociations for scalar double-precision adds
llvm-svn: 241871
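
For context: the machine combiner rewrites a serial chain of dependent scalar FP adds into a shallower tree so that independent work can overlap a long-latency instruction such as a divide. A minimal sketch in LLVM IR, mirroring the new test below (the function name is illustrative; at the time this landed the transform required unsafe FP math to be enabled, e.g. via llc -enable-unsafe-fp-math):

    ; Before reassociation the adds are serialized behind the divide:
    ;   %t2 = %x3 + (%x2 + (%x0 / %x1))
    define double @adds_behind_divide(double %x0, double %x1, double %x2, double %x3) {
      %t0 = fdiv double %x0, %x1
      %t1 = fadd double %x2, %t0   ; depends on the divide
      %t2 = fadd double %x3, %t1   ; depends on %t1
      ret double %t2
    }
    ; After reassociation, (%x2 + %x3) has no dependence on the divide
    ; and can issue while it is still in flight:
    ;   %t2 = (%x0 / %x1) + (%x2 + %x3)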
Diffstat (limited to 'llvm/test/CodeGen')
-rw-r--r--  llvm/test/CodeGen/X86/machine-combiner.ll | 24
1 file changed, 23 insertions, 1 deletion
diff --git a/llvm/test/CodeGen/X86/machine-combiner.ll b/llvm/test/CodeGen/X86/machine-combiner.ll
index 2286da7e94d..ae059a1ed08 100644
--- a/llvm/test/CodeGen/X86/machine-combiner.ll
+++ b/llvm/test/CodeGen/X86/machine-combiner.ll
@@ -144,7 +144,7 @@ define float @reassociate_adds6(float %x0, float %x1, float %x2, float %x3) {
   ret float %t2
 }
 
-; Verify that SSE and AVX scalar single precison multiplies are reassociated.
+; Verify that SSE and AVX scalar single-precision multiplies are reassociated.
 
 define float @reassociate_muls1(float %x0, float %x1, float %x2, float %x3) {
 ; SSE-LABEL: reassociate_muls1:
@@ -165,3 +165,25 @@ define float @reassociate_muls1(float %x0, float %x1, float %x2, float %x3) {
   %t2 = fmul float %x3, %t1
   ret float %t2
 }
+
+; Verify that SSE and AVX scalar double-precision adds are reassociated.
+
+define double @reassociate_adds_double(double %x0, double %x1, double %x2, double %x3) {
+; SSE-LABEL: reassociate_adds_double:
+; SSE:       # BB#0:
+; SSE-NEXT:    divsd %xmm1, %xmm0
+; SSE-NEXT:    addsd %xmm3, %xmm2
+; SSE-NEXT:    addsd %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: reassociate_adds_double:
+; AVX:       # BB#0:
+; AVX-NEXT:    vdivsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vaddsd %xmm3, %xmm2, %xmm1
+; AVX-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %t0 = fdiv double %x0, %x1
+  %t1 = fadd double %x2, %t0
+  %t2 = fadd double %x3, %t1
+  ret double %t2
+}
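
As a usage note, CodeGen tests like this one are normally run through LLVM's lit harness; a sketch, assuming a configured build tree where the llvm-lit wrapper was generated (the exact llc RUN lines with their FileCheck prefixes sit at the top of machine-combiner.ll and are not part of this hunk):

    ./bin/llvm-lit -v llvm/test/CodeGen/X86/machine-combiner.ll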