diff options
Diffstat (limited to 'llvm')
-rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.cpp        |  2
-rw-r--r--  llvm/test/CodeGen/X86/machine-combiner.ll   | 24
2 files changed, 25 insertions, 1 deletion
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp index 5484ae91855..fdfdac90033 100644 --- a/llvm/lib/Target/X86/X86InstrInfo.cpp +++ b/llvm/lib/Target/X86/X86InstrInfo.cpp @@ -6408,7 +6408,9 @@ static bool hasReassocSibling(const MachineInstr &Inst, bool &Commuted) { // 2. Other math / logic operations (and, or) static bool isAssociativeAndCommutative(unsigned Opcode) { switch (Opcode) { + case X86::ADDSDrr: case X86::ADDSSrr: + case X86::VADDSDrr: case X86::VADDSSrr: case X86::MULSSrr: case X86::VMULSSrr: diff --git a/llvm/test/CodeGen/X86/machine-combiner.ll b/llvm/test/CodeGen/X86/machine-combiner.ll index 2286da7e94d..ae059a1ed08 100644 --- a/llvm/test/CodeGen/X86/machine-combiner.ll +++ b/llvm/test/CodeGen/X86/machine-combiner.ll @@ -144,7 +144,7 @@ define float @reassociate_adds6(float %x0, float %x1, float %x2, float %x3) { ret float %t2 } -; Verify that SSE and AVX scalar single precison multiplies are reassociated. +; Verify that SSE and AVX scalar single-precision multiplies are reassociated. define float @reassociate_muls1(float %x0, float %x1, float %x2, float %x3) { ; SSE-LABEL: reassociate_muls1: @@ -165,3 +165,25 @@ define float @reassociate_muls1(float %x0, float %x1, float %x2, float %x3) { %t2 = fmul float %x3, %t1 ret float %t2 } + +; Verify that SSE and AVX scalar double-precision adds are reassociated. + +define double @reassociate_adds_double(double %x0, double %x1, double %x2, double %x3) { +; SSE-LABEL: reassociate_adds_double: +; SSE: # BB#0: +; SSE-NEXT: divsd %xmm1, %xmm0 +; SSE-NEXT: addsd %xmm3, %xmm2 +; SSE-NEXT: addsd %xmm2, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: reassociate_adds_double: +; AVX: # BB#0: +; AVX-NEXT: vdivsd %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vaddsd %xmm3, %xmm2, %xmm1 +; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %t0 = fdiv double %x0, %x1 + %t1 = fadd double %x2, %t0 + %t2 = fadd double %x3, %t1 + ret double %t2 +}