summary refs log tree commit diff stats
path: root/llvm/test/CodeGen/PowerPC/machine-combiner.ll
diff options
context:
space:
mode:
author: Hal Finkel <hfinkel@anl.gov> 2015-07-15 08:23:05 +0000
committer: Hal Finkel <hfinkel@anl.gov> 2015-07-15 08:23:05 +0000
commit: 5d36b230b5ea88cb6e06f604431f856808703969 (patch)
tree: 6aa71cd2555797b69368d5f38dceef8051bc7c5a /llvm/test/CodeGen/PowerPC/machine-combiner.ll
parent: 673b493e9859dcb63ae035a4b1de0d3cc1cb0e1d (diff)
download: bcm5719-llvm-5d36b230b5ea88cb6e06f604431f856808703969.tar.gz
download: bcm5719-llvm-5d36b230b5ea88cb6e06f604431f856808703969.zip
[PowerPC] Use the MachineCombiner to reassociate fadd/fmul
This is a direct port of the code from the X86 backend (r239486/r240361), which uses the MachineCombiner to reassociate (floating-point) adds/muls to increase ILP, to the PowerPC backend. The rationale is the same. There is a lot of copy-and-paste here between the X86 code and the PowerPC code, and we should extract at least some of this into CodeGen somewhere. However, I don't want to do that until this code is enhanced to handle FMAs as well. After that, we'll be in a better position to extract the common parts. llvm-svn: 242279
Diffstat (limited to 'llvm/test/CodeGen/PowerPC/machine-combiner.ll')
-rw-r--r--  llvm/test/CodeGen/PowerPC/machine-combiner.ll  188
1 files changed, 188 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/PowerPC/machine-combiner.ll b/llvm/test/CodeGen/PowerPC/machine-combiner.ll
new file mode 100644
index 00000000000..93fb2020d53
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/machine-combiner.ll
@@ -0,0 +1,188 @@
+; RUN: llc -O3 -mcpu=pwr7 -enable-unsafe-fp-math < %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-PWR
+; RUN: llc -O3 -mcpu=a2q -enable-unsafe-fp-math < %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-QPX
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Verify that the first two adds are independent regardless of how the inputs are
+; commuted. The destination registers are used as source registers for the third add.
+
+define float @reassociate_adds1(float %x0, float %x1, float %x2, float %x3) {
+; CHECK-LABEL: reassociate_adds1:
+; CHECK: # BB#0:
+; CHECK: fadds [[REG0:[0-9]+]], 1, 2
+; CHECK: fadds [[REG1:[0-9]+]], 3, 4
+; CHECK: fadds 1, [[REG0]], [[REG1]]
+; CHECK-NEXT: blr
+
+ %t0 = fadd float %x0, %x1
+ %t1 = fadd float %t0, %x2
+ %t2 = fadd float %t1, %x3
+ ret float %t2
+}
+
+define float @reassociate_adds2(float %x0, float %x1, float %x2, float %x3) {
+; CHECK-LABEL: reassociate_adds2:
+; CHECK: # BB#0:
+; CHECK: fadds [[REG0:[0-9]+]], 1, 2
+; CHECK: fadds [[REG1:[0-9]+]], 3, 4
+; CHECK: fadds 1, [[REG0]], [[REG1]]
+; CHECK-NEXT: blr
+
+ %t0 = fadd float %x0, %x1
+ %t1 = fadd float %x2, %t0
+ %t2 = fadd float %t1, %x3
+ ret float %t2
+}
+
+define float @reassociate_adds3(float %x0, float %x1, float %x2, float %x3) {
+; CHECK-LABEL: reassociate_adds3:
+; CHECK: # BB#0:
+; CHECK: fadds [[REG0:[0-9]+]], 1, 2
+; CHECK: fadds [[REG1:[0-9]+]], 3, 4
+; CHECK: fadds 1, [[REG0]], [[REG1]]
+; CHECK-NEXT: blr
+
+ %t0 = fadd float %x0, %x1
+ %t1 = fadd float %t0, %x2
+ %t2 = fadd float %x3, %t1
+ ret float %t2
+}
+
+define float @reassociate_adds4(float %x0, float %x1, float %x2, float %x3) {
+; CHECK-LABEL: reassociate_adds4:
+; CHECK: # BB#0:
+; CHECK: fadds [[REG0:[0-9]+]], 1, 2
+; CHECK: fadds [[REG1:[0-9]+]], 3, 4
+; CHECK: fadds 1, [[REG0]], [[REG1]]
+; CHECK-NEXT: blr
+
+ %t0 = fadd float %x0, %x1
+ %t1 = fadd float %x2, %t0
+ %t2 = fadd float %x3, %t1
+ ret float %t2
+}
+
+; Verify that we reassociate some of these ops. The optimal balanced tree of adds is not
+; produced because that would cost more compile time.
+
+define float @reassociate_adds5(float %x0, float %x1, float %x2, float %x3, float %x4, float %x5, float %x6, float %x7) {
+; CHECK-LABEL: reassociate_adds5:
+; CHECK: # BB#0:
+; CHECK: fadds [[REG12:[0-9]+]], 5, 6
+; CHECK: fadds [[REG0:[0-9]+]], 1, 2
+; CHECK: fadds [[REG11:[0-9]+]], 3, 4
+; CHECK: fadds [[REG13:[0-9]+]], [[REG12]], 7
+; CHECK: fadds [[REG1:[0-9]+]], [[REG0]], [[REG11]]
+; CHECK: fadds [[REG2:[0-9]+]], [[REG1]], [[REG13]]
+; CHECK: fadds 1, [[REG2]], 8
+; CHECK-NEXT: blr
+
+ %t0 = fadd float %x0, %x1
+ %t1 = fadd float %t0, %x2
+ %t2 = fadd float %t1, %x3
+ %t3 = fadd float %t2, %x4
+ %t4 = fadd float %t3, %x5
+ %t5 = fadd float %t4, %x6
+ %t6 = fadd float %t5, %x7
+ ret float %t6
+}
+
+; Verify that we reassociate vector instructions too.
+
+define <4 x float> @vector_reassociate_adds1(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) {
+; CHECK-LABEL: vector_reassociate_adds1:
+; CHECK: # BB#0:
+; CHECK-QPX: qvfadds [[REG0:[0-9]+]], 1, 2
+; CHECK-QPX: qvfadds [[REG1:[0-9]+]], 3, 4
+; CHECK-QPX: qvfadds 1, [[REG0]], [[REG1]]
+; CHECK-PWR: xvaddsp [[REG0:[0-9]+]], 34, 35
+; CHECK-PWR: xvaddsp [[REG1:[0-9]+]], 36, 37
+; CHECK-PWR: xvaddsp 34, [[REG0]], [[REG1]]
+; CHECK-NEXT: blr
+
+ %t0 = fadd <4 x float> %x0, %x1
+ %t1 = fadd <4 x float> %t0, %x2
+ %t2 = fadd <4 x float> %t1, %x3
+ ret <4 x float> %t2
+}
+
+define <4 x float> @vector_reassociate_adds2(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) {
+; CHECK-LABEL: vector_reassociate_adds2:
+; CHECK: # BB#0:
+; CHECK-QPX: qvfadds [[REG0:[0-9]+]], 1, 2
+; CHECK-QPX: qvfadds [[REG1:[0-9]+]], 3, 4
+; CHECK-QPX: qvfadds 1, [[REG0]], [[REG1]]
+; CHECK-PWR: xvaddsp [[REG0:[0-9]+]], 34, 35
+; CHECK-PWR: xvaddsp [[REG1:[0-9]+]], 36, 37
+; CHECK-PWR: xvaddsp 34, [[REG0]], [[REG1]]
+; CHECK-NEXT: blr
+
+ %t0 = fadd <4 x float> %x0, %x1
+ %t1 = fadd <4 x float> %x2, %t0
+ %t2 = fadd <4 x float> %t1, %x3
+ ret <4 x float> %t2
+}
+
+define <4 x float> @vector_reassociate_adds3(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) {
+; CHECK-LABEL: vector_reassociate_adds3:
+; CHECK: # BB#0:
+; CHECK-QPX: qvfadds [[REG0:[0-9]+]], 1, 2
+; CHECK-QPX: qvfadds [[REG1:[0-9]+]], 3, 4
+; CHECK-QPX: qvfadds 1, [[REG0]], [[REG1]]
+; CHECK-PWR: xvaddsp [[REG0:[0-9]+]], 34, 35
+; CHECK-PWR: xvaddsp [[REG1:[0-9]+]], 36, 37
+; CHECK-PWR: xvaddsp 34, [[REG0]], [[REG1]]
+; CHECK-NEXT: blr
+
+ %t0 = fadd <4 x float> %x0, %x1
+ %t1 = fadd <4 x float> %t0, %x2
+ %t2 = fadd <4 x float> %x3, %t1
+ ret <4 x float> %t2
+}
+
+define <4 x float> @vector_reassociate_adds4(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) {
+; CHECK-LABEL: vector_reassociate_adds4:
+; CHECK: # BB#0:
+; CHECK-QPX: qvfadds [[REG0:[0-9]+]], 1, 2
+; CHECK-QPX: qvfadds [[REG1:[0-9]+]], 3, 4
+; CHECK-QPX: qvfadds 1, [[REG0]], [[REG1]]
+; CHECK-PWR: xvaddsp [[REG0:[0-9]+]], 34, 35
+; CHECK-PWR: xvaddsp [[REG1:[0-9]+]], 36, 37
+; CHECK-PWR: xvaddsp 34, [[REG0]], [[REG1]]
+; CHECK-NEXT: blr
+
+ %t0 = fadd <4 x float> %x0, %x1
+ %t1 = fadd <4 x float> %x2, %t0
+ %t2 = fadd <4 x float> %x3, %t1
+ ret <4 x float> %t2
+}
+
+define float @reassociate_adds6(float %x0, float %x1, float %x2, float %x3) {
+ %t0 = fdiv float %x0, %x1
+ %t1 = fadd float %x2, %t0
+ %t2 = fadd float %x3, %t1
+ ret float %t2
+}
+
+define float @reassociate_muls1(float %x0, float %x1, float %x2, float %x3) {
+ %t0 = fdiv float %x0, %x1
+ %t1 = fmul float %x2, %t0
+ %t2 = fmul float %x3, %t1
+ ret float %t2
+}
+
+define double @reassociate_adds_double(double %x0, double %x1, double %x2, double %x3) {
+ %t0 = fdiv double %x0, %x1
+ %t1 = fadd double %x2, %t0
+ %t2 = fadd double %x3, %t1
+ ret double %t2
+}
+
+define double @reassociate_muls_double(double %x0, double %x1, double %x2, double %x3) {
+ %t0 = fdiv double %x0, %x1
+ %t1 = fmul double %x2, %t0
+ %t2 = fmul double %x3, %t1
+ ret double %t2
+}
+
+
OpenPOWER on IntegriCloud