author | Sanjay Patel <spatel@rotateright.com> | 2019-07-29 13:50:25 +0000 |
---|---|---|
committer | Sanjay Patel <spatel@rotateright.com> | 2019-07-29 13:50:25 +0000 |
commit | e9ee7b47d42b3e9f645810e65905a43b583d55c7 (patch) | |
tree | c283f2ea9a2d63d00c31961faedbf2c9a790f7f8 | |
parent | 98d281a99f1f60eac65ce348157dcec49742378f (diff) | |
download | bcm5719-llvm-e9ee7b47d42b3e9f645810e65905a43b583d55c7.tar.gz bcm5719-llvm-e9ee7b47d42b3e9f645810e65905a43b583d55c7.zip |
[InstCombine] fold fadd+fneg with fdiv/fmul between
The backend already does this via isNegatibleForFree(),
but we may want to alter the fneg IR canonicalizations
that currently exist, so we need to try harder to fold
fneg in IR to avoid regressions.
llvm-svn: 367227
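
For context, a minimal IR sketch of the fmul case of the new fold (the fdiv cases follow the same shape). The function and value names below are illustrative, not taken from the patch; the extra use of the negation is what keeps the pre-existing fadd+fneg fold from firing, so the new matchers look through the single-use fmul instead:

declare void @use(float)

define float @fmul_fneg_sketch(float %x, float %y, float %z) {
  %neg = fsub float -0.000000e+00, %x   ; -X, kept alive by the extra use below
  call void @use(float %neg)
  %mul = fmul float %neg, %y            ; single-use (-X * Y)
  %r = fadd float %mul, %z              ; (-X * Y) + Z
  ret float %r
}

; Expected shape after instcombine (sketch; compare the updated CHECK lines
; in fadd.ll below):
;   %mul1 = fmul float %x, %y
;   %r = fsub float %z, %mul1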
-rw-r--r-- | llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp | 18
-rw-r--r-- | llvm/test/Transforms/InstCombine/fadd.ll | 16
2 files changed, 26 insertions, 8 deletions
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index 79d9ddde569..328c5021967 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -1387,6 +1387,24 @@ Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
   if (match(&I, m_c_FAdd(m_FNeg(m_Value(X)), m_Value(Y))))
     return BinaryOperator::CreateFSubFMF(Y, X, &I);
 
+  // Similar to above, but look through fmul/fdiv for the negated term.
+  // (-X * Y) + Z --> Z - (X * Y) [4 commuted variants]
+  Value *Z;
+  if (match(&I, m_c_FAdd(m_OneUse(m_c_FMul(m_FNeg(m_Value(X)), m_Value(Y))),
+                         m_Value(Z)))) {
+    Value *XY = Builder.CreateFMulFMF(X, Y, &I);
+    return BinaryOperator::CreateFSubFMF(Z, XY, &I);
+  }
+  // (-X / Y) + Z --> Z - (X / Y) [2 commuted variants]
+  // (X / -Y) + Z --> Z - (X / Y) [2 commuted variants]
+  if (match(&I, m_c_FAdd(m_OneUse(m_FDiv(m_FNeg(m_Value(X)), m_Value(Y))),
+                         m_Value(Z))) ||
+      match(&I, m_c_FAdd(m_OneUse(m_FDiv(m_Value(X), m_FNeg(m_Value(Y)))),
+                         m_Value(Z)))) {
+    Value *XY = Builder.CreateFDivFMF(X, Y, &I);
+    return BinaryOperator::CreateFSubFMF(Z, XY, &I);
+  }
+
   // Check for (fadd double (sitofp x), y), see if we can merge this into an
   // integer add followed by a promotion.
   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
diff --git a/llvm/test/Transforms/InstCombine/fadd.ll b/llvm/test/Transforms/InstCombine/fadd.ll
index 77a6b0d9a90..50022db9c52 100644
--- a/llvm/test/Transforms/InstCombine/fadd.ll
+++ b/llvm/test/Transforms/InstCombine/fadd.ll
@@ -246,8 +246,8 @@ define float @fdiv_fneg1_extra_use2(float %x, float %y, float %z) {
 ; CHECK-LABEL: @fdiv_fneg1_extra_use2(
 ; CHECK-NEXT:    [[NEG:%.*]] = fsub float -0.000000e+00, [[X:%.*]]
 ; CHECK-NEXT:    call void @use(float [[NEG]])
-; CHECK-NEXT:    [[DIV:%.*]] = fdiv float [[NEG]], [[Y:%.*]]
-; CHECK-NEXT:    [[R:%.*]] = fadd float [[DIV]], [[Z:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = fdiv float [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = fsub float [[Z:%.*]], [[TMP1]]
 ; CHECK-NEXT:    ret float [[R]]
 ;
   %neg = fsub float -0.000000e+00, %x
@@ -263,8 +263,8 @@ define float @fdiv_fneg2_extra_use2(float %x, float %y, float %z) {
 ; CHECK-LABEL: @fdiv_fneg2_extra_use2(
 ; CHECK-NEXT:    [[NEG:%.*]] = fsub float -0.000000e+00, [[X:%.*]]
 ; CHECK-NEXT:    call void @use(float [[NEG]])
-; CHECK-NEXT:    [[DIV:%.*]] = fdiv float [[Y:%.*]], [[NEG]]
-; CHECK-NEXT:    [[R:%.*]] = fadd float [[DIV]], [[Z:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = fdiv float [[Y:%.*]], [[X]]
+; CHECK-NEXT:    [[R:%.*]] = fsub float [[Z:%.*]], [[TMP1]]
 ; CHECK-NEXT:    ret float [[R]]
 ;
   %neg = fsub float -0.000000e+00, %x
@@ -280,8 +280,8 @@ define <2 x float> @fmul_fneg1_extra_use2(<2 x float> %x, <2 x float> %y, <2 x f
 ; CHECK-LABEL: @fmul_fneg1_extra_use2(
 ; CHECK-NEXT:    [[NEG:%.*]] = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, [[X:%.*]]
 ; CHECK-NEXT:    call void @use_vec(<2 x float> [[NEG]])
-; CHECK-NEXT:    [[MUL:%.*]] = fmul <2 x float> [[NEG]], [[Y:%.*]]
-; CHECK-NEXT:    [[R:%.*]] = fadd <2 x float> [[MUL]], [[Z:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = fmul <2 x float> [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = fsub <2 x float> [[Z:%.*]], [[TMP1]]
 ; CHECK-NEXT:    ret <2 x float> [[R]]
 ;
   %neg = fsub <2 x float> <float -0.0, float -0.0>, %x
@@ -298,8 +298,8 @@ define float @fmul_fneg2_extra_use2(float %x, float %py, float %z) {
 ; CHECK-NEXT:    [[Y:%.*]] = frem float -4.200000e+01, [[PY:%.*]]
 ; CHECK-NEXT:    [[NEG:%.*]] = fsub float -0.000000e+00, [[X:%.*]]
 ; CHECK-NEXT:    call void @use(float [[NEG]])
-; CHECK-NEXT:    [[MUL:%.*]] = fmul float [[Y]], [[NEG]]
-; CHECK-NEXT:    [[R:%.*]] = fadd float [[MUL]], [[Z:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = fmul float [[Y]], [[X]]
+; CHECK-NEXT:    [[R:%.*]] = fsub float [[Z:%.*]], [[TMP1]]
 ; CHECK-NEXT:    ret float [[R]]
 ;
   %y = frem float -42.0, %py ; thwart complexity-based canonicalization