Diffstat (limited to 'llvm')
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp |  5
-rw-r--r--  llvm/test/Transforms/InstCombine/fast-math.ll         | 18
2 files changed, 13 insertions, 10 deletions
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index f3fe09f1cb0..8b355dcd1b2 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -1911,6 +1911,11 @@ Instruction *InstCombiner::visitFSub(BinaryOperator &I) {
  if (match(Op0, m_FSub(m_Specific(Op1), m_Value(X))))
    return BinaryOperator::CreateFNegFMF(X, &I);
+  // Y - (X + Y) --> -X
+  // Y - (Y + X) --> -X
+  if (match(Op1, m_c_FAdd(m_Specific(Op0), m_Value(X))))
+    return BinaryOperator::CreateFNegFMF(X, &I);
+
  // TODO: This performs reassociative folds for FP ops. Some fraction of the
  // functionality has been subsumed by simple pattern matching here and in
  // InstSimplify. We should let a dedicated reassociation pass handle more
diff --git a/llvm/test/Transforms/InstCombine/fast-math.ll b/llvm/test/Transforms/InstCombine/fast-math.ll
index 8a802105885..98f878a5b18 100644
--- a/llvm/test/Transforms/InstCombine/fast-math.ll
+++ b/llvm/test/Transforms/InstCombine/fast-math.ll
@@ -269,8 +269,8 @@ define float @fold8_reassoc(float %f1) {
define float @fsub_fadd_common_op_fneg(float %x, float %y) {
; CHECK-LABEL: @fsub_fadd_common_op_fneg(
-; CHECK-NEXT:    [[TMP1:%.*]] = fsub fast float -0.000000e+00, [[X:%.*]]
-; CHECK-NEXT:    ret float [[TMP1]]
+; CHECK-NEXT:    [[R:%.*]] = fsub fast float -0.000000e+00, [[X:%.*]]
+; CHECK-NEXT:    ret float [[R]]
;
  %a = fadd float %x, %y
  %r = fsub fast float %y, %a
@@ -283,8 +283,8 @@ define float @fsub_fadd_common_op_fneg(float %x, float %y) {
define float @fsub_fadd_common_op_fneg_reassoc_nsz(float %x, float %y) {
; CHECK-LABEL: @fsub_fadd_common_op_fneg_reassoc_nsz(
-; CHECK-NEXT:    [[TMP1:%.*]] = fsub reassoc nsz float -0.000000e+00, [[X:%.*]]
-; CHECK-NEXT:    ret float [[TMP1]]
+; CHECK-NEXT:    [[R:%.*]] = fsub reassoc nsz float -0.000000e+00, [[X:%.*]]
+; CHECK-NEXT:    ret float [[R]]
;
  %a = fadd float %x, %y
  %r = fsub reassoc nsz float %y, %a
@@ -295,8 +295,7 @@ define float @fsub_fadd_common_op_fneg_reassoc_nsz(float %x, float %y) {
define <2 x float> @fsub_fadd_common_op_fneg_vec(<2 x float> %x, <2 x float> %y) {
; CHECK-LABEL: @fsub_fadd_common_op_fneg_vec(
-; CHECK-NEXT:    [[A:%.*]] = fadd <2 x float> [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT:    [[R:%.*]] = fsub reassoc nsz <2 x float> [[Y]], [[A]]
+; CHECK-NEXT:    [[R:%.*]] = fsub reassoc nsz <2 x float> <float -0.000000e+00, float -0.000000e+00>, [[X:%.*]]
; CHECK-NEXT:    ret <2 x float> [[R]]
;
  %a = fadd <2 x float> %x, %y
@@ -309,8 +308,8 @@ define <2 x float> @fsub_fadd_common_op_fneg_vec(<2 x float> %x, <2 x float> %y)
define float @fsub_fadd_common_op_fneg_commute(float %x, float %y) {
; CHECK-LABEL: @fsub_fadd_common_op_fneg_commute(
-; CHECK-NEXT:    [[TMP1:%.*]] = fsub reassoc nsz float -0.000000e+00, [[X:%.*]]
-; CHECK-NEXT:    ret float [[TMP1]]
+; CHECK-NEXT:    [[R:%.*]] = fsub reassoc nsz float -0.000000e+00, [[X:%.*]]
+; CHECK-NEXT:    ret float [[R]]
;
  %a = fadd float %y, %x
  %r = fsub reassoc nsz float %y, %a
@@ -321,8 +320,7 @@ define float @fsub_fadd_common_op_fneg_commute(float %x, float %y) {
define <2 x float> @fsub_fadd_common_op_fneg_commute_vec(<2 x float> %x, <2 x float> %y) {
; CHECK-LABEL: @fsub_fadd_common_op_fneg_commute_vec(
-; CHECK-NEXT:    [[A:%.*]] = fadd <2 x float> [[Y:%.*]], [[X:%.*]]
-; CHECK-NEXT:    [[R:%.*]] = fsub reassoc nsz <2 x float> [[Y]], [[A]]
+; CHECK-NEXT:    [[R:%.*]] = fsub reassoc nsz <2 x float> <float -0.000000e+00, float -0.000000e+00>, [[X:%.*]]
; CHECK-NEXT:    ret <2 x float> [[R]]
;
  %a = fadd <2 x float> %y, %x
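
For context, here is a minimal standalone reproducer of the new fold (illustrative only, not part of the commit; the file and function names are made up). Running InstCombine over this IR is expected to rewrite the fsub into the canonical fneg form (fsub -0.0, %x), matching the updated CHECK lines above:

; fsub-fadd-fold.ll -- illustrative example, not part of this commit.
; The fold Y - (Y + X) --> -X fires with 'fast' or with 'reassoc nsz' on the
; fsub, per the tests above.
define float @y_minus_y_plus_x(float %x, float %y) {
  %a = fadd float %y, %x
  %r = fsub reassoc nsz float %y, %a   ; expected: fsub reassoc nsz float -0.000000e+00, %x
  ret float %r
}

To check, run the IR through InstCombine, e.g. opt -instcombine -S fsub-fadd-fold.ll (legacy pass manager syntax of this era).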