summaryrefslogtreecommitdiffstats
path: root/llvm
diff options
context:
space:
mode:
authorGil Rapaport <gil.rapaport@intel.com>2018-06-26 05:31:18 +0000
committerGil Rapaport <gil.rapaport@intel.com>2018-06-26 05:31:18 +0000
commitda2e2caa6c99ae067a26a0b56f976303be17d71c (patch)
tree654d72dd250671aea987ab424a14217609e79669 /llvm
parent08dae1682d5f4590fa9dc0bc2ce8c614fef23579 (diff)
downloadbcm5719-llvm-da2e2caa6c99ae067a26a0b56f976303be17d71c.tar.gz
bcm5719-llvm-da2e2caa6c99ae067a26a0b56f976303be17d71c.zip
[InstCombine] (A + 1) + (B ^ -1) --> A - B
Turn canonicalized subtraction back into (-1 - B) and combine it with (A + 1) into (A - B). This is similar to the folding already done for (B ^ -1) + Const into (-1 + Const) - B.

Differential Revision: https://reviews.llvm.org/D48535

llvm-svn: 335579
Diffstat (limited to 'llvm')
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp5
-rw-r--r--llvm/test/Transforms/InstCombine/add.ll37
2 files changed, 42 insertions, 0 deletions
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index 1101f729063..330db9eb91f 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -1212,6 +1212,11 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
if (Value *V = checkForNegativeOperand(I, Builder))
return replaceInstUsesWith(I, V);
+ // (A + 1) + ~B --> A - B
+ // ~B + (A + 1) --> A - B
+ if (match(&I, m_c_BinOp(m_Add(m_Value(A), m_One()), m_Not(m_Value(B)))))
+ return BinaryOperator::CreateSub(A, B);
+
// X % C0 + (( X / C0 ) % C1) * C0 => X % (C0 * C1)
if (Value *V = SimplifyAddWithRemainder(I)) return replaceInstUsesWith(I, V);
diff --git a/llvm/test/Transforms/InstCombine/add.ll b/llvm/test/Transforms/InstCombine/add.ll
index 61eb6f9317e..04a51087132 100644
--- a/llvm/test/Transforms/InstCombine/add.ll
+++ b/llvm/test/Transforms/InstCombine/add.ll
@@ -802,3 +802,40 @@ final:
%value = add <2 x i32> <i32 123, i32 333>, %A
ret <2 x i32> %value
}
+
+; E = (A + 1) + ~B = A - B
+define i32 @add_not_increment(i32 %A, i32 %B) {
+; CHECK-LABEL: @add_not_increment(
+; CHECK-NEXT: [[E:%.*]] = sub i32 %A, %B
+; CHECK-NEXT: ret i32 [[E]]
+;
+ %C = xor i32 %B, -1
+ %D = add i32 %A, 1
+ %E = add i32 %D, %C
+ ret i32 %E
+}
+
+; E = (A + 1) + ~B = A - B
+define <2 x i32> @add_not_increment_vec(<2 x i32> %A, <2 x i32> %B) {
+; CHECK-LABEL: @add_not_increment_vec(
+; CHECK-NEXT: [[E:%.*]] = sub <2 x i32> %A, %B
+; CHECK-NEXT: ret <2 x i32> [[E]]
+;
+ %C = xor <2 x i32> %B, <i32 -1, i32 -1>
+ %D = add <2 x i32> %A, <i32 1, i32 1>
+ %E = add <2 x i32> %D, %C
+ ret <2 x i32> %E
+}
+
+; E = ~B + (1 + A) = A - B
+define i32 @add_not_increment_commuted(i32 %A, i32 %B) {
+; CHECK-LABEL: @add_not_increment_commuted(
+; CHECK-NEXT: [[E:%.*]] = sub i32 %A, %B
+; CHECK-NEXT: ret i32 [[E]]
+;
+ %C = xor i32 %B, -1
+ %D = add i32 %A, 1
+ %E = add i32 %C, %D
+ ret i32 %E
+}
+
OpenPOWER on IntegriCloud