path: root/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
author     Sanjay Patel <spatel@rotateright.com>    2018-07-13 01:18:07 +0000
committer  Sanjay Patel <spatel@rotateright.com>    2018-07-13 01:18:07 +0000
commit     70043b7e9ad7c86c61ac5087aa16c6ed366990e1 (patch)
tree       8dd49ed0bbfb125fb40edd7a0a700d66868431bd /llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
parent     90ad6835ddaad46efff327db0e2c83bfb1563075 (diff)
[InstCombine] return when SimplifyAssociativeOrCommutative makes a change
This bug was created by rL335258 because we used to always call instsimplify
after trying the associative folds. After that change it became possible for
subsequent folds to encounter unsimplified code (and potentially assert because
of it).

Instead of carrying changed state through instcombine, we can just return
immediately. This allows instsimplify to run, so we can continue assuming that
easy folds have already occurred.

llvm-svn: 336965
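For context, here is a minimal, self-contained sketch (toy types only, not LLVM source; Instr, canonicalize, and the worklist below are illustrative stand-ins) of the driver/visitor contract the fix relies on: a visit function that returns a non-null instruction tells the driver it modified something, the driver re-queues that instruction, and the next visit starts again with the simplifier call at the top of the visitor, so later folds never see a partially-rewritten form.

// Toy model (not LLVM code) of "return &I so the instruction is revisited".
#include <deque>
#include <iostream>

struct Instr {
  bool Canonicalized = false; // toy stand-in for "operands already reassociated"
};

// Stand-in for SimplifyAssociativeOrCommutative: returns true iff it changed I.
static bool canonicalize(Instr &I) {
  if (I.Canonicalized)
    return false;
  I.Canonicalized = true;
  return true;
}

// Stand-in for visitAdd after this patch: bail out as soon as the
// canonicalizer changes anything instead of threading a Changed flag
// through the remaining folds.
static Instr *visitAdd(Instr &I) {
  if (canonicalize(I))
    return &I; // "I modified it; revisit from the top"
  // ... later folds may now assume the easy simplifications already ran ...
  return nullptr;
}

int main() {
  Instr Add;
  std::deque<Instr *> Worklist{&Add};
  int Visits = 0;
  while (!Worklist.empty()) {
    Instr *I = Worklist.front();
    Worklist.pop_front();
    ++Visits;
    if (Instr *Res = visitAdd(*I))
      Worklist.push_back(Res); // re-queue the changed instruction
  }
  std::cout << "visits: " << Visits << "\n"; // prints 2: one change, then no change
}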
Diffstat (limited to 'llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp')
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp | 11
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index 330db9eb91f..aa31e0d850d 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -1126,7 +1126,9 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
                                  SQ.getWithInstruction(&I)))
     return replaceInstUsesWith(I, V);
 
-  bool Changed = SimplifyAssociativeOrCommutative(I);
+  if (SimplifyAssociativeOrCommutative(I))
+    return &I;
+
   if (Instruction *X = foldShuffledBinop(I))
     return X;
@@ -1367,6 +1369,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
   // TODO(jingyue): Consider willNotOverflowSignedAdd and
   // willNotOverflowUnsignedAdd to reduce the number of invocations of
   // computeKnownBits.
+  bool Changed = false;
   if (!I.hasNoSignedWrap() && willNotOverflowSignedAdd(LHS, RHS, I)) {
     Changed = true;
     I.setHasNoSignedWrap(true);
@@ -1388,7 +1391,9 @@ Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
                                   SQ.getWithInstruction(&I)))
     return replaceInstUsesWith(I, V);
 
-  bool Changed = SimplifyAssociativeOrCommutative(I);
+  if (SimplifyAssociativeOrCommutative(I))
+    return &I;
+
   if (Instruction *X = foldShuffledBinop(I))
     return X;
@@ -1471,7 +1476,7 @@ Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
       return replaceInstUsesWith(I, V);
   }
 
-  return Changed ? &I : nullptr;
+  return nullptr;
 }
 
 /// Optimize pointer differences into the same array into a size. Consider: