author    Roman Lebedev <lebedev.ri@gmail.com>  2019-07-01 15:55:15 +0000
committer Roman Lebedev <lebedev.ri@gmail.com>  2019-07-01 15:55:15 +0000
commit    72b8d41ce811fa1a20711e0619d4a5307a754e57 (patch)
tree      561ad4db85cd579db4488c55fa1b867a037033b9 /llvm/lib/Transforms
parent    5abf80cdfa347c8b6306157011e7f85212994428 (diff)
download  bcm5719-llvm-72b8d41ce811fa1a20711e0619d4a5307a754e57.tar.gz
          bcm5719-llvm-72b8d41ce811fa1a20711e0619d4a5307a754e57.zip
[InstCombine] Shift amount reassociation in bittest (PR42399)
Summary:
Given pattern:
  `icmp eq/ne (and ((x shift Q), (y oppositeshift K))), 0`
we should move the shifts to the same hand of the 'and', i.e. rewrite as
  `icmp eq/ne (and (x shift (Q+K)), y), 0`  iff `(Q+K) u< bitwidth(x)`

It might be tempting to not restrict this to situations where we know
we'd fold the two shifts together, but I'm not sure what rules there
should be to avoid endless combine loops.

We pick the same shift that was originally used to shift the variable
we picked to shift: https://rise4fun.com/Alive/6x1v

Should fix [[ https://bugs.llvm.org/show_bug.cgi?id=42399 | PR42399 ]].

Reviewers: spatel, nikic, RKSimon

Reviewed By: spatel

Subscribers: llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D63829

llvm-svn: 364791
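To make the transform concrete, below is a minimal standalone sketch (not part
of the patch; the rise4fun Alive link above is the authoritative proof). It
exhaustively checks the equivalence for 8-bit values, instantiating `shift` as
lshr (>>) and `oppositeshift` as shl (<<); the brute-force approach and all
names here are illustrative assumptions, not anything from the commit.

#include <cassert>
#include <cstdint>

// Exhaustively verify, for all 8-bit x and y:
//   ((x >> Q) & (y << K)) == 0  <=>  ((x >> (Q+K)) & y) == 0
// whenever (Q+K) u< 8, i.e. the folded shift amount stays in range.
// (Illustrative check only; the patch itself is proven with Alive.)
int main() {
  for (unsigned Q = 0; Q < 8; ++Q)
    for (unsigned K = 0; Q + K < 8; ++K)
      for (unsigned X = 0; X < 256; ++X)
        for (unsigned Y = 0; Y < 256; ++Y) {
          bool Before = ((uint8_t)(X >> Q) & (uint8_t)(Y << K)) == 0;
          bool After = ((uint8_t)(X >> (Q + K)) & (uint8_t)Y) == 0;
          assert(Before == After && "fold must preserve eq/ne against zero");
        }
  return 0;
}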
Diffstat (limited to 'llvm/lib/Transforms')
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp | 60
1 file changed, 60 insertions(+), 0 deletions(-)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 40c3b79e88d..664edc7c9e5 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -3270,6 +3270,63 @@ foldICmpWithTruncSignExtendedVal(ICmpInst &I,
return T1;
}
+// Given pattern:
+//   icmp eq/ne (and ((x shift Q), (y oppositeshift K))), 0
+// we should move shifts to the same hand of 'and', i.e. rewrite as
+//   icmp eq/ne (and (x shift (Q+K)), y), 0  iff (Q+K) u< bitwidth(x)
+// We are only interested in opposite logical shifts here.
+// If we can, we want to end up creating 'lshr' shift.
+static Value *
+foldShiftIntoShiftInAnotherHandOfAndInICmp(ICmpInst &I, const SimplifyQuery SQ,
+                                           InstCombiner::BuilderTy &Builder) {
+  if (!I.isEquality() || !match(I.getOperand(1), m_Zero()) ||
+      !I.getOperand(0)->hasOneUse())
+    return nullptr;
+
+  auto m_AnyLogicalShift = m_LogicalShift(m_Value(), m_Value());
+  auto m_AnyLShr = m_LShr(m_Value(), m_Value());
+
+  // Look for an 'and' of two (opposite) logical shifts.
+  // Pick the single-use shift as XShift.
+  Value *XShift, *YShift;
+  if (!match(I.getOperand(0),
+             m_c_And(m_OneUse(m_CombineAnd(m_AnyLogicalShift, m_Value(XShift))),
+                     m_CombineAnd(m_AnyLogicalShift, m_Value(YShift)))))
+    return nullptr;
+
+  // If YShift is a single-use 'lshr', swap the shifts around.
+  if (match(YShift, m_OneUse(m_AnyLShr)))
+    std::swap(XShift, YShift);
+
+  // The shifts must be in opposite directions.
+  Instruction::BinaryOps XShiftOpcode =
+      cast<BinaryOperator>(XShift)->getOpcode();
+  if (XShiftOpcode == cast<BinaryOperator>(YShift)->getOpcode())
+    return nullptr; // Do not care about same-direction shifts here.
+
+  Value *X, *XShAmt, *Y, *YShAmt;
+  match(XShift, m_BinOp(m_Value(X), m_Value(XShAmt)));
+  match(YShift, m_BinOp(m_Value(Y), m_Value(YShAmt)));
+
+  // Can we fold (XShAmt+YShAmt)?
+  Value *NewShAmt = SimplifyBinOp(Instruction::BinaryOps::Add, XShAmt, YShAmt,
+                                  SQ.getWithInstruction(&I));
+  if (!NewShAmt)
+    return nullptr;
+  // Is the new shift amount smaller than the bit width?
+  // FIXME: could also rely on ConstantRange.
+  unsigned BitWidth = X->getType()->getScalarSizeInBits();
+  if (!match(NewShAmt, m_SpecificInt_ULT(APInt(BitWidth, BitWidth))))
+    return nullptr;
+  // All good, we can do this fold. The shift is the same that was for X.
+  Value *T0 = XShiftOpcode == Instruction::BinaryOps::LShr
+                  ? Builder.CreateLShr(X, NewShAmt)
+                  : Builder.CreateShl(X, NewShAmt);
+  Value *T1 = Builder.CreateAnd(T0, Y);
+  return Builder.CreateICmp(I.getPredicate(), T1,
+                            Constant::getNullValue(X->getType()));
+}
+
/// Try to fold icmp (binop), X or icmp X, (binop).
/// TODO: A large part of this logic is duplicated in InstSimplify's
/// simplifyICmpWithBinOp(). We should be able to share that and avoid the code
@@ -3625,6 +3682,9 @@ Instruction *InstCombiner::foldICmpBinOp(ICmpInst &I) {
  if (Value *V = foldICmpWithTruncSignExtendedVal(I, Builder))
    return replaceInstUsesWith(I, V);
+  if (Value *V = foldShiftIntoShiftInAnotherHandOfAndInICmp(I, SQ, Builder))
+    return replaceInstUsesWith(I, V);
+
  return nullptr;
}
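For a source-level illustration of what the fold in the diff above buys
(function names and shift constants are hypothetical, not taken from PR42399
or the patch): with this patch, InstCombine should be able to rewrite the IR
of `before` into roughly the form shown in `after`, folding the two opposite
shifts into a single lshr of x by the constant 1+2 = 3 and leaving y
unshifted.

#include <cstdint>

// Before: icmp eq (and (lshr %x, 1), (shl %y, 2)), 0
bool before(uint32_t x, uint32_t y) {
  return ((x >> 1) & (y << 2)) == 0;
}

// After the fold: icmp eq (and (lshr %x, 3), %y), 0
bool after(uint32_t x, uint32_t y) {
  return ((x >> 3) & y) == 0;
}

Note that the surviving shift is the lshr: per the comment in the patch ("If
we can, we want to end up creating 'lshr' shift"), the helper swaps XShift
and YShift so that a single-use lshr is the one rebuilt with the widened
shift amount.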