summaryrefslogtreecommitdiffstats
path: root/llvm/lib
diff options
context:
space:
mode:
authorCraig Topper <craig.topper@intel.com>2017-08-07 18:10:39 +0000
committerCraig Topper <craig.topper@intel.com>2017-08-07 18:10:39 +0000
commit7091a743b41d531ffcce207cca2439b3567271a2 (patch)
tree63cfaec53fbcaa29d2fc5f1ec72defb85205426b /llvm/lib
parentb0d208a0abdfedf12dd981453620a7a01d3e076f (diff)
downloadbcm5719-llvm-7091a743b41d531ffcce207cca2439b3567271a2.tar.gz
bcm5719-llvm-7091a743b41d531ffcce207cca2439b3567271a2.zip
[InstCombine] Support (X | C1) & C2 --> (X & C2^(C1&C2)) | (C1&C2) for vector splats
Note: the original code I deleted incorrectly listed this as (X | C1) & C2 --> (X & C2^(C1&C2)) | C1, which is only valid if C1 is a subset of C2. It relied on SimplifyDemandedBits to remove any extra bits from C1 before we got to that code. My new implementation avoids relying on that behavior so that it can be naively verified with Alive. Differential Revision: https://reviews.llvm.org/D36384 llvm-svn: 310272
Diffstat (limited to 'llvm/lib')
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp31
1 files changed, 16 insertions, 15 deletions
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 1699a0ce89c..db8e6ecb0bf 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -126,21 +126,6 @@ Instruction *InstCombiner::OptAndOp(BinaryOperator *Op,
switch (Op->getOpcode()) {
default: break;
- case Instruction::Or:
- if (Op->hasOneUse()){
- ConstantInt *TogetherCI = dyn_cast<ConstantInt>(Together);
- if (TogetherCI && !TogetherCI->isZero()){
- // (X | C1) & C2 --> (X & (C2^(C1&C2))) | C1
- // NOTE: This reduces the number of bits set in the & mask, which
- // can expose opportunities for store narrowing.
- Together = ConstantExpr::getXor(AndRHS, Together);
- Value *And = Builder.CreateAnd(X, Together);
- And->takeName(Op);
- return BinaryOperator::CreateOr(And, OpRHS);
- }
- }
-
- break;
case Instruction::Add:
if (Op->hasOneUse()) {
// Adding a one to a single bit bit-field should be turned into an XOR
@@ -1223,6 +1208,22 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
return BinaryOperator::CreateXor(And, NewC);
}
+ const APInt *OrC;
+ if (match(Op0, m_OneUse(m_Or(m_Value(X), m_APInt(OrC))))) {
+ // (X | C1) & C2 --> (X & C2^(C1&C2)) | (C1&C2)
+ // NOTE: This reduces the number of bits set in the & mask, which
+ // can expose opportunities for store narrowing for scalars.
+ // NOTE: SimplifyDemandedBits should have already removed bits from C1
+ // that aren't set in C2. Meaning we can replace (C1&C2) with C1 in
+ // above, but this feels safer.
+ APInt Together = *C & *OrC;
+ Value *And = Builder.CreateAnd(X, ConstantInt::get(I.getType(),
+ Together ^ *C));
+ And->takeName(Op0);
+ return BinaryOperator::CreateOr(And, ConstantInt::get(I.getType(),
+ Together));
+ }
+
// If the mask is only needed on one incoming arm, push the 'and' op up.
if (match(Op0, m_OneUse(m_Xor(m_Value(X), m_Value(Y)))) ||
match(Op0, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) {
OpenPOWER on IntegriCloud