author    Sanjay Patel <spatel@rotateright.com>  2016-02-23 16:36:07 +0000
committer Sanjay Patel <spatel@rotateright.com>  2016-02-23 16:36:07 +0000
commit    40e7ba00467fcd1f15ed8e540d43eb6caf200b22 (patch)
tree      0afae6f866bb8802fd32c09b6420e46a7d9df0c5 /llvm/lib
parent    9fe1e550c8086211372270d9f29cc806c09a4d19 (diff)
[InstCombine] add helper function to foldCastedBitwiseLogic() ; NFCI
This is a straight cut and paste of the existing code and is intended to be the first step in solving part of PR26702:
https://llvm.org/bugs/show_bug.cgi?id=26702

We should be able to reuse most of this and delete the nearly identical existing code in visitOr(). Then, we can enhance visitXor() to use the same code too.

llvm-svn: 261649
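A side note on the reuse plan (an illustration, not part of this commit): the fold (op (cast A), (cast B)) -> (cast (op A, B)) holds for 'or' and 'xor' exactly as it does for 'and', which is what makes the planned sharing with visitOr() and visitXor() sound. A minimal standalone check, using zext from i8 to i32 as an assumed example case, verifies all three identities exhaustively:

#include <cassert>
#include <cstdint>

// Checks (op (zext A), (zext B)) == (zext (op A, B)) for op in {and, or, xor}
// over every pair of 8-bit values; zext is modeled by uint8_t -> uint32_t.
int main() {
  for (unsigned A = 0; A < 256; ++A) {
    for (unsigned B = 0; B < 256; ++B) {
      uint8_t NA = A, NB = B;
      assert((uint32_t(NA) & uint32_t(NB)) == uint32_t(uint8_t(NA & NB)));
      assert((uint32_t(NA) | uint32_t(NB)) == uint32_t(uint8_t(NA | NB)));
      assert((uint32_t(NA) ^ uint32_t(NB)) == uint32_t(uint8_t(NA ^ NB)));
    }
  }
  return 0;
}

The same identity does not hold for every binary operator (for 'add', a carry out of the narrow type breaks it, e.g. 255 + 1 wraps to 0 in i8 but is 256 after zext), which is why the helper is restricted to the bitwise logic ops.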
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp | 69
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineInternal.h   |  1
2 files changed, 41 insertions(+), 29 deletions(-)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 8ca0e508801..4ac740ec408 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -1243,6 +1243,44 @@ static Instruction *matchDeMorgansLaws(BinaryOperator &I,
   return nullptr;
 }
 
+Instruction *InstCombiner::foldCastedBitwiseLogic(BinaryOperator &I) {
+  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
+  if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
+    Value *Op0COp = Op0C->getOperand(0);
+    Type *SrcTy = Op0COp->getType();
+    // fold (and (cast A), (cast B)) -> (cast (and A, B))
+    if (CastInst *Op1C = dyn_cast<CastInst>(Op1)) {
+      if (Op0C->getOpcode() == Op1C->getOpcode() && // same cast kind ?
+          SrcTy == Op1C->getOperand(0)->getType() &&
+          SrcTy->isIntOrIntVectorTy()) {
+        Value *Op1COp = Op1C->getOperand(0);
+
+        // Only do this if the casts both really cause code to be generated.
+        if (ShouldOptimizeCast(Op0C->getOpcode(), Op0COp, I.getType()) &&
+            ShouldOptimizeCast(Op1C->getOpcode(), Op1COp, I.getType())) {
+          Value *NewOp = Builder->CreateAnd(Op0COp, Op1COp, I.getName());
+          return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
+        }
+
+        // If this is and(cast(icmp), cast(icmp)), try to fold this even if the
+        // cast is otherwise not optimizable. This happens for vector sexts.
+        if (ICmpInst *RHS = dyn_cast<ICmpInst>(Op1COp))
+          if (ICmpInst *LHS = dyn_cast<ICmpInst>(Op0COp))
+            if (Value *Res = FoldAndOfICmps(LHS, RHS))
+              return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
+
+        // If this is and(cast(fcmp), cast(fcmp)), try to fold this even if the
+        // cast is otherwise not optimizable. This happens for vector sexts.
+        if (FCmpInst *RHS = dyn_cast<FCmpInst>(Op1COp))
+          if (FCmpInst *LHS = dyn_cast<FCmpInst>(Op0COp))
+            if (Value *Res = FoldAndOfFCmps(LHS, RHS))
+              return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
+      }
+    }
+  }
+  return nullptr;
+}
+
 Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
   bool Changed = SimplifyAssociativeOrCommutative(I);
   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
@@ -1480,39 +1518,12 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
       if (Value *Res = FoldAndOfFCmps(LHS, RHS))
         return replaceInstUsesWith(I, Res);
 
+  if (Instruction *CastedAnd = foldCastedBitwiseLogic(I))
+    return CastedAnd;
   if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
     Value *Op0COp = Op0C->getOperand(0);
     Type *SrcTy = Op0COp->getType();
-    // fold (and (cast A), (cast B)) -> (cast (and A, B))
-    if (CastInst *Op1C = dyn_cast<CastInst>(Op1)) {
-      if (Op0C->getOpcode() == Op1C->getOpcode() && // same cast kind ?
-          SrcTy == Op1C->getOperand(0)->getType() &&
-          SrcTy->isIntOrIntVectorTy()) {
-        Value *Op1COp = Op1C->getOperand(0);
-
-        // Only do this if the casts both really cause code to be generated.
-        if (ShouldOptimizeCast(Op0C->getOpcode(), Op0COp, I.getType()) &&
-            ShouldOptimizeCast(Op1C->getOpcode(), Op1COp, I.getType())) {
-          Value *NewOp = Builder->CreateAnd(Op0COp, Op1COp, I.getName());
-          return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
-        }
-
-        // If this is and(cast(icmp), cast(icmp)), try to fold this even if the
-        // cast is otherwise not optimizable. This happens for vector sexts.
-        if (ICmpInst *RHS = dyn_cast<ICmpInst>(Op1COp))
-          if (ICmpInst *LHS = dyn_cast<ICmpInst>(Op0COp))
-            if (Value *Res = FoldAndOfICmps(LHS, RHS))
-              return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
-
-        // If this is and(cast(fcmp), cast(fcmp)), try to fold this even if the
-        // cast is otherwise not optimizable. This happens for vector sexts.
-        if (FCmpInst *RHS = dyn_cast<FCmpInst>(Op1COp))
-          if (FCmpInst *LHS = dyn_cast<FCmpInst>(Op0COp))
-            if (Value *Res = FoldAndOfFCmps(LHS, RHS))
-              return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
-      }
-    }
 
     // If we are masking off the sign bit of a floating-point value, convert
     // this to the canonical fabs intrinsic call and cast back to integer.
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
index cafe8f6be03..c251683d514 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
+++ b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
@@ -390,6 +390,7 @@ private:
   Value *EmitGEPOffset(User *GEP);
   Instruction *scalarizePHI(ExtractElementInst &EI, PHINode *PN);
   Value *EvaluateInDifferentElementOrder(Value *V, ArrayRef<int> Mask);
+  Instruction *foldCastedBitwiseLogic(BinaryOperator &I);
 
 public:
   /// \brief Inserts an instruction \p New before instruction \p Old
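
One more illustrative note, not from the commit itself: the icmp/fcmp cases in the new helper run even when ShouldOptimizeCast() rejects the generic fold. The comments attribute this to vector sexts, where a sext of an i1 compare result is an all-ones or all-zero mask, so AND of the masks equals sext of the AND of the underlying i1 values. A scalar model of that invariant, with int32_t standing in for one vector lane (the real code folds the compares via FoldAndOfICmps/FoldAndOfFCmps rather than using this identity directly):

#include <cassert>
#include <cstdint>

// Models sext i1 -> i32: a true compare becomes an all-ones mask (-1),
// a false compare becomes zero.
static int32_t sextI1(bool B) { return B ? -1 : 0; }

int main() {
  for (bool A : {false, true})
    for (bool B : {false, true})
      // (and (sext A), (sext B)) == (sext (and A, B))
      assert((sextI1(A) & sextI1(B)) == sextI1(A && B));
  return 0;
}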