author     Sanjay Patel <spatel@rotateright.com>    2016-02-23 17:41:34 +0000
committer  Sanjay Patel <spatel@rotateright.com>    2016-02-23 17:41:34 +0000
commit     713f25e0f8957c3c7d6b5a9d82ef1947962662cc (patch)
tree       0421bc0b73d20bd41b342b4300a5138c68521491 /llvm/lib/Transforms/InstCombine
parent     b2be09802638802deebace5237f1d804977517d0 (diff)
download   bcm5719-llvm-713f25e0f8957c3c7d6b5a9d82ef1947962662cc.tar.gz
           bcm5719-llvm-713f25e0f8957c3c7d6b5a9d82ef1947962662cc.zip
[InstCombine] improve readability ; NFCI
Less indenting, named local variables, more descriptive names.
llvm-svn: 261659
Diffstat (limited to 'llvm/lib/Transforms/InstCombine')
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp  66
1 file changed, 36 insertions, 30 deletions
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index eb41d38c307..b38ec7007de 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -1245,40 +1245,46 @@ static Instruction *matchDeMorgansLaws(BinaryOperator &I,
 Instruction *InstCombiner::foldCastedBitwiseLogic(BinaryOperator &I) {
   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
-  CastInst *Op0C = dyn_cast<CastInst>(Op0);
-  CastInst *Op1C = dyn_cast<CastInst>(Op1);
-  if (!Op0C || !Op1C)
+  CastInst *Cast0 = dyn_cast<CastInst>(Op0);
+  CastInst *Cast1 = dyn_cast<CastInst>(Op1);
+  if (!Cast0 || !Cast1)
     return nullptr;
 
-  Value *Op0COp = Op0C->getOperand(0);
-  Type *SrcTy = Op0COp->getType();
+  // The casts must be of the same type, and this must be a cast from an integer
+  // or integer vector source type.
+  auto CastOpcode = Cast0->getOpcode();
+  Type *SrcTy = Cast0->getSrcTy();
+  if ((CastOpcode != Cast1->getOpcode()) || (SrcTy != Cast1->getSrcTy()) ||
+      !SrcTy->isIntOrIntVectorTy())
+    return nullptr;
+
+  Value *Cast0Src = Cast0->getOperand(0);
+  Value *Cast1Src = Cast1->getOperand(0);
+  Type *DestTy = I.getType();
+
   // fold (and (cast A), (cast B)) -> (cast (and A, B))
-  if (Op0C->getOpcode() == Op1C->getOpcode() && // same cast kind ?
-      SrcTy == Op1C->getOperand(0)->getType() &&
-      SrcTy->isIntOrIntVectorTy()) {
-    Value *Op1COp = Op1C->getOperand(0);
-
-    // Only do this if the casts both really cause code to be generated.
-    if (ShouldOptimizeCast(Op0C->getOpcode(), Op0COp, I.getType()) &&
-        ShouldOptimizeCast(Op1C->getOpcode(), Op1COp, I.getType())) {
-      Value *NewOp = Builder->CreateAnd(Op0COp, Op1COp, I.getName());
-      return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
-    }
-
-    // If this is and(cast(icmp), cast(icmp)), try to fold this even if the
-    // cast is otherwise not optimizable. This happens for vector sexts.
-    if (ICmpInst *RHS = dyn_cast<ICmpInst>(Op1COp))
-      if (ICmpInst *LHS = dyn_cast<ICmpInst>(Op0COp))
-        if (Value *Res = FoldAndOfICmps(LHS, RHS))
-          return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
-
-    // If this is and(cast(fcmp), cast(fcmp)), try to fold this even if the
-    // cast is otherwise not optimizable. This happens for vector sexts.
-    if (FCmpInst *RHS = dyn_cast<FCmpInst>(Op1COp))
-      if (FCmpInst *LHS = dyn_cast<FCmpInst>(Op0COp))
-        if (Value *Res = FoldAndOfFCmps(LHS, RHS))
-          return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
+
+  // Only do this if the casts both really cause code to be generated.
+  if (ShouldOptimizeCast(CastOpcode, Cast0Src, DestTy) &&
+      ShouldOptimizeCast(CastOpcode, Cast1Src, DestTy)) {
+    Value *NewOp = Builder->CreateAnd(Cast0Src, Cast1Src, I.getName());
+    return CastInst::Create(CastOpcode, NewOp, DestTy);
   }
+
+  // If this is and(cast(icmp), cast(icmp)), try to fold this even if the
+  // cast is otherwise not optimizable. This happens for vector sexts.
+  if (ICmpInst *RHS = dyn_cast<ICmpInst>(Cast1Src))
+    if (ICmpInst *LHS = dyn_cast<ICmpInst>(Cast0Src))
+      if (Value *Res = FoldAndOfICmps(LHS, RHS))
+        return CastInst::Create(CastOpcode, Res, DestTy);
+
+  // If this is and(cast(fcmp), cast(fcmp)), try to fold this even if the
+  // cast is otherwise not optimizable. This happens for vector sexts.
+  if (FCmpInst *RHS = dyn_cast<FCmpInst>(Cast1Src))
+    if (FCmpInst *LHS = dyn_cast<FCmpInst>(Cast0Src))
+      if (Value *Res = FoldAndOfFCmps(LHS, RHS))
+        return CastInst::Create(CastOpcode, Res, DestTy);
+
   return nullptr;
 }
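For context, the fold this function performs, (and (cast A), (cast B)) -> (cast (and A, B)), can be pictured with a minimal LLVM IR sketch; the function and value names below are hypothetical and chosen only for illustration. Assuming both operands are the same kind of cast from the same integer source type (here, zext from i16), InstCombine may do the 'and' in the narrow type and keep a single cast:

; before: two zexts feeding an 'and'
define i32 @and_of_zexts(i16 %a, i16 %b) {
  %za = zext i16 %a to i32
  %zb = zext i16 %b to i32
  %r  = and i32 %za, %zb
  ret i32 %r
}

; after the fold: one 'and' in i16, then one cast to i32
define i32 @and_of_zexts_folded(i16 %a, i16 %b) {
  %ab = and i16 %a, %b
  %r  = zext i16 %ab to i32
  ret i32 %r
}

The icmp/fcmp special cases in the code above exist so that patterns like and(sext(icmp), sext(icmp)) can still be combined via FoldAndOfICmps/FoldAndOfFCmps even when ShouldOptimizeCast would otherwise reject the casts.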