Diffstat (limited to 'llvm/lib/Transforms/InstCombine')
12 files changed, 85 insertions, 141 deletions
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index 12ceab3aa16..38a0b4a1e9a 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -1035,7 +1035,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
     return replaceInstUsesWith(I, V);
 
   if (Value *V = SimplifyAddInst(LHS, RHS, I.hasNoSignedWrap(),
-                                 I.hasNoUnsignedWrap(), DL, &TLI, &DT))
+                                 I.hasNoUnsignedWrap(), DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(I, V);
 
   // (A*B)+(A*C) -> A*(B+C) etc
@@ -1154,7 +1154,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
     return replaceInstUsesWith(I, V);
 
   // A+B --> A|B iff A and B have no bits set in common.
-  if (haveNoCommonBitsSet(LHS, RHS, DL, &I, &DT))
+  if (haveNoCommonBitsSet(LHS, RHS, DL, &AC, &I, &DT))
     return BinaryOperator::CreateOr(LHS, RHS);
 
   if (Constant *CRHS = dyn_cast<Constant>(RHS)) {
@@ -1317,7 +1317,7 @@ Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
     return replaceInstUsesWith(I, V);
 
   if (Value *V =
-          SimplifyFAddInst(LHS, RHS, I.getFastMathFlags(), DL, &TLI, &DT))
+          SimplifyFAddInst(LHS, RHS, I.getFastMathFlags(), DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(I, V);
 
   if (isa<Constant>(RHS)) {
@@ -1493,7 +1493,7 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
     return replaceInstUsesWith(I, V);
 
   if (Value *V = SimplifySubInst(Op0, Op1, I.hasNoSignedWrap(),
-                                 I.hasNoUnsignedWrap(), DL, &TLI, &DT))
+                                 I.hasNoUnsignedWrap(), DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(I, V);
 
   // (A*B)-(A*C) -> A*(B-C) etc
@@ -1704,7 +1704,7 @@ Instruction *InstCombiner::visitFSub(BinaryOperator &I) {
     return replaceInstUsesWith(I, V);
 
   if (Value *V =
-          SimplifyFSubInst(Op0, Op1, I.getFastMathFlags(), DL, &TLI, &DT))
+          SimplifyFSubInst(Op0, Op1, I.getFastMathFlags(), DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(I, V);
 
   // fsub nsz 0, X ==> fsub nsz -0.0, X
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index cbcd459f582..2ab76d6f8ea 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -1355,7 +1355,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
   if (Value *V = SimplifyVectorOp(I))
     return replaceInstUsesWith(I, V);
 
-  if (Value *V = SimplifyAndInst(Op0, Op1, DL, &TLI, &DT))
+  if (Value *V = SimplifyAndInst(Op0, Op1, DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(I, V);
 
   // (A|B)&(A|C) -> A|(B&C) etc
@@ -1741,17 +1741,17 @@ Value *InstCombiner::FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
     Value *Mask = nullptr;
     Value *Masked = nullptr;
     if (LAnd->getOperand(0) == RAnd->getOperand(0) &&
-        isKnownToBeAPowerOfTwo(LAnd->getOperand(1), DL, false, 0, CxtI,
+        isKnownToBeAPowerOfTwo(LAnd->getOperand(1), DL, false, 0, &AC, CxtI,
                                &DT) &&
-        isKnownToBeAPowerOfTwo(RAnd->getOperand(1), DL, false, 0, CxtI,
+        isKnownToBeAPowerOfTwo(RAnd->getOperand(1), DL, false, 0, &AC, CxtI,
                                &DT)) {
       Mask = Builder->CreateOr(LAnd->getOperand(1), RAnd->getOperand(1));
       Masked = Builder->CreateAnd(LAnd->getOperand(0), Mask);
     } else if (LAnd->getOperand(1) == RAnd->getOperand(1) &&
-               isKnownToBeAPowerOfTwo(LAnd->getOperand(0), DL, false, 0, CxtI,
-                                      &DT) &&
-               isKnownToBeAPowerOfTwo(RAnd->getOperand(0), DL, false, 0, CxtI,
-                                      &DT)) {
+               isKnownToBeAPowerOfTwo(LAnd->getOperand(0), DL, false, 0, &AC,
+                                      CxtI, &DT) &&
+               isKnownToBeAPowerOfTwo(RAnd->getOperand(0), DL, false, 0, &AC,
+                                      CxtI, &DT)) {
       Mask = Builder->CreateOr(LAnd->getOperand(0), RAnd->getOperand(0));
       Masked = Builder->CreateAnd(LAnd->getOperand(1), Mask);
     }
@@ -2175,7 +2175,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
   if (Value *V = SimplifyVectorOp(I))
     return replaceInstUsesWith(I, V);
 
-  if (Value *V = SimplifyOrInst(Op0, Op1, DL, &TLI, &DT))
+  if (Value *V = SimplifyOrInst(Op0, Op1, DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(I, V);
 
   // (A&B)|(A&C) -> A&(B|C) etc
@@ -2533,7 +2533,7 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
   if (Value *V = SimplifyVectorOp(I))
     return replaceInstUsesWith(I, V);
 
-  if (Value *V = SimplifyXorInst(Op0, Op1, DL, &TLI, &DT))
+  if (Value *V = SimplifyXorInst(Op0, Op1, DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(I, V);
 
   // (A&B)^(A&C) -> A&(B^C) etc
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index cb741fba43b..4d432d56aa3 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -109,8 +109,8 @@ static Constant *getNegativeIsTrueBoolVec(ConstantDataVector *V) {
 }
 
 Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
-  unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), DL, MI, &DT);
-  unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), DL, MI, &DT);
+  unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), DL, MI, &AC, &DT);
+  unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), DL, MI, &AC, &DT);
   unsigned MinAlign = std::min(DstAlign, SrcAlign);
   unsigned CopyAlign = MI->getAlignment();
 
@@ -210,7 +210,7 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
 }
 
 Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
-  unsigned Alignment = getKnownAlignment(MI->getDest(), DL, MI, &DT);
+  unsigned Alignment = getKnownAlignment(MI->getDest(), DL, MI, &AC, &DT);
   if (MI->getAlignment() < Alignment) {
     MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                       Alignment, false));
@@ -1358,7 +1358,7 @@ Instruction *InstCombiner::visitVACopyInst(VACopyInst &I) {
 Instruction *InstCombiner::visitCallInst(CallInst &CI) {
   auto Args = CI.arg_operands();
   if (Value *V = SimplifyCall(CI.getCalledValue(), Args.begin(), Args.end(), DL,
-                              &TLI, &DT))
+                              &TLI, &DT, &AC))
     return replaceInstUsesWith(CI, V);
 
   if (isFreeCall(&CI, &TLI))
@@ -1558,7 +1558,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
   case Intrinsic::ppc_altivec_lvx:
   case Intrinsic::ppc_altivec_lvxl:
     // Turn PPC lvx -> load if the pointer is known aligned.
-    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II,
+    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, &AC,
                                    &DT) >= 16) {
       Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                           PointerType::getUnqual(II->getType()));
@@ -1575,7 +1575,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
   case Intrinsic::ppc_altivec_stvx:
   case Intrinsic::ppc_altivec_stvxl:
     // Turn stvx -> store if the pointer is known aligned.
-    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II,
+    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, &AC,
                                    &DT) >= 16) {
       Type *OpPtrTy =
         PointerType::getUnqual(II->getArgOperand(0)->getType());
@@ -1592,7 +1592,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
   }
   case Intrinsic::ppc_qpx_qvlfs:
     // Turn PPC QPX qvlfs -> load if the pointer is known aligned.
-    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II,
+    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, &AC,
                                    &DT) >= 16) {
       Type *VTy = VectorType::get(Builder->getFloatTy(),
                                   II->getType()->getVectorNumElements());
@@ -1604,7 +1604,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     break;
   case Intrinsic::ppc_qpx_qvlfd:
     // Turn PPC QPX qvlfd -> load if the pointer is known aligned.
-    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 32, DL, II,
+    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 32, DL, II, &AC,
                                    &DT) >= 32) {
       Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                           PointerType::getUnqual(II->getType()));
@@ -1613,7 +1613,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     break;
   case Intrinsic::ppc_qpx_qvstfs:
     // Turn PPC QPX qvstfs -> store if the pointer is known aligned.
-    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II,
+    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, &AC,
                                    &DT) >= 16) {
       Type *VTy = VectorType::get(Builder->getFloatTy(),
           II->getArgOperand(0)->getType()->getVectorNumElements());
@@ -1625,7 +1625,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     break;
   case Intrinsic::ppc_qpx_qvstfd:
     // Turn PPC QPX qvstfd -> store if the pointer is known aligned.
-    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 32, DL, II,
+    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 32, DL, II, &AC,
                                    &DT) >= 32) {
       Type *OpPtrTy =
         PointerType::getUnqual(II->getArgOperand(0)->getType());
@@ -2249,7 +2249,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
   case Intrinsic::arm_neon_vst3lane:
   case Intrinsic::arm_neon_vst4lane: {
     unsigned MemAlign =
-        getKnownAlignment(II->getArgOperand(0), DL, II, &DT);
+        getKnownAlignment(II->getArgOperand(0), DL, II, &AC, &DT);
     unsigned AlignArg = II->getNumArgOperands() - 1;
     ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
     if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
@@ -2528,78 +2528,6 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     if (KnownOne.isAllOnesValue())
       return eraseInstFromFunction(*II);
 
-    // For assumptions, add to the associated operand bundle the values to which
-    // the assumption might apply.
-    // Note: This code must be kept in-sync with the code in
-    // computeKnownBitsFromAssume in ValueTracking.
-    SmallVector<Value *, 16> Affected;
-    auto AddAffected = [&Affected](Value *V) {
-      if (isa<Argument>(V)) {
-        Affected.push_back(V);
-      } else if (auto *I = dyn_cast<Instruction>(V)) {
-        Affected.push_back(I);
-
-        if (I->getOpcode() == Instruction::BitCast ||
-            I->getOpcode() == Instruction::PtrToInt) {
-          V = I->getOperand(0);
-          if (isa<Instruction>(V) || isa<Argument>(V))
-            Affected.push_back(V);
-        }
-      }
-    };
-
-    CmpInst::Predicate Pred;
-    if (match(IIOperand, m_ICmp(Pred, m_Value(A), m_Value(B)))) {
-      AddAffected(A);
-      AddAffected(B);
-
-      if (Pred == ICmpInst::ICMP_EQ) {
-        // For equality comparisons, we handle the case of bit inversion.
-        auto AddAffectedFromEq = [&AddAffected](Value *V) {
-          Value *A;
-          if (match(V, m_Not(m_Value(A)))) {
-            AddAffected(A);
-            V = A;
-          }
-
-          Value *B;
-          ConstantInt *C;
-          if (match(V,
-                    m_CombineOr(m_And(m_Value(A), m_Value(B)),
-                                m_CombineOr(m_Or(m_Value(A), m_Value(B)),
-                                            m_Xor(m_Value(A), m_Value(B)))))) {
-            AddAffected(A);
-            AddAffected(B);
-          } else if (match(V,
-                           m_CombineOr(m_Shl(m_Value(A), m_ConstantInt(C)),
-                                       m_CombineOr(m_LShr(m_Value(A), m_ConstantInt(C)),
-                                                   m_AShr(m_Value(A),
-                                                          m_ConstantInt(C)))))) {
-            AddAffected(A);
-          }
-        };
-
-        AddAffectedFromEq(A);
-        AddAffectedFromEq(B);
-      }
-    }
-
-    // If the list of affected values is the same as the existing list then
-    // there's nothing more to do here.
-    if (!Affected.empty())
-      if (auto OB = CI.getOperandBundle("affected"))
-        if (Affected.size() == OB.getValue().Inputs.size() &&
-            std::equal(Affected.begin(), Affected.end(),
-                       OB.getValue().Inputs.begin()))
-          Affected.clear();
-
-    if (!Affected.empty()) {
-      Builder->CreateCall(AssumeIntrinsic, IIOperand,
-                          OperandBundleDef("affected", Affected),
-                          II->getName());
-      return eraseInstFromFunction(*II);
-    }
-
     break;
   }
   case Intrinsic::experimental_gc_relocate: {
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 9ffb47efbfb..6c4f4a1ce31 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -4156,7 +4156,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
   }
 
   if (Value *V =
-          SimplifyICmpInst(I.getPredicate(), Op0, Op1, DL, &TLI, &DT, &I))
+          SimplifyICmpInst(I.getPredicate(), Op0, Op1, DL, &TLI, &DT, &AC, &I))
     return replaceInstUsesWith(I, V);
 
   // comparing -val or val with non-zero is the same as just comparing val
@@ -4321,7 +4321,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
   // if A is a power of 2.
   if (match(Op0, m_And(m_Value(A), m_Not(m_Value(B)))) &&
       match(Op1, m_Zero()) &&
-      isKnownToBeAPowerOfTwo(A, DL, false, 0, &I, &DT) && I.isEquality())
+      isKnownToBeAPowerOfTwo(A, DL, false, 0, &AC, &I, &DT) && I.isEquality())
     return new ICmpInst(I.getInversePredicate(),
                         Builder->CreateAnd(A, B), Op1);
 
@@ -4644,7 +4644,7 @@ Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
 
   if (Value *V = SimplifyFCmpInst(I.getPredicate(), Op0, Op1,
-                                  I.getFastMathFlags(), DL, &TLI, &DT, &I))
+                                  I.getFastMathFlags(), DL, &TLI, &DT, &AC, &I))
     return replaceInstUsesWith(I, V);
 
   // Simplify 'fcmp pred X, X'
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
index 24ba412ca99..c30598af423 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
+++ b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
@@ -16,6 +16,7 @@
 #define LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H
 
 #include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/AssumptionCache.h"
 #include "llvm/Analysis/LoopInfo.h"
 #include "llvm/Analysis/TargetFolder.h"
 #include "llvm/Analysis/ValueTracking.h"
@@ -178,6 +179,7 @@ private:
   AliasAnalysis *AA;
 
   // Required analyses.
+  AssumptionCache &AC;
   TargetLibraryInfo &TLI;
   DominatorTree &DT;
   const DataLayout &DL;
@@ -191,17 +193,19 @@ private:
 public:
   InstCombiner(InstCombineWorklist &Worklist, BuilderTy *Builder,
                bool MinimizeSize, bool ExpensiveCombines, AliasAnalysis *AA,
-               TargetLibraryInfo &TLI, DominatorTree &DT, const DataLayout &DL,
-               LoopInfo *LI)
+               AssumptionCache &AC, TargetLibraryInfo &TLI,
+               DominatorTree &DT, const DataLayout &DL, LoopInfo *LI)
       : Worklist(Worklist), Builder(Builder), MinimizeSize(MinimizeSize),
-        ExpensiveCombines(ExpensiveCombines), AA(AA), TLI(TLI), DT(DT), DL(DL),
-        LI(LI), MadeIRChange(false) {}
+        ExpensiveCombines(ExpensiveCombines), AA(AA), AC(AC), TLI(TLI), DT(DT),
+        DL(DL), LI(LI), MadeIRChange(false) {}
 
   /// \brief Run the combiner over the entire worklist until it is empty.
   ///
   /// \returns true if the IR is changed.
   bool run();
 
+  AssumptionCache &getAssumptionCache() const { return AC; }
+
   const DataLayout &getDataLayout() const { return DL; }
 
   DominatorTree &getDominatorTree() const { return DT; }
@@ -472,28 +476,30 @@ public:
   void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
                         unsigned Depth, Instruction *CxtI) const {
-    return llvm::computeKnownBits(V, KnownZero, KnownOne, DL, Depth, CxtI, &DT);
+    return llvm::computeKnownBits(V, KnownZero, KnownOne, DL, Depth, &AC, CxtI,
+                                  &DT);
   }
   bool MaskedValueIsZero(Value *V, const APInt &Mask, unsigned Depth = 0,
                          Instruction *CxtI = nullptr) const {
-    return llvm::MaskedValueIsZero(V, Mask, DL, Depth, CxtI, &DT);
+    return llvm::MaskedValueIsZero(V, Mask, DL, Depth, &AC, CxtI, &DT);
  }
   unsigned ComputeNumSignBits(Value *Op, unsigned Depth = 0,
                               Instruction *CxtI = nullptr) const {
-    return llvm::ComputeNumSignBits(Op, DL, Depth, CxtI, &DT);
+    return llvm::ComputeNumSignBits(Op, DL, Depth, &AC, CxtI, &DT);
   }
   void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
                       unsigned Depth = 0, Instruction *CxtI = nullptr) const {
-    return llvm::ComputeSignBit(V, KnownZero, KnownOne, DL, Depth, CxtI, &DT);
+    return llvm::ComputeSignBit(V, KnownZero, KnownOne, DL, Depth, &AC, CxtI,
+                                &DT);
   }
   OverflowResult computeOverflowForUnsignedMul(Value *LHS, Value *RHS,
                                                const Instruction *CxtI) {
-    return llvm::computeOverflowForUnsignedMul(LHS, RHS, DL, CxtI, &DT);
+    return llvm::computeOverflowForUnsignedMul(LHS, RHS, DL, &AC, CxtI, &DT);
   }
   OverflowResult computeOverflowForUnsignedAdd(Value *LHS, Value *RHS,
                                                const Instruction *CxtI) {
-    return llvm::computeOverflowForUnsignedAdd(LHS, RHS, DL, CxtI, &DT);
+    return llvm::computeOverflowForUnsignedAdd(LHS, RHS, DL, &AC, CxtI, &DT);
   }
 
 private:
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index 517c0d5b666..5276bee4e0a 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -286,7 +286,7 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
   SmallVector<Instruction *, 4> ToDelete;
   if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
     unsigned SourceAlign = getOrEnforceKnownAlignment(
-        Copy->getSource(), AI.getAlignment(), DL, &AI, &DT);
+        Copy->getSource(), AI.getAlignment(), DL, &AI, &AC, &DT);
     if (AI.getAlignment() <= SourceAlign) {
       DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
       DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
@@ -826,7 +826,7 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
 
   // Attempt to improve the alignment.
   unsigned KnownAlign = getOrEnforceKnownAlignment(
-      Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, &DT);
+      Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, &AC, &DT);
   unsigned LoadAlign = LI.getAlignment();
   unsigned EffectiveLoadAlign =
       LoadAlign != 0 ? LoadAlign : DL.getABITypeAlignment(LI.getType());
@@ -1199,7 +1199,7 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
 
   // Attempt to improve the alignment.
   unsigned KnownAlign = getOrEnforceKnownAlignment(
-      Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, &DT);
+      Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, &AC, &DT);
   unsigned StoreAlign = SI.getAlignment();
   unsigned EffectiveStoreAlign =
       StoreAlign != 0 ? StoreAlign : DL.getABITypeAlignment(Val->getType());
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index 8374437dc79..c70d7585cac 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -48,7 +48,8 @@ static Value *simplifyValueKnownNonZero(Value *V, InstCombiner &IC,
   BinaryOperator *I = dyn_cast<BinaryOperator>(V);
   if (I && I->isLogicalShift() &&
       isKnownToBeAPowerOfTwo(I->getOperand(0), IC.getDataLayout(), false, 0,
-                             &CxtI, &IC.getDominatorTree())) {
+                             &IC.getAssumptionCache(), &CxtI,
+                             &IC.getDominatorTree())) {
     // We know that this is an exact/nuw shift and that the input is a
     // non-zero context as well.
     if (Value *V2 = simplifyValueKnownNonZero(I->getOperand(0), IC, CxtI)) {
@@ -178,7 +179,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
   if (Value *V = SimplifyVectorOp(I))
     return replaceInstUsesWith(I, V);
 
-  if (Value *V = SimplifyMulInst(Op0, Op1, DL, &TLI, &DT))
+  if (Value *V = SimplifyMulInst(Op0, Op1, DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(I, V);
 
   if (Value *V = SimplifyUsingDistributiveLaws(I))
@@ -544,7 +545,7 @@ Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
     std::swap(Op0, Op1);
 
   if (Value *V =
-          SimplifyFMulInst(Op0, Op1, I.getFastMathFlags(), DL, &TLI, &DT))
+          SimplifyFMulInst(Op0, Op1, I.getFastMathFlags(), DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(I, V);
 
   bool AllowReassociate = I.hasUnsafeAlgebra();
@@ -1060,7 +1061,7 @@ Instruction *InstCombiner::visitUDiv(BinaryOperator &I) {
   if (Value *V = SimplifyVectorOp(I))
     return replaceInstUsesWith(I, V);
 
-  if (Value *V = SimplifyUDivInst(Op0, Op1, DL, &TLI, &DT))
+  if (Value *V = SimplifyUDivInst(Op0, Op1, DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(I, V);
 
   // Handle the integer div common cases
@@ -1133,7 +1134,7 @@ Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
   if (Value *V = SimplifyVectorOp(I))
     return replaceInstUsesWith(I, V);
 
-  if (Value *V = SimplifySDivInst(Op0, Op1, DL, &TLI, &DT))
+  if (Value *V = SimplifySDivInst(Op0, Op1, DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(I, V);
 
   // Handle the integer div common cases
@@ -1196,7 +1197,7 @@ Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
       return BO;
     }
 
-    if (isKnownToBeAPowerOfTwo(Op1, DL, /*OrZero*/ true, 0, &I, &DT)) {
+    if (isKnownToBeAPowerOfTwo(Op1, DL, /*OrZero*/ true, 0, &AC, &I, &DT)) {
       // X sdiv (1 << Y) -> X udiv (1 << Y) ( -> X u>> Y)
       // Safe because the only negative value (1 << Y) can take on is
      // INT_MIN, and X sdiv INT_MIN == X udiv INT_MIN == 0 if X doesn't have
@@ -1248,7 +1249,7 @@ Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {
     return replaceInstUsesWith(I, V);
 
   if (Value *V = SimplifyFDivInst(Op0, Op1, I.getFastMathFlags(),
-                                  DL, &TLI, &DT))
+                                  DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(I, V);
 
   if (isa<Constant>(Op0))
@@ -1422,7 +1423,7 @@ Instruction *InstCombiner::visitURem(BinaryOperator &I) {
   if (Value *V = SimplifyVectorOp(I))
     return replaceInstUsesWith(I, V);
 
-  if (Value *V = SimplifyURemInst(Op0, Op1, DL, &TLI, &DT))
+  if (Value *V = SimplifyURemInst(Op0, Op1, DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(I, V);
 
   if (Instruction *common = commonIRemTransforms(I))
@@ -1435,7 +1436,7 @@ Instruction *InstCombiner::visitURem(BinaryOperator &I) {
                                   I.getType());
 
   // X urem Y -> X and Y-1, where Y is a power of 2,
-  if (isKnownToBeAPowerOfTwo(Op1, DL, /*OrZero*/ true, 0, &I, &DT)) {
+  if (isKnownToBeAPowerOfTwo(Op1, DL, /*OrZero*/ true, 0, &AC, &I, &DT)) {
     Constant *N1 = Constant::getAllOnesValue(I.getType());
     Value *Add = Builder->CreateAdd(Op1, N1);
     return BinaryOperator::CreateAnd(Op0, Add);
@@ -1465,7 +1466,7 @@ Instruction *InstCombiner::visitSRem(BinaryOperator &I) {
   if (Value *V = SimplifyVectorOp(I))
     return replaceInstUsesWith(I, V);
 
-  if (Value *V = SimplifySRemInst(Op0, Op1, DL, &TLI, &DT))
+  if (Value *V = SimplifySRemInst(Op0, Op1, DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(I, V);
 
   // Handle the integer rem common cases
@@ -1541,7 +1542,7 @@ Instruction *InstCombiner::visitFRem(BinaryOperator &I) {
     return replaceInstUsesWith(I, V);
 
   if (Value *V = SimplifyFRemInst(Op0, Op1, I.getFastMathFlags(),
-                                  DL, &TLI, &DT))
+                                  DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(I, V);
 
   // Handle cases involving: rem X, (select Cond, Y, Z)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp b/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
index a2f593ac41b..184897f751f 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
@@ -880,7 +880,7 @@ Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) {
 // PHINode simplification
 //
 Instruction *InstCombiner::visitPHINode(PHINode &PN) {
-  if (Value *V = SimplifyInstruction(&PN, DL, &TLI, &DT))
+  if (Value *V = SimplifyInstruction(&PN, DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(PN, V);
 
   if (Instruction *Result = FoldPHIArgZextsIntoPHI(PN))
@@ -937,7 +937,7 @@ Instruction *InstCombiner::visitPHINode(PHINode &PN) {
   for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
     Instruction *CtxI = PN.getIncomingBlock(i)->getTerminator();
     Value *VA = PN.getIncomingValue(i);
-    if (isKnownNonZero(VA, DL, 0, CtxI, &DT)) {
+    if (isKnownNonZero(VA, DL, 0, &AC, CtxI, &DT)) {
       if (!NonZeroConst)
         NonZeroConst = GetAnyNonZeroConstInt(PN);
       PN.setIncomingValue(i, NonZeroConst);
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index 96a531bb254..36644845352 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -1101,7 +1101,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
   Type *SelType = SI.getType();
 
   if (Value *V =
-          SimplifySelectInst(CondVal, TrueVal, FalseVal, DL, &TLI, &DT))
+          SimplifySelectInst(CondVal, TrueVal, FalseVal, DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(SI, V);
 
   if (Instruction *I = canonicalizeSelectToShuffle(SI))
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index fa7a7c6bc73..bc38c4aca34 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -722,7 +722,7 @@ Instruction *InstCombiner::visitShl(BinaryOperator &I) {
   if (Value *V =
           SimplifyShlInst(I.getOperand(0), I.getOperand(1), I.hasNoSignedWrap(),
-                          I.hasNoUnsignedWrap(), DL, &TLI, &DT))
+                          I.hasNoUnsignedWrap(), DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(I, V);
 
   if (Instruction *V = commonShiftTransforms(I))
@@ -763,7 +763,7 @@ Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
     return replaceInstUsesWith(I, V);
 
   if (Value *V = SimplifyLShrInst(I.getOperand(0), I.getOperand(1), I.isExact(),
-                                  DL, &TLI, &DT))
+                                  DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(I, V);
 
   if (Instruction *R = commonShiftTransforms(I))
@@ -807,7 +807,7 @@ Instruction *InstCombiner::visitAShr(BinaryOperator &I) {
     return replaceInstUsesWith(I, V);
 
   if (Value *V = SimplifyAShrInst(I.getOperand(0), I.getOperand(1), I.isExact(),
-                                  DL, &TLI, &DT))
+                                  DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(I, V);
 
   if (Instruction *R = commonShiftTransforms(I))
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
index ee21d6c7e72..732a786ceb7 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -145,7 +145,7 @@ Instruction *InstCombiner::scalarizePHI(ExtractElementInst &EI, PHINode *PN) {
 
 Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
   if (Value *V = SimplifyExtractElementInst(
-          EI.getVectorOperand(), EI.getIndexOperand(), DL, &TLI, &DT))
+          EI.getVectorOperand(), EI.getIndexOperand(), DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(EI, V);
 
   // If vector val is constant with all elements the same, replace EI with
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index ab1869b69b9..0d29c1dceea 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -40,6 +40,7 @@
 #include "llvm/ADT/Statistic.h"
 #include "llvm/ADT/StringSwitch.h"
 #include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/AssumptionCache.h"
 #include "llvm/Analysis/BasicAliasAnalysis.h"
 #include "llvm/Analysis/CFG.h"
 #include "llvm/Analysis/ConstantFolding.h"
@@ -682,14 +683,14 @@ Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
     if (SI0->getCondition() == SI1->getCondition()) {
       Value *SI = nullptr;
       if (Value *V = SimplifyBinOp(TopLevelOpcode, SI0->getFalseValue(),
-                                   SI1->getFalseValue(), DL, &TLI, &DT))
+                                   SI1->getFalseValue(), DL, &TLI, &DT, &AC))
         SI = Builder->CreateSelect(SI0->getCondition(),
                                    Builder->CreateBinOp(TopLevelOpcode,
                                                         SI0->getTrueValue(),
                                                         SI1->getTrueValue()),
                                    V);
       if (Value *V = SimplifyBinOp(TopLevelOpcode, SI0->getTrueValue(),
-                                   SI1->getTrueValue(), DL, &TLI, &DT))
+                                   SI1->getTrueValue(), DL, &TLI, &DT, &AC))
         SI = Builder->CreateSelect(
             SI0->getCondition(), V,
             Builder->CreateBinOp(TopLevelOpcode, SI0->getFalseValue(),
@@ -1373,7 +1374,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
   SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end());
 
   if (Value *V =
-          SimplifyGEPInst(GEP.getSourceElementType(), Ops, DL, &TLI, &DT))
+          SimplifyGEPInst(GEP.getSourceElementType(), Ops, DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(GEP, V);
 
   Value *PtrOp = GEP.getOperand(0);
@@ -2288,7 +2289,7 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
     return replaceInstUsesWith(EV, Agg);
 
   if (Value *V =
-          SimplifyExtractValueInst(Agg, EV.getIndices(), DL, &TLI, &DT))
+          SimplifyExtractValueInst(Agg, EV.getIndices(), DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(EV, V);
 
   if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
@@ -3114,8 +3115,8 @@ static bool prepareICWorklistFromFunction(Function &F, const DataLayout &DL,
 
 static bool
 combineInstructionsOverFunction(Function &F, InstCombineWorklist &Worklist,
-                                AliasAnalysis *AA, TargetLibraryInfo &TLI,
-                                DominatorTree &DT,
+                                AliasAnalysis *AA, AssumptionCache &AC,
+                                TargetLibraryInfo &TLI, DominatorTree &DT,
                                 bool ExpensiveCombines = true,
                                 LoopInfo *LI = nullptr) {
   auto &DL = F.getParent()->getDataLayout();
@@ -3125,8 +3126,12 @@ combineInstructionsOverFunction(Function &F, InstCombineWorklist &Worklist,
   /// instructions into the worklist when they are created.
   IRBuilder<TargetFolder, IRBuilderCallbackInserter> Builder(
       F.getContext(), TargetFolder(DL),
-      IRBuilderCallbackInserter([&Worklist](Instruction *I) {
+      IRBuilderCallbackInserter([&Worklist, &AC](Instruction *I) {
        Worklist.Add(I);
+
+        using namespace llvm::PatternMatch;
+        if (match(I, m_Intrinsic<Intrinsic::assume>()))
+          AC.registerAssumption(cast<CallInst>(I));
       }));
 
   // Lower dbg.declare intrinsics otherwise their value may be clobbered
@@ -3143,7 +3148,7 @@ combineInstructionsOverFunction(Function &F, InstCombineWorklist &Worklist,
     bool Changed = prepareICWorklistFromFunction(F, DL, &TLI, Worklist);
 
     InstCombiner IC(Worklist, &Builder, F.optForMinSize(), ExpensiveCombines,
-                    AA, TLI, DT, DL, LI);
+                    AA, AC, TLI, DT, DL, LI);
     Changed |= IC.run();
 
     if (!Changed)
@@ -3155,13 +3160,14 @@ combineInstructionsOverFunction(Function &F, InstCombineWorklist &Worklist,
 
 PreservedAnalyses InstCombinePass::run(Function &F,
                                        FunctionAnalysisManager &AM) {
+  auto &AC = AM.getResult<AssumptionAnalysis>(F);
   auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
   auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
 
   auto *LI = AM.getCachedResult<LoopAnalysis>(F);
 
   // FIXME: The AliasAnalysis is not yet supported in the new pass manager
-  if (!combineInstructionsOverFunction(F, Worklist, nullptr, TLI, DT,
+  if (!combineInstructionsOverFunction(F, Worklist, nullptr, AC, TLI, DT,
                                        ExpensiveCombines, LI))
     // No changes, all analyses are preserved.
     return PreservedAnalyses::all();
@@ -3176,6 +3182,7 @@ PreservedAnalyses InstCombinePass::run(Function &F,
 void InstructionCombiningPass::getAnalysisUsage(AnalysisUsage &AU) const {
   AU.setPreservesCFG();
   AU.addRequired<AAResultsWrapperPass>();
+  AU.addRequired<AssumptionCacheTracker>();
   AU.addRequired<TargetLibraryInfoWrapperPass>();
   AU.addRequired<DominatorTreeWrapperPass>();
   AU.addPreserved<DominatorTreeWrapperPass>();
@@ -3190,6 +3197,7 @@ bool InstructionCombiningPass::runOnFunction(Function &F) {
 
   // Required analyses.
   auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
+  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
   auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
   auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
 
@@ -3197,13 +3205,14 @@ bool InstructionCombiningPass::runOnFunction(Function &F) {
   // Optional analyses.
   auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
   auto *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;
 
-  return combineInstructionsOverFunction(F, Worklist, AA, TLI, DT,
+  return combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, DT,
                                          ExpensiveCombines, LI);
 }
 
 char InstructionCombiningPass::ID = 0;
 INITIALIZE_PASS_BEGIN(InstructionCombiningPass, "instcombine",
                       "Combine redundant instructions", false, false)
+INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
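The change is mechanical but uniform: InstCombine regains a reference to the function's AssumptionCache, forwards it as &AC to every InstructionSimplify and ValueTracking query it issues, registers any llvm.assume the builder creates, and drops the "affected" operand-bundle bookkeeping previously done for assumes in visitCallInst. The sketch below condenses that wiring; it is an illustration only (free-standing helpers, not the pass itself), using only APIs that appear in the hunks above.

// Sketch (not part of the patch): how an AssumptionCache is threaded through
// ValueTracking queries and kept up to date, condensed from the diff above.
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"

using namespace llvm;

// Passing &AC lets facts established by dominating llvm.assume calls feed the
// query, mirroring the isKnownNonZero call in visitPHINode.
static bool isNonZeroWithAssumes(Value *V, const DataLayout &DL,
                                 AssumptionCache &AC, Instruction *CxtI,
                                 DominatorTree &DT) {
  return isKnownNonZero(V, DL, /*Depth=*/0, &AC, CxtI, &DT);
}

// Mirrors the IRBuilderCallbackInserter hunk: any llvm.assume the combiner
// materializes must be registered, or later queries will not see it.
static void noteNewInstruction(Instruction *I, AssumptionCache &AC) {
  using namespace llvm::PatternMatch;
  if (match(I, m_Intrinsic<Intrinsic::assume>()))
    AC.registerAssumption(cast<CallInst>(I));
}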