Diffstat (limited to 'llvm/lib/Transforms')
-rw-r--r--   llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp      | 57
-rw-r--r--   llvm/lib/Transforms/InstCombine/InstCombineInternal.h     |  1
-rw-r--r--   llvm/lib/Transforms/InstCombine/InstructionCombining.cpp  |  4
-rw-r--r--   llvm/lib/Transforms/Scalar/GVN.cpp                        | 17
-rw-r--r--   llvm/lib/Transforms/Scalar/JumpThreading.cpp              | 11
-rw-r--r--   llvm/lib/Transforms/Scalar/SCCP.cpp                       | 13
-rw-r--r--   llvm/lib/Transforms/Utils/BasicBlockUtils.cpp             |  2
-rw-r--r--   llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp          |  4
-rw-r--r--   llvm/lib/Transforms/Utils/InlineFunction.cpp              |  8
-rw-r--r--   llvm/lib/Transforms/Utils/Local.cpp                       | 12
-rw-r--r--   llvm/lib/Transforms/Utils/LoopSimplify.cpp                | 16
-rw-r--r--   llvm/lib/Transforms/Utils/LoopUtils.cpp                   |  3
-rw-r--r--   llvm/lib/Transforms/Utils/SimplifyCFG.cpp                 | 13
13 files changed, 126 insertions, 35 deletions
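
For orientation: callbr is the IR-level form of asm-goto. It is a call that is also a block terminator, with one fallthrough destination plus a list of indirect label destinations that are also referenced as blockaddress operands of the inline asm, which is why the passes touched below generally refuse to split or thread its outgoing edges. A minimal sketch of the IR shape involved (the function, operands, and constraint string here are invented for illustration, not taken from this patch or its tests):

define i32 @example(i32 %x) {
entry:
  ; Fallthrough goes to %normal; the indirect destination %error also appears
  ; as a blockaddress argument of the asm, so its edge cannot simply be split.
  callbr void asm sideeffect "testl $0, $0; jne ${1:l}",
          "r,X,~{dirflag},~{fpsr},~{flags}"(i32 %x, i8* blockaddress(@example, %error))
          to label %normal [label %error]

normal:
  ret i32 0

error:
  ret i32 1
}
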
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index b34b3fd1619..ab7c3a5e294 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -6,7 +6,7 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// This file implements the visitCall and visitInvoke functions.
+// This file implements the visitCall, visitInvoke, and visitCallBr functions.
 //
 //===----------------------------------------------------------------------===//
 
@@ -1834,8 +1834,8 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
   IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
   if (!II) return visitCallBase(CI);
 
-  // Intrinsics cannot occur in an invoke, so handle them here instead of in
-  // visitCallBase.
+  // Intrinsics cannot occur in an invoke or a callbr, so handle them here
+  // instead of in visitCallBase.
   if (auto *MI = dyn_cast<AnyMemIntrinsic>(II)) {
     bool Changed = false;
 
@@ -4017,6 +4017,11 @@ Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
   return visitCallBase(II);
 }
 
+// CallBrInst simplification
+Instruction *InstCombiner::visitCallBrInst(CallBrInst &CBI) {
+  return visitCallBase(CBI);
+}
+
 /// If this cast does not affect the value passed through the varargs area, we
 /// can eliminate the use of the cast.
 static bool isSafeToEliminateVarargsCast(const CallBase &Call,
@@ -4145,7 +4150,7 @@ static IntrinsicInst *findInitTrampoline(Value *Callee) {
   return nullptr;
 }
 
-/// Improvements for call and invoke instructions.
+/// Improvements for call, callbr and invoke instructions.
 Instruction *InstCombiner::visitCallBase(CallBase &Call) {
   if (isAllocLikeFn(&Call, &TLI))
     return visitAllocSite(Call);
@@ -4178,7 +4183,7 @@ Instruction *InstCombiner::visitCallBase(CallBase &Call) {
   }
 
   // If the callee is a pointer to a function, attempt to move any casts to the
-  // arguments of the call/invoke.
+  // arguments of the call/callbr/invoke.
   Value *Callee = Call.getCalledValue();
   if (!isa<Function>(Callee) && transformConstExprCastCall(Call))
     return nullptr;
@@ -4211,9 +4216,9 @@ Instruction *InstCombiner::visitCallBase(CallBase &Call) {
       if (isa<CallInst>(OldCall))
         return eraseInstFromFunction(*OldCall);
 
-      // We cannot remove an invoke, because it would change the CFG, just
-      // change the callee to a null pointer.
-      cast<InvokeInst>(OldCall)->setCalledFunction(
+      // We cannot remove an invoke or a callbr, because it would change the
+      // CFG, just change the callee to a null pointer.
+      cast<CallBase>(OldCall)->setCalledFunction(
           CalleeF->getFunctionType(),
           Constant::getNullValue(CalleeF->getType()));
       return nullptr;
@@ -4228,8 +4233,8 @@ Instruction *InstCombiner::visitCallBase(CallBase &Call) {
     if (!Call.getType()->isVoidTy())
       replaceInstUsesWith(Call, UndefValue::get(Call.getType()));
 
-    if (isa<InvokeInst>(Call)) {
-      // Can't remove an invoke because we cannot change the CFG.
+    if (Call.isTerminator()) {
+      // Can't remove an invoke or callbr because we cannot change the CFG.
       return nullptr;
     }
 
@@ -4282,7 +4287,7 @@ Instruction *InstCombiner::visitCallBase(CallBase &Call) {
 }
 
 /// If the callee is a constexpr cast of a function, attempt to move the cast to
-/// the arguments of the call/invoke.
+/// the arguments of the call/callbr/invoke.
 bool InstCombiner::transformConstExprCastCall(CallBase &Call) {
   auto *Callee = dyn_cast<Function>(Call.getCalledValue()->stripPointerCasts());
   if (!Callee)
@@ -4333,17 +4338,21 @@ bool InstCombiner::transformConstExprCastCall(CallBase &Call) {
       return false;   // Attribute not compatible with transformed value.
   }
 
-  // If the callbase is an invoke instruction, and the return value is used by
-  // a PHI node in a successor, we cannot change the return type of the call
-  // because there is no place to put the cast instruction (without breaking
-  // the critical edge). Bail out in this case.
-  if (!Caller->use_empty())
+  // If the callbase is an invoke/callbr instruction, and the return value is
+  // used by a PHI node in a successor, we cannot change the return type of
+  // the call because there is no place to put the cast instruction (without
+  // breaking the critical edge). Bail out in this case.
+  if (!Caller->use_empty()) {
     if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
       for (User *U : II->users())
        if (PHINode *PN = dyn_cast<PHINode>(U))
          if (PN->getParent() == II->getNormalDest() ||
              PN->getParent() == II->getUnwindDest())
            return false;
+    // FIXME: Be conservative for callbr to avoid a quadratic search.
+    if (CallBrInst *CBI = dyn_cast<CallBrInst>(Caller))
+      return false;
+  }
 
   unsigned NumActualArgs = Call.arg_size();
 
@@ -4497,6 +4506,9 @@ bool InstCombiner::transformConstExprCastCall(CallBase &Call) {
   if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
     NewCall = Builder.CreateInvoke(Callee, II->getNormalDest(),
                                    II->getUnwindDest(), Args, OpBundles);
+  } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(Caller)) {
+    NewCall = Builder.CreateCallBr(Callee, CBI->getDefaultDest(),
+                                   CBI->getIndirectDests(), Args, OpBundles);
   } else {
     NewCall = Builder.CreateCall(Callee, Args, OpBundles);
     cast<CallInst>(NewCall)->setTailCallKind(
@@ -4520,11 +4532,14 @@ bool InstCombiner::transformConstExprCastCall(CallBase &Call) {
     NV = NC = CastInst::CreateBitOrPointerCast(NC, OldRetTy);
     NC->setDebugLoc(Caller->getDebugLoc());
 
-    // If this is an invoke instruction, we should insert it after the first
-    // non-phi, instruction in the normal successor block.
+    // If this is an invoke/callbr instruction, we should insert it after the
+    // first non-phi instruction in the normal successor block.
     if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
       BasicBlock::iterator I = II->getNormalDest()->getFirstInsertionPt();
       InsertNewInstBefore(NC, *I);
+    } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(Caller)) {
+      BasicBlock::iterator I = CBI->getDefaultDest()->getFirstInsertionPt();
+      InsertNewInstBefore(NC, *I);
    } else {
      // Otherwise, it's a call, just insert cast right after the call.
       InsertNewInstBefore(NC, *Caller);
@@ -4673,6 +4688,12 @@ InstCombiner::transformCallThroughTrampoline(CallBase &Call,
                                    NewArgs, OpBundles);
     cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
     cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
+  } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(&Call)) {
+    NewCaller =
+        CallBrInst::Create(NewFTy, NewCallee, CBI->getDefaultDest(),
+                           CBI->getIndirectDests(), NewArgs, OpBundles);
+    cast<CallBrInst>(NewCaller)->setCallingConv(CBI->getCallingConv());
+    cast<CallBrInst>(NewCaller)->setAttributes(NewPAL);
   } else {
     NewCaller = CallInst::Create(NewFTy, NewCallee, NewArgs, OpBundles);
     cast<CallInst>(NewCaller)->setTailCallKind(
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
index 951e0e72e9e..5b0c7fce0d1 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
+++ b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
@@ -392,6 +392,7 @@ public:
   Instruction *visitSelectInst(SelectInst &SI);
   Instruction *visitCallInst(CallInst &CI);
   Instruction *visitInvokeInst(InvokeInst &II);
+  Instruction *visitCallBrInst(CallBrInst &CBI);
 
   Instruction *SliceUpIllegalIntegerPHI(PHINode &PN);
   Instruction *visitPHINode(PHINode &PN);
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 1f40c8f5a4a..4d04e3ff99e 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -921,8 +921,8 @@ Instruction *InstCombiner::foldOpIntoPhi(Instruction &I, PHINode *PN) {
 
       // If the InVal is an invoke at the end of the pred block, then we can't
       // insert a computation after it without breaking the edge.
-      if (InvokeInst *II = dyn_cast<InvokeInst>(InVal))
-        if (II->getParent() == NonConstBB)
+      if (isa<InvokeInst>(InVal))
+        if (cast<Instruction>(InVal)->getParent() == NonConstBB)
           return nullptr;
 
       // If the incoming non-constant value is in I's block, we will remove one
diff --git a/llvm/lib/Transforms/Scalar/GVN.cpp b/llvm/lib/Transforms/Scalar/GVN.cpp
index 7595ae05787..a02f32f5643 100644
--- a/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -1131,6 +1131,14 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
     return false;
   }
 
+  // FIXME: Can we support the fallthrough edge?
+  if (isa<CallBrInst>(Pred->getTerminator())) {
+    LLVM_DEBUG(
+        dbgs() << "COULD NOT PRE LOAD BECAUSE OF CALLBR CRITICAL EDGE '"
+               << Pred->getName() << "': " << *LI << '\n');
+    return false;
+  }
+
   if (LoadBB->isEHPad()) {
     LLVM_DEBUG(
         dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD CRITICAL EDGE '"
@@ -2167,8 +2175,8 @@ bool GVN::performScalarPRE(Instruction *CurInst) {
     return false;
 
   // We don't currently value number ANY inline asm calls.
-  if (CallInst *CallI = dyn_cast<CallInst>(CurInst))
-    if (CallI->isInlineAsm())
+  if (auto *CallB = dyn_cast<CallBase>(CurInst))
+    if (CallB->isInlineAsm())
       return false;
 
   uint32_t ValNo = VN.lookup(CurInst);
@@ -2251,6 +2259,11 @@ bool GVN::performScalarPRE(Instruction *CurInst) {
   if (isa<IndirectBrInst>(PREPred->getTerminator()))
     return false;
 
+  // Don't do PRE across callbr.
+  // FIXME: Can we do this across the fallthrough edge?
+  if (isa<CallBrInst>(PREPred->getTerminator()))
+    return false;
+
   // We can't do PRE safely on a critical edge, so instead we schedule
   // the edge to be split and perform the PRE the next time we iterate
   // on the function.
diff --git a/llvm/lib/Transforms/Scalar/JumpThreading.cpp b/llvm/lib/Transforms/Scalar/JumpThreading.cpp
index 7738a79425b..f74f7e28e52 100644
--- a/llvm/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/llvm/lib/Transforms/Scalar/JumpThreading.cpp
@@ -1055,7 +1055,7 @@ bool JumpThreadingPass::ProcessBlock(BasicBlock *BB) {
     Condition = IB->getAddress()->stripPointerCasts();
     Preference = WantBlockAddress;
   } else {
-    return false; // Must be an invoke.
+    return false; // Must be an invoke or callbr.
   }
 
   // Run constant folding to see if we can reduce the condition to a simple
@@ -1428,7 +1428,9 @@ bool JumpThreadingPass::SimplifyPartiallyRedundantLoad(LoadInst *LoadI) {
   // Add all the unavailable predecessors to the PredsToSplit list.
   for (BasicBlock *P : predecessors(LoadBB)) {
     // If the predecessor is an indirect goto, we can't split the edge.
-    if (isa<IndirectBrInst>(P->getTerminator()))
+    // Same for CallBr.
+    if (isa<IndirectBrInst>(P->getTerminator()) ||
+        isa<CallBrInst>(P->getTerminator()))
       return false;
 
     if (!AvailablePredSet.count(P))
@@ -1641,8 +1643,9 @@ bool JumpThreadingPass::ProcessThreadableEdges(Value *Cond, BasicBlock *BB,
       ++PredWithKnownDest;
 
     // If the predecessor ends with an indirect goto, we can't change its
-    // destination.
+    // destination. Same for CallBr.
-    if (isa<IndirectBrInst>(Pred->getTerminator()))
+    if (isa<IndirectBrInst>(Pred->getTerminator()) ||
+        isa<CallBrInst>(Pred->getTerminator()))
      continue;
 
    PredToDestList.push_back(std::make_pair(Pred, DestBB));
diff --git a/llvm/lib/Transforms/Scalar/SCCP.cpp b/llvm/lib/Transforms/Scalar/SCCP.cpp
index 5dd7f43e662..39d294f8602 100644
--- a/llvm/lib/Transforms/Scalar/SCCP.cpp
+++ b/llvm/lib/Transforms/Scalar/SCCP.cpp
@@ -638,6 +638,11 @@ private:
     visitTerminator(II);
   }
 
+  void visitCallBrInst (CallBrInst &CBI) {
+    visitCallSite(&CBI);
+    visitTerminator(CBI);
+  }
+
   void visitCallSite (CallSite CS);
   void visitResumeInst (ResumeInst &I) { /*returns void*/ }
   void visitUnreachableInst(UnreachableInst &I) { /*returns void*/ }
@@ -733,6 +738,13 @@ void SCCPSolver::getFeasibleSuccessors(Instruction &TI,
     return;
   }
 
+  // In case of callbr, we pessimistically assume that all successors are
+  // feasible.
+  if (isa<CallBrInst>(&TI)) {
+    Succs.assign(TI.getNumSuccessors(), true);
+    return;
+  }
+
   LLVM_DEBUG(dbgs() << "Unknown terminator instruction: " << TI << '\n');
   llvm_unreachable("SCCP: Don't know how to handle this terminator!");
 }
@@ -1597,6 +1609,7 @@ bool SCCPSolver::ResolvedUndefsIn(Function &F) {
         return true;
       case Instruction::Call:
       case Instruction::Invoke:
+      case Instruction::CallBr:
        // There are two reasons a call can have an undef result
        // 1. It could be tracked.
        // 2. It could be constant-foldable.
diff --git a/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp b/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
index 41ad4fefe1f..2410f652fd7 100644
--- a/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
+++ b/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
@@ -549,6 +549,8 @@ BasicBlock *llvm::SplitBlockPredecessors(BasicBlock *BB,
       // all BlockAddress uses would need to be updated.
       assert(!isa<IndirectBrInst>(Preds[i]->getTerminator()) &&
              "Cannot split an edge from an IndirectBrInst");
+      assert(!isa<CallBrInst>(Preds[i]->getTerminator()) &&
+             "Cannot split an edge from a CallBrInst");
       Preds[i]->getTerminator()->replaceUsesOfWith(BB, NewBB);
     }
 
diff --git a/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp b/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp
index ab604a6c57c..d73fefdf9c9 100644
--- a/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp
+++ b/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp
@@ -144,6 +144,10 @@ llvm::SplitCriticalEdge(Instruction *TI, unsigned SuccNum,
   // it in this generic function.
   if (DestBB->isEHPad()) return nullptr;
 
+  // Don't split the non-fallthrough edge from a callbr.
+  if (isa<CallBrInst>(TI) && SuccNum > 0)
+    return nullptr;
+
   // Create a new basic block, linking it into the CFG.
   BasicBlock *NewBB = BasicBlock::Create(TI->getContext(),
       TIBB->getName() + "." + DestBB->getName() + "_crit_edge");
diff --git a/llvm/lib/Transforms/Utils/InlineFunction.cpp b/llvm/lib/Transforms/Utils/InlineFunction.cpp
index 9015b36fa35..7443a7f9c5e 100644
--- a/llvm/lib/Transforms/Utils/InlineFunction.cpp
+++ b/llvm/lib/Transforms/Utils/InlineFunction.cpp
@@ -1504,6 +1504,10 @@ llvm::InlineResult llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
   assert(TheCall->getParent() && TheCall->getFunction() &&
          "Instruction not in function!");
 
+  // FIXME: we don't inline callbr yet.
+  if (isa<CallBrInst>(TheCall))
+    return false;
+
   // If IFI has any state in it, zap it before we fill it in.
   IFI.reset();
 
@@ -1729,6 +1733,8 @@ llvm::InlineResult llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
         Instruction *NewI = nullptr;
         if (isa<CallInst>(I))
           NewI = CallInst::Create(cast<CallInst>(I), OpDefs, I);
+        else if (isa<CallBrInst>(I))
+          NewI = CallBrInst::Create(cast<CallBrInst>(I), OpDefs, I);
         else
           NewI = InvokeInst::Create(cast<InvokeInst>(I), OpDefs, I);
 
@@ -2031,6 +2037,8 @@ llvm::InlineResult llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
       Instruction *NewInst;
       if (CS.isCall())
         NewInst = CallInst::Create(cast<CallInst>(I), OpBundles, I);
+      else if (CS.isCallBr())
+        NewInst = CallBrInst::Create(cast<CallBrInst>(I), OpBundles, I);
       else
         NewInst = InvokeInst::Create(cast<InvokeInst>(I), OpBundles, I);
       NewInst->takeName(I);
diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp
index 70812dc2de8..062bbcdae2c 100644
--- a/llvm/lib/Transforms/Utils/Local.cpp
+++ b/llvm/lib/Transforms/Utils/Local.cpp
@@ -996,6 +996,18 @@ bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB,
     }
   }
 
+  // We cannot fold the block if it's a branch to an already present callbr
+  // successor because that creates duplicate successors.
+  for (auto I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
+    if (auto *CBI = dyn_cast<CallBrInst>((*I)->getTerminator())) {
+      if (Succ == CBI->getDefaultDest())
+        return false;
+      for (unsigned i = 0, e = CBI->getNumIndirectDests(); i != e; ++i)
+        if (Succ == CBI->getIndirectDest(i))
+          return false;
+    }
+  }
+
   LLVM_DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);
 
   SmallVector<DominatorTree::UpdateType, 32> Updates;
diff --git a/llvm/lib/Transforms/Utils/LoopSimplify.cpp b/llvm/lib/Transforms/Utils/LoopSimplify.cpp
index b2aa20bc0f8..954e8038dfb 100644
--- a/llvm/lib/Transforms/Utils/LoopSimplify.cpp
+++ b/llvm/lib/Transforms/Utils/LoopSimplify.cpp
@@ -27,6 +27,9 @@
 // to transform the loop and make these guarantees. Client code should check
 // that these conditions are true before relying on them.
 //
+// Similar complications arise from callbr instructions, particularly in
+// asm-goto where blockaddress expressions are used.
+//
 // Note that the simplifycfg pass will clean up blocks which are split out but
 // end up being unnecessary, so usage of this pass should not pessimize
 // generated code.
@@ -123,10 +126,11 @@ BasicBlock *llvm::InsertPreheaderForLoop(Loop *L, DominatorTree *DT,
        PI != PE; ++PI) {
     BasicBlock *P = *PI;
     if (!L->contains(P)) {         // Coming in from outside the loop?
-      // If the loop is branched to from an indirect branch, we won't
+      // If the loop is branched to from an indirect terminator, we won't
       // be able to fully transform the loop, because it prohibits
       // edge splitting.
-      if (isa<IndirectBrInst>(P->getTerminator())) return nullptr;
+      if (P->getTerminator()->isIndirectTerminator())
+        return nullptr;
 
       // Keep track of it.
       OutsideBlocks.push_back(P);
@@ -235,8 +239,8 @@ static Loop *separateNestedLoop(Loop *L, BasicBlock *Preheader,
   for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
     if (PN->getIncomingValue(i) != PN ||
         !L->contains(PN->getIncomingBlock(i))) {
-      // We can't split indirectbr edges.
-      if (isa<IndirectBrInst>(PN->getIncomingBlock(i)->getTerminator()))
+      // We can't split indirect control flow edges.
+      if (PN->getIncomingBlock(i)->getTerminator()->isIndirectTerminator())
        return nullptr;
      OuterLoopPreds.push_back(PN->getIncomingBlock(i));
    }
@@ -357,8 +361,8 @@ static BasicBlock *insertUniqueBackedgeBlock(Loop *L, BasicBlock *Preheader,
   for (pred_iterator I = pred_begin(Header), E = pred_end(Header); I != E; ++I){
     BasicBlock *P = *I;
 
-    // Indirectbr edges cannot be split, so we must fail if we find one.
-    if (isa<IndirectBrInst>(P->getTerminator()))
+    // Indirect edges cannot be split, so we must fail if we find one.
+    if (P->getTerminator()->isIndirectTerminator())
       return nullptr;
 
     if (P != Preheader) BackedgeBlocks.push_back(P);
diff --git a/llvm/lib/Transforms/Utils/LoopUtils.cpp b/llvm/lib/Transforms/Utils/LoopUtils.cpp
index 5e661ae8c21..5539ff12e4a 100644
--- a/llvm/lib/Transforms/Utils/LoopUtils.cpp
+++ b/llvm/lib/Transforms/Utils/LoopUtils.cpp
@@ -65,6 +65,9 @@ bool llvm::formDedicatedExitBlocks(Loop *L, DominatorTree *DT, LoopInfo *LI,
       if (isa<IndirectBrInst>(PredBB->getTerminator()))
         // We cannot rewrite exiting edges from an indirectbr.
         return false;
+      if (isa<CallBrInst>(PredBB->getTerminator()))
+        // We cannot rewrite exiting edges from a callbr.
+        return false;
 
       InLoopPredecessors.push_back(PredBB);
     } else {
diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index 3fec17ac8cc..00bcb8479c3 100644
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -1265,8 +1265,10 @@ static bool HoistThenElseCodeToIf(BranchInst *BI,
     while (isa<DbgInfoIntrinsic>(I2))
       I2 = &*BB2_Itr++;
   }
+  // FIXME: Can we define a safety predicate for CallBr?
   if (isa<PHINode>(I1) || !I1->isIdenticalToWhenDefined(I2) ||
-      (isa<InvokeInst>(I1) && !isSafeToHoistInvoke(BB1, BB2, I1, I2)))
+      (isa<InvokeInst>(I1) && !isSafeToHoistInvoke(BB1, BB2, I1, I2)) ||
+      isa<CallBrInst>(I1))
     return false;
 
   BasicBlock *BIParent = BI->getParent();
@@ -1349,9 +1351,14 @@ static bool HoistThenElseCodeToIf(BranchInst *BI,
 
 HoistTerminator:
   // It may not be possible to hoist an invoke.
+  // FIXME: Can we define a safety predicate for CallBr?
   if (isa<InvokeInst>(I1) && !isSafeToHoistInvoke(BB1, BB2, I1, I2))
     return Changed;
 
+  // TODO: callbr hoisting currently disabled pending further study.
+  if (isa<CallBrInst>(I1))
+    return Changed;
+
   for (BasicBlock *Succ : successors(BB1)) {
     for (PHINode &PN : Succ->phis()) {
       Value *BB1V = PN.getIncomingValueForBlock(BB1);
@@ -1443,7 +1450,7 @@ static bool canSinkInstructions(
     // Conservatively return false if I is an inline-asm instruction. Sinking
     // and merging inline-asm instructions can potentially create arguments
    // that cannot satisfy the inline-asm constraints.
-    if (const auto *C = dyn_cast<CallInst>(I))
+    if (const auto *C = dyn_cast<CallBase>(I))
      if (C->isInlineAsm())
        return false;
 
@@ -1506,7 +1513,7 @@ static bool canSinkInstructions(
      // We can't create a PHI from this GEP.
      return false;
    // Don't create indirect calls! The called value is the final operand.
-    if ((isa<CallInst>(I0) || isa<InvokeInst>(I0)) && OI == OE - 1) {
+    if (isa<CallBase>(I0) && OI == OE - 1) {
      // FIXME: if the call was *already* indirect, we should do this.
      return false;
    }
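
As a worked illustration of the new guard in TryToSimplifyUncondBranchFromEmptyBlock (Local.cpp) above, consider the sketch below; the block and value names are invented, not taken from the patch or its tests. Folding the empty block %split into %common would leave the callbr with %common as both its fallthrough and an indirect destination, i.e. duplicate successors, which is exactly the case the new loop now rejects.

define void @f(i32 %x) {
entry:
  callbr void asm sideeffect "testl $0, $0; jne ${1:l}",
          "r,X,~{dirflag},~{fpsr},~{flags}"(i32 %x, i8* blockaddress(@f, %common))
          to label %split [label %common]

split:                                  ; empty block: a lone unconditional branch
  br label %common

common:
  ret void
}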