Diffstat (limited to 'llvm/lib')
-rw-r--r-- llvm/lib/Analysis/ValueTracking.cpp | 1
-rw-r--r-- llvm/lib/AsmParser/LLLexer.cpp | 1
-rw-r--r-- llvm/lib/AsmParser/LLParser.cpp | 127
-rw-r--r-- llvm/lib/AsmParser/LLParser.h | 1
-rw-r--r-- llvm/lib/AsmParser/LLToken.h | 1
-rw-r--r-- llvm/lib/Bitcode/Reader/BitcodeReader.cpp | 68
-rw-r--r-- llvm/lib/Bitcode/Writer/BitcodeWriter.cpp | 35
-rw-r--r-- llvm/lib/Bitcode/Writer/ValueEnumerator.cpp | 6
-rw-r--r-- llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp | 1
-rw-r--r-- llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp | 13
-rw-r--r-- llvm/lib/CodeGen/CodeGenPrepare.cpp | 10
-rw-r--r-- llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp | 6
-rw-r--r-- llvm/lib/CodeGen/IndirectBrExpandPass.cpp | 8
-rw-r--r-- llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp | 10
-rw-r--r-- llvm/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp | 4
-rw-r--r-- llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp | 3
-rw-r--r-- llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp | 4
-rw-r--r-- llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp | 41
-rw-r--r-- llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h | 2
-rw-r--r-- llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp | 1
-rw-r--r-- llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp | 8
-rw-r--r-- llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp | 6
-rw-r--r-- llvm/lib/CodeGen/TargetLoweringBase.cpp | 1
-rw-r--r-- llvm/lib/IR/AsmWriter.cpp | 45
-rw-r--r-- llvm/lib/IR/Instruction.cpp | 11
-rw-r--r-- llvm/lib/IR/Instructions.cpp | 83
-rw-r--r-- llvm/lib/IR/Value.cpp | 3
-rw-r--r-- llvm/lib/IR/Verifier.cpp | 21
-rw-r--r-- llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 1
-rw-r--r-- llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 3
-rw-r--r-- llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 3
-rw-r--r-- llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp | 3
-rw-r--r-- llvm/lib/Target/AVR/AVRInstrInfo.cpp | 3
-rw-r--r-- llvm/lib/Target/Hexagon/HexagonISelLowering.cpp | 6
-rw-r--r-- llvm/lib/Target/Hexagon/HexagonMachineScheduler.cpp | 2
-rw-r--r-- llvm/lib/Target/MSP430/MSP430InstrInfo.cpp | 3
-rw-r--r-- llvm/lib/Target/Mips/MipsInstrInfo.cpp | 3
-rw-r--r-- llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp | 3
-rw-r--r-- llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 3
-rw-r--r-- llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp | 5
-rw-r--r-- llvm/lib/Target/X86/X86AsmPrinter.cpp | 5
-rw-r--r-- llvm/lib/Target/X86/X86FloatingPoint.cpp | 3
-rw-r--r-- llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp | 57
-rw-r--r-- llvm/lib/Transforms/InstCombine/InstCombineInternal.h | 1
-rw-r--r-- llvm/lib/Transforms/InstCombine/InstructionCombining.cpp | 4
-rw-r--r-- llvm/lib/Transforms/Scalar/GVN.cpp | 17
-rw-r--r-- llvm/lib/Transforms/Scalar/JumpThreading.cpp | 11
-rw-r--r-- llvm/lib/Transforms/Scalar/SCCP.cpp | 13
-rw-r--r-- llvm/lib/Transforms/Utils/BasicBlockUtils.cpp | 2
-rw-r--r-- llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp | 4
-rw-r--r-- llvm/lib/Transforms/Utils/InlineFunction.cpp | 8
-rw-r--r-- llvm/lib/Transforms/Utils/Local.cpp | 12
-rw-r--r-- llvm/lib/Transforms/Utils/LoopSimplify.cpp | 16
-rw-r--r-- llvm/lib/Transforms/Utils/LoopUtils.cpp | 3
-rw-r--r-- llvm/lib/Transforms/Utils/SimplifyCFG.cpp | 13
55 files changed, 654 insertions(+), 74 deletions(-)
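For orientation, this is what the new instruction looks like in textual IR once the parser (LLParser) and printer (AsmWriter) changes below land. A minimal illustrative sketch, not a test case from the patch; the function name and asm string are made up:

  define void @foo(i32 %a) {
  entry:
    ; callbr models asm-goto: control either falls through to the default
    ; label or transfers to one of the bracketed indirect labels, whose
    ; addresses are passed to the asm as escaped blockaddress arguments.
    callbr void asm sideeffect "testl $0, $0; jne ${1:l}",
        "r,X,~{dirflag},~{fpsr},~{flags}"
        (i32 %a, i8* blockaddress(@foo, %indirect))
        to label %fallthrough [label %indirect]

  fallthrough:
    ret void

  indirect:
    ret void
  }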
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index eb49f904ea4..6f88a6b34ca 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -3922,6 +3922,7 @@ bool llvm::isSafeToSpeculativelyExecute(const Value *V,
case Instruction::VAArg:
case Instruction::Alloca:
case Instruction::Invoke:
+ case Instruction::CallBr:
case Instruction::PHI:
case Instruction::Store:
case Instruction::Ret:
diff --git a/llvm/lib/AsmParser/LLLexer.cpp b/llvm/lib/AsmParser/LLLexer.cpp
index b543115a88e..461117c92b8 100644
--- a/llvm/lib/AsmParser/LLLexer.cpp
+++ b/llvm/lib/AsmParser/LLLexer.cpp
@@ -858,6 +858,7 @@ lltok::Kind LLLexer::LexIdentifier() {
INSTKEYWORD(invoke, Invoke);
INSTKEYWORD(resume, Resume);
INSTKEYWORD(unreachable, Unreachable);
+ INSTKEYWORD(callbr, CallBr);
INSTKEYWORD(alloca, Alloca);
INSTKEYWORD(load, Load);
diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp
index 855c5d26500..2a36bb18984 100644
--- a/llvm/lib/AsmParser/LLParser.cpp
+++ b/llvm/lib/AsmParser/LLParser.cpp
@@ -163,6 +163,14 @@ bool LLParser::ValidateEndOfModule() {
AS = AS.addAttributes(Context, AttributeList::FunctionIndex,
AttributeSet::get(Context, FnAttrs));
II->setAttributes(AS);
+ } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(V)) {
+ AttributeList AS = CBI->getAttributes();
+ AttrBuilder FnAttrs(AS.getFnAttributes());
+ AS = AS.removeAttributes(Context, AttributeList::FunctionIndex);
+ FnAttrs.merge(B);
+ AS = AS.addAttributes(Context, AttributeList::FunctionIndex,
+ AttributeSet::get(Context, FnAttrs));
+ CBI->setAttributes(AS);
} else if (auto *GV = dyn_cast<GlobalVariable>(V)) {
AttrBuilder Attrs(GV->getAttributes());
Attrs.merge(B);
@@ -5566,6 +5574,7 @@ int LLParser::ParseInstruction(Instruction *&Inst, BasicBlock *BB,
case lltok::kw_catchswitch: return ParseCatchSwitch(Inst, PFS);
case lltok::kw_catchpad: return ParseCatchPad(Inst, PFS);
case lltok::kw_cleanuppad: return ParseCleanupPad(Inst, PFS);
+ case lltok::kw_callbr: return ParseCallBr(Inst, PFS);
// Unary Operators.
case lltok::kw_fneg: {
FastMathFlags FMF = EatFastMathFlagsIfPresent();
@@ -6184,6 +6193,124 @@ bool LLParser::ParseUnaryOp(Instruction *&Inst, PerFunctionState &PFS,
return false;
}
+/// ParseCallBr
+/// ::= 'callbr' OptionalCallingConv OptionalAttrs Type Value ParamList
+/// OptionalAttrs OptionalOperandBundles 'to' TypeAndValue
+/// '[' LabelList ']'
+bool LLParser::ParseCallBr(Instruction *&Inst, PerFunctionState &PFS) {
+ LocTy CallLoc = Lex.getLoc();
+ AttrBuilder RetAttrs, FnAttrs;
+ std::vector<unsigned> FwdRefAttrGrps;
+ LocTy NoBuiltinLoc;
+ unsigned CC;
+ Type *RetType = nullptr;
+ LocTy RetTypeLoc;
+ ValID CalleeID;
+ SmallVector<ParamInfo, 16> ArgList;
+ SmallVector<OperandBundleDef, 2> BundleList;
+
+ BasicBlock *DefaultDest;
+ if (ParseOptionalCallingConv(CC) || ParseOptionalReturnAttrs(RetAttrs) ||
+ ParseType(RetType, RetTypeLoc, true /*void allowed*/) ||
+ ParseValID(CalleeID) || ParseParameterList(ArgList, PFS) ||
+ ParseFnAttributeValuePairs(FnAttrs, FwdRefAttrGrps, false,
+ NoBuiltinLoc) ||
+ ParseOptionalOperandBundles(BundleList, PFS) ||
+ ParseToken(lltok::kw_to, "expected 'to' in callbr") ||
+ ParseTypeAndBasicBlock(DefaultDest, PFS) ||
+ ParseToken(lltok::lsquare, "expected '[' in callbr"))
+ return true;
+
+ // Parse the destination list.
+ SmallVector<BasicBlock *, 16> IndirectDests;
+
+ if (Lex.getKind() != lltok::rsquare) {
+ BasicBlock *DestBB;
+ if (ParseTypeAndBasicBlock(DestBB, PFS))
+ return true;
+ IndirectDests.push_back(DestBB);
+
+ while (EatIfPresent(lltok::comma)) {
+ if (ParseTypeAndBasicBlock(DestBB, PFS))
+ return true;
+ IndirectDests.push_back(DestBB);
+ }
+ }
+
+ if (ParseToken(lltok::rsquare, "expected ']' at end of block list"))
+ return true;
+
+ // If RetType is a non-function pointer type, then this is the short syntax
+ // for the call, which means that RetType is just the return type. Infer the
+ // rest of the function argument types from the arguments that are present.
+ FunctionType *Ty = dyn_cast<FunctionType>(RetType);
+ if (!Ty) {
+ // Pull out the types of all of the arguments...
+ std::vector<Type *> ParamTypes;
+ for (unsigned i = 0, e = ArgList.size(); i != e; ++i)
+ ParamTypes.push_back(ArgList[i].V->getType());
+
+ if (!FunctionType::isValidReturnType(RetType))
+ return Error(RetTypeLoc, "Invalid result type for LLVM function");
+
+ Ty = FunctionType::get(RetType, ParamTypes, false);
+ }
+
+ CalleeID.FTy = Ty;
+
+ // Look up the callee.
+ Value *Callee;
+ if (ConvertValIDToValue(PointerType::getUnqual(Ty), CalleeID, Callee, &PFS,
+ /*IsCall=*/true))
+ return true;
+
+ if (isa<InlineAsm>(Callee) && !Ty->getReturnType()->isVoidTy())
+ return Error(RetTypeLoc, "asm-goto outputs not supported");
+
+ // Set up the Attribute for the function.
+ SmallVector<Value *, 8> Args;
+ SmallVector<AttributeSet, 8> ArgAttrs;
+
+ // Loop through FunctionType's arguments and ensure they are specified
+ // correctly. Also, gather any parameter attributes.
+ FunctionType::param_iterator I = Ty->param_begin();
+ FunctionType::param_iterator E = Ty->param_end();
+ for (unsigned i = 0, e = ArgList.size(); i != e; ++i) {
+ Type *ExpectedTy = nullptr;
+ if (I != E) {
+ ExpectedTy = *I++;
+ } else if (!Ty->isVarArg()) {
+ return Error(ArgList[i].Loc, "too many arguments specified");
+ }
+
+ if (ExpectedTy && ExpectedTy != ArgList[i].V->getType())
+ return Error(ArgList[i].Loc, "argument is not of expected type '" +
+ getTypeString(ExpectedTy) + "'");
+ Args.push_back(ArgList[i].V);
+ ArgAttrs.push_back(ArgList[i].Attrs);
+ }
+
+ if (I != E)
+ return Error(CallLoc, "not enough parameters specified for call");
+
+ if (FnAttrs.hasAlignmentAttr())
+ return Error(CallLoc, "callbr instructions may not have an alignment");
+
+ // Finish off the Attribute and check them
+ AttributeList PAL =
+ AttributeList::get(Context, AttributeSet::get(Context, FnAttrs),
+ AttributeSet::get(Context, RetAttrs), ArgAttrs);
+
+ CallBrInst *CBI =
+ CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests, Args,
+ BundleList);
+ CBI->setCallingConv(CC);
+ CBI->setAttributes(PAL);
+ ForwardRefAttrGroups[CBI] = FwdRefAttrGrps;
+ Inst = CBI;
+ return false;
+}
+
//===----------------------------------------------------------------------===//
// Binary Operators.
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/AsmParser/LLParser.h b/llvm/lib/AsmParser/LLParser.h
index d8efbb1cf10..95aea0c775a 100644
--- a/llvm/lib/AsmParser/LLParser.h
+++ b/llvm/lib/AsmParser/LLParser.h
@@ -570,6 +570,7 @@ namespace llvm {
bool ParseCatchSwitch(Instruction *&Inst, PerFunctionState &PFS);
bool ParseCatchPad(Instruction *&Inst, PerFunctionState &PFS);
bool ParseCleanupPad(Instruction *&Inst, PerFunctionState &PFS);
+ bool ParseCallBr(Instruction *&Inst, PerFunctionState &PFS);
bool ParseUnaryOp(Instruction *&Inst, PerFunctionState &PFS, unsigned Opc,
unsigned OperandType);
diff --git a/llvm/lib/AsmParser/LLToken.h b/llvm/lib/AsmParser/LLToken.h
index 41899b29ce5..88eeae11a4b 100644
--- a/llvm/lib/AsmParser/LLToken.h
+++ b/llvm/lib/AsmParser/LLToken.h
@@ -327,6 +327,7 @@ enum Kind {
kw_catchret,
kw_catchpad,
kw_cleanuppad,
+ kw_callbr,
kw_alloca,
kw_load,
diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
index 61bc031026b..b94bb66a0d6 100644
--- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -4231,6 +4231,74 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
InstructionList.push_back(I);
break;
}
+ case bitc::FUNC_CODE_INST_CALLBR: {
+ // CALLBR: [attr, cc, norm, transfs, fty, fnid, args]
+ unsigned OpNum = 0;
+ AttributeList PAL = getAttributes(Record[OpNum++]);
+ unsigned CCInfo = Record[OpNum++];
+
+ BasicBlock *DefaultDest = getBasicBlock(Record[OpNum++]);
+ unsigned NumIndirectDests = Record[OpNum++];
+ SmallVector<BasicBlock *, 16> IndirectDests;
+ for (unsigned i = 0, e = NumIndirectDests; i != e; ++i)
+ IndirectDests.push_back(getBasicBlock(Record[OpNum++]));
+
+ FunctionType *FTy = nullptr;
+ if (CCInfo >> bitc::CALL_EXPLICIT_TYPE & 1 &&
+ !(FTy = dyn_cast<FunctionType>(getTypeByID(Record[OpNum++]))))
+ return error("Explicit call type is not a function type");
+
+ Value *Callee;
+ if (getValueTypePair(Record, OpNum, NextValueNo, Callee))
+ return error("Invalid record");
+
+ PointerType *OpTy = dyn_cast<PointerType>(Callee->getType());
+ if (!OpTy)
+ return error("Callee is not a pointer type");
+ if (!FTy) {
+ FTy = dyn_cast<FunctionType>(OpTy->getElementType());
+ if (!FTy)
+ return error("Callee is not of pointer to function type");
+ } else if (OpTy->getElementType() != FTy)
+ return error("Explicit call type does not match pointee type of "
+ "callee operand");
+ if (Record.size() < FTy->getNumParams() + OpNum)
+ return error("Insufficient operands to call");
+
+ SmallVector<Value*, 16> Args;
+ // Read the fixed params.
+ for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i, ++OpNum) {
+ if (FTy->getParamType(i)->isLabelTy())
+ Args.push_back(getBasicBlock(Record[OpNum]));
+ else
+ Args.push_back(getValue(Record, OpNum, NextValueNo,
+ FTy->getParamType(i)));
+ if (!Args.back())
+ return error("Invalid record");
+ }
+
+ // Read type/value pairs for varargs params.
+ if (!FTy->isVarArg()) {
+ if (OpNum != Record.size())
+ return error("Invalid record");
+ } else {
+ while (OpNum != Record.size()) {
+ Value *Op;
+ if (getValueTypePair(Record, OpNum, NextValueNo, Op))
+ return error("Invalid record");
+ Args.push_back(Op);
+ }
+ }
+
+ I = CallBrInst::Create(FTy, Callee, DefaultDest, IndirectDests, Args,
+ OperandBundles);
+ OperandBundles.clear();
+ InstructionList.push_back(I);
+ cast<CallBrInst>(I)->setCallingConv(
+ static_cast<CallingConv::ID>((0x7ff & CCInfo) >> bitc::CALL_CCONV));
+ cast<CallBrInst>(I)->setAttributes(PAL);
+ break;
+ }
case bitc::FUNC_CODE_INST_UNREACHABLE: // UNREACHABLE
I = new UnreachableInst(Context);
InstructionList.push_back(I);
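Read together with the writer below, the decode loop above implies the following on-disk layout for the new record (the field names are my own shorthand, not from the bitcode documentation):

  FUNC_CODE_INST_CALLBR:
    [paramattrs, cc,
     default_bb, num_indirect_dests, indirect_bb0 ... indirect_bbN,
     fnty,                       ; the writer always sets CALL_EXPLICIT_TYPE
     callee,                     ; type/value pair
     fixed_arg0 ... fixed_argM,  ; value IDs; label-typed params read as blocks
     vararg type/value pairs ...]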
diff --git a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
index f4a539e51f7..a15ad55b0a3 100644
--- a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -2777,6 +2777,41 @@ void ModuleBitcodeWriter::writeInstruction(const Instruction &I,
Vals.push_back(VE.getValueID(CatchSwitch.getUnwindDest()));
break;
}
+ case Instruction::CallBr: {
+ const CallBrInst *CBI = cast<CallBrInst>(&I);
+ const Value *Callee = CBI->getCalledValue();
+ FunctionType *FTy = CBI->getFunctionType();
+
+ if (CBI->hasOperandBundles())
+ writeOperandBundles(CBI, InstID);
+
+ Code = bitc::FUNC_CODE_INST_CALLBR;
+
+ Vals.push_back(VE.getAttributeListID(CBI->getAttributes()));
+
+ Vals.push_back(CBI->getCallingConv() << bitc::CALL_CCONV |
+ 1 << bitc::CALL_EXPLICIT_TYPE);
+
+ Vals.push_back(VE.getValueID(CBI->getDefaultDest()));
+ Vals.push_back(CBI->getNumIndirectDests());
+ for (unsigned i = 0, e = CBI->getNumIndirectDests(); i != e; ++i)
+ Vals.push_back(VE.getValueID(CBI->getIndirectDest(i)));
+
+ Vals.push_back(VE.getTypeID(FTy));
+ pushValueAndType(Callee, InstID, Vals);
+
+ // Emit value #'s for the fixed parameters.
+ for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
+ pushValue(I.getOperand(i), InstID, Vals); // fixed param.
+
+ // Emit type/value pairs for varargs params.
+ if (FTy->isVarArg()) {
+ for (unsigned i = FTy->getNumParams(), e = CBI->getNumArgOperands();
+ i != e; ++i)
+ pushValueAndType(I.getOperand(i), InstID, Vals); // vararg
+ }
+ break;
+ }
case Instruction::Unreachable:
Code = bitc::FUNC_CODE_INST_UNREACHABLE;
AbbrevToUse = FUNCTION_INST_UNREACHABLE_ABBREV;
diff --git a/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp b/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
index f2f11c19251..c735efab9c1 100644
--- a/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
+++ b/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
@@ -414,10 +414,8 @@ ValueEnumerator::ValueEnumerator(const Module &M,
EnumerateMetadata(&F, MD->getMetadata());
}
EnumerateType(I.getType());
- if (const CallInst *CI = dyn_cast<CallInst>(&I))
- EnumerateAttributes(CI->getAttributes());
- else if (const InvokeInst *II = dyn_cast<InvokeInst>(&I))
- EnumerateAttributes(II->getAttributes());
+ if (const auto *Call = dyn_cast<CallBase>(&I))
+ EnumerateAttributes(Call->getAttributes());
// Enumerate metadata attached with this instruction.
MDs.clear();
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 7b4679e1f17..5319519ddc0 100644
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -1067,6 +1067,7 @@ void AsmPrinter::EmitFunctionBody() {
OutStreamer->EmitLabel(MI.getOperand(0).getMCSymbol());
break;
case TargetOpcode::INLINEASM:
+ case TargetOpcode::INLINEASM_BR:
EmitInlineAsm(&MI);
break;
case TargetOpcode::DBG_VALUE:
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
index 9e6d35c5e9a..52acabec06d 100644
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
@@ -433,9 +433,16 @@ static void EmitGCCInlineAsmStr(const char *AsmStr, const MachineInstr *MI,
++OpNo; // Skip over the ID number.
if (Modifier[0] == 'l') { // Labels are target independent.
- // FIXME: What if the operand isn't an MBB, report error?
- const MCSymbol *Sym = MI->getOperand(OpNo).getMBB()->getSymbol();
- Sym->print(OS, AP->MAI);
+ if (MI->getOperand(OpNo).isBlockAddress()) {
+ const BlockAddress *BA = MI->getOperand(OpNo).getBlockAddress();
+ MCSymbol *Sym = AP->GetBlockAddressSymbol(BA);
+ Sym->print(OS, AP->MAI);
+ } else if (MI->getOperand(OpNo).isMBB()) {
+ const MCSymbol *Sym = MI->getOperand(OpNo).getMBB()->getSymbol();
+ Sym->print(OS, AP->MAI);
+ } else {
+ Error = true;
+ }
} else {
if (InlineAsm::isMemKind(OpFlags)) {
Error = AP->PrintAsmMemoryOperand(MI, OpNo, InlineAsmVariant,
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 1e04f7918ee..14f56279e85 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -655,6 +655,16 @@ bool CodeGenPrepare::isMergingEmptyBlockProfitable(BasicBlock *BB,
BB->getSinglePredecessor()->getSingleSuccessor()))
return false;
+ // Skip merging if the block's successor is also a successor to any callbr
+ // that leads to this block.
+ // FIXME: Is this really needed? Is this a correctness issue?
+ for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
+ if (auto *CBI = dyn_cast<CallBrInst>((*PI)->getTerminator()))
+ for (unsigned i = 0, e = CBI->getNumSuccessors(); i != e; ++i)
+ if (DestBB == CBI->getSuccessor(i))
+ return false;
+ }
+
// Try to skip merging if the unique predecessor of BB is terminated by a
// switch or indirect branch instruction, and BB is used as an incoming block
// of PHIs in DestBB. In such case, merging BB and DestBB would cause ISel to
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index c6cb17c2b0d..ab9d980ec64 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -1259,6 +1259,12 @@ bool IRTranslator::translateInvoke(const User &U,
return true;
}
+bool IRTranslator::translateCallBr(const User &U,
+ MachineIRBuilder &MIRBuilder) {
+ // FIXME: Implement this.
+ return false;
+}
+
bool IRTranslator::translateLandingPad(const User &U,
MachineIRBuilder &MIRBuilder) {
const LandingPadInst &LP = cast<LandingPadInst>(U);
diff --git a/llvm/lib/CodeGen/IndirectBrExpandPass.cpp b/llvm/lib/CodeGen/IndirectBrExpandPass.cpp
index 9a96abfbf8c..7ac093ba4a7 100644
--- a/llvm/lib/CodeGen/IndirectBrExpandPass.cpp
+++ b/llvm/lib/CodeGen/IndirectBrExpandPass.cpp
@@ -148,11 +148,9 @@ bool IndirectBrExpandPass::runOnFunction(Function &F) {
ConstantInt *BBIndexC = ConstantInt::get(ITy, BBIndex);
// Now rewrite the blockaddress to an integer constant based on the index.
- // FIXME: We could potentially preserve the uses as arguments to inline asm.
- // This would allow some uses such as diagnostic information in crashes to
- // have higher quality even when this transform is enabled, but would break
- // users that round-trip blockaddresses through inline assembly and then
- // back into an indirectbr.
+ // FIXME: This part doesn't properly recognize other uses of blockaddress
+ // expressions, for instance, where they are used to pass labels to
+ // asm-goto. This part of the pass needs a rework.
BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(BBIndexC, BA->getType()));
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index 618a0b0f7ef..5cbc9e2a88b 100644
--- a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -1048,14 +1048,18 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
break;
}
- case ISD::INLINEASM: {
+ case ISD::INLINEASM:
+ case ISD::INLINEASM_BR: {
unsigned NumOps = Node->getNumOperands();
if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
--NumOps; // Ignore the glue operand.
// Create the inline asm machine instruction.
- MachineInstrBuilder MIB = BuildMI(*MF, Node->getDebugLoc(),
- TII->get(TargetOpcode::INLINEASM));
+ unsigned TgtOpc = Node->getOpcode() == ISD::INLINEASM_BR
+ ? TargetOpcode::INLINEASM_BR
+ : TargetOpcode::INLINEASM;
+ MachineInstrBuilder MIB =
+ BuildMI(*MF, Node->getDebugLoc(), TII->get(TgtOpc));
// Add the asm string as an external symbol operand.
SDValue AsmStrV = Node->getOperand(InlineAsm::Op_AsmString);
diff --git a/llvm/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp b/llvm/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp
index e85e29d0b7f..34660e3a48e 100644
--- a/llvm/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp
@@ -84,6 +84,7 @@ ResourcePriorityQueue::numberRCValPredInSU(SUnit *SU, unsigned RCId) {
case ISD::CopyFromReg: NumberDeps++; break;
case ISD::CopyToReg: break;
case ISD::INLINEASM: break;
+ case ISD::INLINEASM_BR: break;
}
if (!ScegN->isMachineOpcode())
continue;
@@ -120,6 +121,7 @@ unsigned ResourcePriorityQueue::numberRCValSuccInSU(SUnit *SU,
case ISD::CopyFromReg: break;
case ISD::CopyToReg: NumberDeps++; break;
case ISD::INLINEASM: break;
+ case ISD::INLINEASM_BR: break;
}
if (!ScegN->isMachineOpcode())
continue;
@@ -445,6 +447,7 @@ int ResourcePriorityQueue::SUSchedulingCost(SUnit *SU) {
break;
case ISD::INLINEASM:
+ case ISD::INLINEASM_BR:
ResCount += PriorityThree;
break;
}
@@ -547,6 +550,7 @@ void ResourcePriorityQueue::initNumRegDefsLeft(SUnit *SU) {
NodeNumDefs++;
break;
case ISD::INLINEASM:
+ case ISD::INLINEASM_BR:
NodeNumDefs++;
break;
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
index d2e97ce4c77..2cb850fa1a3 100644
--- a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
@@ -479,7 +479,8 @@ bool ScheduleDAGFast::DelayForLiveRegsBottomUp(SUnit *SU,
}
for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode()) {
- if (Node->getOpcode() == ISD::INLINEASM) {
+ if (Node->getOpcode() == ISD::INLINEASM ||
+ Node->getOpcode() == ISD::INLINEASM_BR) {
// Inline asm can clobber physical defs.
unsigned NumOps = Node->getNumOperands();
if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
diff --git a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
index 7616d11e050..75d9eb265ae 100644
--- a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
@@ -708,6 +708,7 @@ void ScheduleDAGRRList::EmitNode(SUnit *SU) {
// removed.
return;
case ISD::INLINEASM:
+ case ISD::INLINEASM_BR:
// For inline asm, clear the pipeline state.
HazardRec->Reset();
return;
@@ -1347,7 +1348,8 @@ DelayForLiveRegsBottomUp(SUnit *SU, SmallVectorImpl<unsigned> &LRegs) {
}
for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode()) {
- if (Node->getOpcode() == ISD::INLINEASM) {
+ if (Node->getOpcode() == ISD::INLINEASM ||
+ Node->getOpcode() == ISD::INLINEASM_BR) {
// Inline asm can clobber physical defs.
unsigned NumOps = Node->getNumOperands();
if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index b205e97fa39..d41158134b2 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -2548,6 +2548,35 @@ void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
InvokeMBB->normalizeSuccProbs();
// Drop into normal successor.
+ DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, getControlRoot(),
+ DAG.getBasicBlock(Return)));
+}
+
+void SelectionDAGBuilder::visitCallBr(const CallBrInst &I) {
+ MachineBasicBlock *CallBrMBB = FuncInfo.MBB;
+
+ // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
+ // have to do anything here to lower funclet bundles.
+ assert(!I.hasOperandBundlesOtherThan(
+ {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
+ "Cannot lower callbrs with arbitrary operand bundles yet!");
+
+ assert(isa<InlineAsm>(I.getCalledValue()) &&
+ "Only know how to handle inlineasm callbr");
+ visitInlineAsm(&I);
+
+ // Retrieve successors.
+ MachineBasicBlock *Return = FuncInfo.MBBMap[I.getDefaultDest()];
+
+ // Update successor info.
+ addSuccessorWithProb(CallBrMBB, Return);
+ for (unsigned i = 0, e = I.getNumIndirectDests(); i < e; ++i) {
+ MachineBasicBlock *Target = FuncInfo.MBBMap[I.getIndirectDest(i)];
+ addSuccessorWithProb(CallBrMBB, Target);
+ }
+ CallBrMBB->normalizeSuccProbs();
+
+ // Drop into default successor.
DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
MVT::Other, getControlRoot(),
DAG.getBasicBlock(Return)));
@@ -7584,7 +7613,14 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
// Process the call argument. BasicBlocks are labels, currently appearing
// only in asm's.
- if (const BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
+ const Instruction *I = CS.getInstruction();
+ if (isa<CallBrInst>(I) &&
+ (ArgNo - 1) >= (cast<CallBrInst>(I)->getNumArgOperands() -
+ cast<CallBrInst>(I)->getNumIndirectDests())) {
+ const auto *BA = cast<BlockAddress>(OpInfo.CallOperandVal);
+ EVT VT = TLI.getValueType(DAG.getDataLayout(), BA->getType(), true);
+ OpInfo.CallOperand = DAG.getTargetBlockAddress(BA, VT);
+ } else if (const auto *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
} else {
OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
@@ -7883,7 +7919,8 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
- Chain = DAG.getNode(ISD::INLINEASM, getCurSDLoc(),
+ unsigned ISDOpc = isa<CallBrInst>(CS.getInstruction()) ? ISD::INLINEASM_BR : ISD::INLINEASM;
+ Chain = DAG.getNode(ISDOpc, getCurSDLoc(),
DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
Flag = Chain.getValue(1);
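The ArgNo arithmetic above encodes the convention that a callbr's blockaddress operands for its indirect destinations come last in the asm argument list. For reference, a hedged sketch of the C-level asm-goto that Clang lowers into such a callbr (the frontend lowering is on the Clang side and not part of this diff):

  int frob(int x) {
    /* %l1 names the first goto label: the single input "r"(x) occupies
       operand 0, and the label's blockaddress is appended by the frontend
       as the trailing asm argument. */
    asm goto("testl %0, %0; jne %l1"
             : /* no outputs */ : "r"(x) : /* no clobbers */ : error);
    return 0;
  error:
    return 1;
  }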
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
index 81941d55007..37b04e99ccf 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
@@ -46,6 +46,7 @@ class AtomicRMWInst;
class BasicBlock;
class BranchInst;
class CallInst;
+class CallBrInst;
class CatchPadInst;
class CatchReturnInst;
class CatchSwitchInst;
@@ -851,6 +852,7 @@ public:
private:
// These all get lowered before this pass.
void visitInvoke(const InvokeInst &I);
+ void visitCallBr(const CallBrInst &I);
void visitResume(const ResumeInst &I);
void visitUnary(const User &I, unsigned Opcode);
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
index c14b94ebd25..490b2f9957e 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -172,6 +172,7 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::UNDEF: return "undef";
case ISD::MERGE_VALUES: return "merge_values";
case ISD::INLINEASM: return "inlineasm";
+ case ISD::INLINEASM_BR: return "inlineasm_br";
case ISD::EH_LABEL: return "eh_label";
case ISD::HANDLENODE: return "handlenode";
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 9c69e7eee14..4eabab45e28 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -2441,14 +2441,14 @@ bool SelectionDAGISel::IsLegalToFold(SDValue N, SDNode *U, SDNode *Root,
return !findNonImmUse(Root, N.getNode(), U, IgnoreChains);
}
-void SelectionDAGISel::Select_INLINEASM(SDNode *N) {
+void SelectionDAGISel::Select_INLINEASM(SDNode *N, bool Branch) {
SDLoc DL(N);
std::vector<SDValue> Ops(N->op_begin(), N->op_end());
SelectInlineAsmMemoryOperands(Ops, DL);
const EVT VTs[] = {MVT::Other, MVT::Glue};
- SDValue New = CurDAG->getNode(ISD::INLINEASM, DL, VTs, Ops);
+ SDValue New = CurDAG->getNode(Branch ? ISD::INLINEASM_BR : ISD::INLINEASM, DL, VTs, Ops);
New->setNodeId(-1);
ReplaceUses(N, New.getNode());
CurDAG->RemoveDeadNode(N);
@@ -2998,7 +2998,9 @@ void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch,
CurDAG->RemoveDeadNode(NodeToMatch);
return;
case ISD::INLINEASM:
- Select_INLINEASM(NodeToMatch);
+ case ISD::INLINEASM_BR:
+ Select_INLINEASM(NodeToMatch,
+ NodeToMatch->getOpcode() == ISD::INLINEASM_BR);
return;
case ISD::READ_REGISTER:
Select_READ_REGISTER(NodeToMatch);
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 0f343f5989f..484dbffefbb 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -3289,7 +3289,8 @@ void TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
switch (ConstraintLetter) {
default: break;
case 'X': // Allows any operand; labels (basic block) use this.
- if (Op.getOpcode() == ISD::BasicBlock) {
+ if (Op.getOpcode() == ISD::BasicBlock ||
+ Op.getOpcode() == ISD::TargetBlockAddress) {
Ops.push_back(Op);
return;
}
@@ -3776,6 +3777,9 @@ void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo,
return;
}
+ if (Op.getNode() && Op.getOpcode() == ISD::TargetBlockAddress)
+ return;
+
// Otherwise, try to resolve it to something we know about by looking at
// the actual operand type.
if (const char *Repl = LowerXConstraint(OpInfo.ConstraintVT)) {
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index 280305d516c..7eeea33a842 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -1455,6 +1455,7 @@ int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
case Switch: return 0;
case IndirectBr: return 0;
case Invoke: return 0;
+ case CallBr: return 0;
case Resume: return 0;
case Unreachable: return 0;
case CleanupRet: return 0;
diff --git a/llvm/lib/IR/AsmWriter.cpp b/llvm/lib/IR/AsmWriter.cpp
index 980de9ab1ee..af3db5186a2 100644
--- a/llvm/lib/IR/AsmWriter.cpp
+++ b/llvm/lib/IR/AsmWriter.cpp
@@ -3836,6 +3836,51 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
writeOperand(II->getNormalDest(), true);
Out << " unwind ";
writeOperand(II->getUnwindDest(), true);
+ } else if (const CallBrInst *CBI = dyn_cast<CallBrInst>(&I)) {
+ Operand = CBI->getCalledValue();
+ FunctionType *FTy = CBI->getFunctionType();
+ Type *RetTy = FTy->getReturnType();
+ const AttributeList &PAL = CBI->getAttributes();
+
+ // Print the calling convention being used.
+ if (CBI->getCallingConv() != CallingConv::C) {
+ Out << " ";
+ PrintCallingConv(CBI->getCallingConv(), Out);
+ }
+
+ if (PAL.hasAttributes(AttributeList::ReturnIndex))
+ Out << ' ' << PAL.getAsString(AttributeList::ReturnIndex);
+
+ // If possible, print out the short form of the callbr instruction. We can
+ // only do this if the first argument is a pointer to a nonvararg function,
+ // and if the return type is not a pointer to a function.
+ //
+ Out << ' ';
+ TypePrinter.print(FTy->isVarArg() ? FTy : RetTy, Out);
+ Out << ' ';
+ writeOperand(Operand, false);
+ Out << '(';
+ for (unsigned op = 0, Eop = CBI->getNumArgOperands(); op < Eop; ++op) {
+ if (op)
+ Out << ", ";
+ writeParamOperand(CBI->getArgOperand(op), PAL.getParamAttributes(op));
+ }
+
+ Out << ')';
+ if (PAL.hasAttributes(AttributeList::FunctionIndex))
+ Out << " #" << Machine.getAttributeGroupSlot(PAL.getFnAttributes());
+
+ writeOperandBundles(CBI);
+
+ Out << "\n to ";
+ writeOperand(CBI->getDefaultDest(), true);
+ Out << " [";
+ for (unsigned i = 0, e = CBI->getNumIndirectDests(); i != e; ++i) {
+ if (i != 0)
+ Out << ", ";
+ writeOperand(CBI->getIndirectDest(i), true);
+ }
+ Out << ']';
} else if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
Out << ' ';
if (AI->isUsedWithInAlloca())
diff --git a/llvm/lib/IR/Instruction.cpp b/llvm/lib/IR/Instruction.cpp
index e031886bf15..32f3bfa66b1 100644
--- a/llvm/lib/IR/Instruction.cpp
+++ b/llvm/lib/IR/Instruction.cpp
@@ -301,6 +301,7 @@ const char *Instruction::getOpcodeName(unsigned OpCode) {
case CatchRet: return "catchret";
case CatchPad: return "catchpad";
case CatchSwitch: return "catchswitch";
+ case CallBr: return "callbr";
// Standard unary operators...
case FNeg: return "fneg";
@@ -405,6 +406,10 @@ static bool haveSameSpecialState(const Instruction *I1, const Instruction *I2,
return CI->getCallingConv() == cast<InvokeInst>(I2)->getCallingConv() &&
CI->getAttributes() == cast<InvokeInst>(I2)->getAttributes() &&
CI->hasIdenticalOperandBundleSchema(*cast<InvokeInst>(I2));
+ if (const CallBrInst *CI = dyn_cast<CallBrInst>(I1))
+ return CI->getCallingConv() == cast<CallBrInst>(I2)->getCallingConv() &&
+ CI->getAttributes() == cast<CallBrInst>(I2)->getAttributes() &&
+ CI->hasIdenticalOperandBundleSchema(*cast<CallBrInst>(I2));
if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(I1))
return IVI->getIndices() == cast<InsertValueInst>(I2)->getIndices();
if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I1))
@@ -516,6 +521,7 @@ bool Instruction::mayReadFromMemory() const {
return true;
case Instruction::Call:
case Instruction::Invoke:
+ case Instruction::CallBr:
return !cast<CallBase>(this)->doesNotAccessMemory();
case Instruction::Store:
return !cast<StoreInst>(this)->isUnordered();
@@ -535,6 +541,7 @@ bool Instruction::mayWriteToMemory() const {
return true;
case Instruction::Call:
case Instruction::Invoke:
+ case Instruction::CallBr:
return !cast<CallBase>(this)->onlyReadsMemory();
case Instruction::Load:
return !cast<LoadInst>(this)->isUnordered();
@@ -772,8 +779,8 @@ void Instruction::updateProfWeight(uint64_t S, uint64_t T) {
}
void Instruction::setProfWeight(uint64_t W) {
- assert((isa<CallInst>(this) || isa<InvokeInst>(this)) &&
- "Can only set weights for call and invoke instrucitons");
+ assert(isa<CallBase>(this) &&
+ "Can only set weights for call like instructions");
SmallVector<uint32_t, 1> Weights;
Weights.push_back(W);
MDBuilder MDB(getContext());
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
index b9e6cd653ea..766c41188ff 100644
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -256,6 +256,11 @@ void LandingPadInst::addClause(Constant *Val) {
Function *CallBase::getCaller() { return getParent()->getParent(); }
+unsigned CallBase::getNumSubclassExtraOperandsDynamic() const {
+ assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
+ return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
+}
+
bool CallBase::isIndirectCall() const {
const Value *V = getCalledValue();
if (isa<Function>(V) || isa<Constant>(V))
@@ -727,6 +732,76 @@ LandingPadInst *InvokeInst::getLandingPadInst() const {
}
//===----------------------------------------------------------------------===//
+// CallBrInst Implementation
+//===----------------------------------------------------------------------===//
+
+void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
+ ArrayRef<BasicBlock *> IndirectDests,
+ ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> Bundles,
+ const Twine &NameStr) {
+ this->FTy = FTy;
+
+ assert((int)getNumOperands() ==
+ ComputeNumOperands(Args.size(), IndirectDests.size(),
+ CountBundleInputs(Bundles)) &&
+ "NumOperands not set up?");
+ NumIndirectDests = IndirectDests.size();
+ setDefaultDest(Fallthrough);
+ for (unsigned i = 0; i != NumIndirectDests; ++i)
+ setIndirectDest(i, IndirectDests[i]);
+ setCalledOperand(Fn);
+
+#ifndef NDEBUG
+ assert(((Args.size() == FTy->getNumParams()) ||
+ (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
+ "Calling a function with bad signature");
+
+ for (unsigned i = 0, e = Args.size(); i != e; i++)
+ assert((i >= FTy->getNumParams() ||
+ FTy->getParamType(i) == Args[i]->getType()) &&
+ "Calling a function with a bad signature!");
+#endif
+
+ std::copy(Args.begin(), Args.end(), op_begin());
+
+ auto It = populateBundleOperandInfos(Bundles, Args.size());
+ (void)It;
+ assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");
+
+ setName(NameStr);
+}
+
+CallBrInst::CallBrInst(const CallBrInst &CBI)
+ : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
+ OperandTraits<CallBase>::op_end(this) - CBI.getNumOperands(),
+ CBI.getNumOperands()) {
+ setCallingConv(CBI.getCallingConv());
+ std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
+ std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
+ bundle_op_info_begin());
+ SubclassOptionalData = CBI.SubclassOptionalData;
+ NumIndirectDests = CBI.NumIndirectDests;
+}
+
+CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
+ Instruction *InsertPt) {
+ std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());
+
+ auto *NewCBI = CallBrInst::Create(CBI->getFunctionType(),
+ CBI->getCalledValue(),
+ CBI->getDefaultDest(),
+ CBI->getIndirectDests(),
+ Args, OpB, CBI->getName(), InsertPt);
+ NewCBI->setCallingConv(CBI->getCallingConv());
+ NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
+ NewCBI->setAttributes(CBI->getAttributes());
+ NewCBI->setDebugLoc(CBI->getDebugLoc());
+ NewCBI->NumIndirectDests = CBI->NumIndirectDests;
+ return NewCBI;
+}
+
+//===----------------------------------------------------------------------===//
// ReturnInst Implementation
//===----------------------------------------------------------------------===//
@@ -3996,6 +4071,14 @@ InvokeInst *InvokeInst::cloneImpl() const {
return new(getNumOperands()) InvokeInst(*this);
}
+CallBrInst *CallBrInst::cloneImpl() const {
+ if (hasOperandBundles()) {
+ unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
+ return new (getNumOperands(), DescriptorBytes) CallBrInst(*this);
+ }
+ return new (getNumOperands()) CallBrInst(*this);
+}
+
ResumeInst *ResumeInst::cloneImpl() const { return new (1) ResumeInst(*this); }
CleanupReturnInst *CleanupReturnInst::cloneImpl() const {
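For API users, a minimal sketch of constructing the new instruction through the IRBuilder hook the patch also adds (it is used by the InstCombine changes further down; the exact overload set is not visible in this diff, so treat the signature as assumed). Note the callee must currently be inline asm, per the Verifier change below:

  // Assumes: InlineAsm *IA; BasicBlock *Fallthrough, *Indirect; Value *Arg;
  // IRBuilder<> Builder positioned at an insertion point.
  // (Signature assumed from the InstCombine usage in this patch.)
  CallBrInst *CBI =
      Builder.CreateCallBr(IA, Fallthrough, {Indirect}, {Arg});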
diff --git a/llvm/lib/IR/Value.cpp b/llvm/lib/IR/Value.cpp
index 2951cf1ed64..38eed76fe45 100644
--- a/llvm/lib/IR/Value.cpp
+++ b/llvm/lib/IR/Value.cpp
@@ -57,7 +57,8 @@ Value::Value(Type *ty, unsigned scid)
// FIXME: Why isn't this in the subclass gunk??
// Note, we cannot call isa<CallInst> before the CallInst has been
// constructed.
- if (SubclassID == Instruction::Call || SubclassID == Instruction::Invoke)
+ if (SubclassID == Instruction::Call || SubclassID == Instruction::Invoke ||
+ SubclassID == Instruction::CallBr)
assert((VTy->isFirstClassType() || VTy->isVoidTy() || VTy->isStructTy()) &&
"invalid CallInst type!");
else if (SubclassID != BasicBlockVal &&
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index 404749b2d8e..4a39ff40fd1 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -466,6 +466,7 @@ private:
void visitReturnInst(ReturnInst &RI);
void visitSwitchInst(SwitchInst &SI);
void visitIndirectBrInst(IndirectBrInst &BI);
+ void visitCallBrInst(CallBrInst &CBI);
void visitSelectInst(SelectInst &SI);
void visitUserOp1(Instruction &I);
void visitUserOp2(Instruction &I) { visitUserOp1(I); }
@@ -2450,6 +2451,26 @@ void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
visitTerminator(BI);
}
+void Verifier::visitCallBrInst(CallBrInst &CBI) {
+ Assert(CBI.isInlineAsm(), "Callbr is currently only used for asm-goto!",
+ &CBI);
+ Assert(CBI.getType()->isVoidTy(), "Callbr return value is not supported!",
+ &CBI);
+ for (unsigned i = 0, e = CBI.getNumSuccessors(); i != e; ++i)
+ Assert(CBI.getSuccessor(i)->getType()->isLabelTy(),
+ "Callbr successors must all have label type!", &CBI);
+ for (unsigned i = 0, e = CBI.getNumOperands(); i != e; ++i) {
+ Assert(i >= CBI.getNumArgOperands() || !isa<BasicBlock>(CBI.getOperand(i)),
+ "Using an unescaped label as a callbr argument!", &CBI);
+ if (isa<BasicBlock>(CBI.getOperand(i)))
+ for (unsigned j = i + 1; j != e; ++j)
+ Assert(CBI.getOperand(i) != CBI.getOperand(j),
+ "Duplicate callbr destination!", &CBI);
+ }
+
+ visitTerminator(CBI);
+}
+
void Verifier::visitSelectInst(SelectInst &SI) {
Assert(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
SI.getOperand(2)),
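To make the new verifier checks concrete, a hand-written fragment (not from the patch's test suite) that they reject:

  ; Rejected: %dest is passed as a raw label rather than an escaped
  ; blockaddress, and it also appears twice in the destination list.
  callbr void asm "", "X,X"(label %dest, label %dest)
      to label %ft [label %dest, label %dest]

This trips both the unescaped-label assert and the duplicate-destination check; a non-asm callee or a non-void return type would fail the first two asserts instead.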
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 0f0d877685d..1fc44b67136 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -590,6 +590,7 @@ static bool hasSourceMods(const SDNode *N) {
case ISD::FDIV:
case ISD::FREM:
case ISD::INLINEASM:
+ case ISD::INLINEASM_BR:
case AMDGPUISD::INTERP_P1:
case AMDGPUISD::INTERP_P2:
case AMDGPUISD::DIV_SCALE:
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 5fa4e3765b4..d6abd183105 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -9697,7 +9697,8 @@ static bool isCopyFromRegOfInlineAsm(const SDNode *N) {
do {
// Follow the chain until we find an INLINEASM node.
N = N->getOperand(0).getNode();
- if (N->getOpcode() == ISD::INLINEASM)
+ if (N->getOpcode() == ISD::INLINEASM ||
+ N->getOpcode() == ISD::INLINEASM_BR)
return true;
} while (N->getOpcode() == ISD::CopyFromReg);
return false;
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index c6cd7a1a4a6..0e3048792aa 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -5313,7 +5313,8 @@ unsigned SIInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
return 0;
case TargetOpcode::BUNDLE:
return getInstBundleSize(MI);
- case TargetOpcode::INLINEASM: {
+ case TargetOpcode::INLINEASM:
+ case TargetOpcode::INLINEASM_BR: {
const MachineFunction *MF = MI.getParent()->getParent();
const char *AsmStr = MI.getOperand(0).getSymbolName();
return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
diff --git a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
index 648435a3ed1..f765334577d 100644
--- a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -2615,6 +2615,7 @@ void ARMDAGToDAGISel::Select(SDNode *N) {
return;
break;
case ISD::INLINEASM:
+ case ISD::INLINEASM_BR:
if (tryInlineAsm(N))
return;
break;
@@ -4319,7 +4320,7 @@ bool ARMDAGToDAGISel::tryInlineAsm(SDNode *N){
if (!Changed)
return false;
- SDValue New = CurDAG->getNode(ISD::INLINEASM, SDLoc(N),
+ SDValue New = CurDAG->getNode(N->getOpcode(), SDLoc(N),
CurDAG->getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
New->setNodeId(-1);
ReplaceNode(N, New.getNode());
diff --git a/llvm/lib/Target/AVR/AVRInstrInfo.cpp b/llvm/lib/Target/AVR/AVRInstrInfo.cpp
index 4ea30a0fc39..ba7a95e92c5 100644
--- a/llvm/lib/Target/AVR/AVRInstrInfo.cpp
+++ b/llvm/lib/Target/AVR/AVRInstrInfo.cpp
@@ -487,7 +487,8 @@ unsigned AVRInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
case TargetOpcode::KILL:
case TargetOpcode::DBG_VALUE:
return 0;
- case TargetOpcode::INLINEASM: {
+ case TargetOpcode::INLINEASM:
+ case TargetOpcode::INLINEASM_BR: {
const MachineFunction &MF = *MI.getParent()->getParent();
const AVRTargetMachine &TM = static_cast<const AVRTargetMachine&>(MF.getTarget());
const AVRSubtarget &STI = MF.getSubtarget<AVRSubtarget>();
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index 0b9f424822c..c9ee83a249f 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -578,7 +578,8 @@ HexagonTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const {
const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
unsigned LR = HRI.getRARegister();
- if (Op.getOpcode() != ISD::INLINEASM || HMFI.hasClobberLR())
+ if ((Op.getOpcode() != ISD::INLINEASM &&
+ Op.getOpcode() != ISD::INLINEASM_BR) || HMFI.hasClobberLR())
return Op;
unsigned NumOps = Op.getNumOperands();
@@ -1291,6 +1292,7 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
setOperationAction(ISD::INLINEASM, MVT::Other, Custom);
+ setOperationAction(ISD::INLINEASM_BR, MVT::Other, Custom);
setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
@@ -2740,7 +2742,7 @@ HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
unsigned Opc = Op.getOpcode();
// Handle INLINEASM first.
- if (Opc == ISD::INLINEASM)
+ if (Opc == ISD::INLINEASM || Opc == ISD::INLINEASM_BR)
return LowerINLINEASM(Op, DAG);
if (isHvxOperation(Op)) {
diff --git a/llvm/lib/Target/Hexagon/HexagonMachineScheduler.cpp b/llvm/lib/Target/Hexagon/HexagonMachineScheduler.cpp
index 3a291bd1d80..0e655502430 100644
--- a/llvm/lib/Target/Hexagon/HexagonMachineScheduler.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonMachineScheduler.cpp
@@ -112,6 +112,7 @@ bool VLIWResourceModel::isResourceAvailable(SUnit *SU, bool IsTop) {
case TargetOpcode::IMPLICIT_DEF:
case TargetOpcode::COPY:
case TargetOpcode::INLINEASM:
+ case TargetOpcode::INLINEASM_BR:
break;
}
@@ -167,6 +168,7 @@ bool VLIWResourceModel::reserveResources(SUnit *SU, bool IsTop) {
case TargetOpcode::EH_LABEL:
case TargetOpcode::COPY:
case TargetOpcode::INLINEASM:
+ case TargetOpcode::INLINEASM_BR:
break;
}
Packet.push_back(SU);
diff --git a/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp b/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp
index de5c243bb2f..5c3a3fc6926 100644
--- a/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp
+++ b/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp
@@ -307,7 +307,8 @@ unsigned MSP430InstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
case TargetOpcode::KILL:
case TargetOpcode::DBG_VALUE:
return 0;
- case TargetOpcode::INLINEASM: {
+ case TargetOpcode::INLINEASM:
+ case TargetOpcode::INLINEASM_BR: {
const MachineFunction *MF = MI.getParent()->getParent();
const TargetInstrInfo &TII = *MF->getSubtarget().getInstrInfo();
return TII.getInlineAsmLength(MI.getOperand(0).getSymbolName(),
diff --git a/llvm/lib/Target/Mips/MipsInstrInfo.cpp b/llvm/lib/Target/Mips/MipsInstrInfo.cpp
index 610c4079a67..fbd56206b24 100644
--- a/llvm/lib/Target/Mips/MipsInstrInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsInstrInfo.cpp
@@ -577,7 +577,8 @@ unsigned MipsInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
switch (MI.getOpcode()) {
default:
return MI.getDesc().getSize();
- case TargetOpcode::INLINEASM: { // Inline Asm: Variable size.
+ case TargetOpcode::INLINEASM:
+ case TargetOpcode::INLINEASM_BR: { // Inline Asm: Variable size.
const MachineFunction *MF = MI.getParent()->getParent();
const char *AsmStr = MI.getOperand(0).getSymbolName();
return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
diff --git a/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp b/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
index 12ebdb4b1d1..86062dde9fa 100644
--- a/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
@@ -1000,7 +1000,8 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
if (noImmForm)
OperandBase = 1;
- else if (OpC != TargetOpcode::INLINEASM) {
+ else if (OpC != TargetOpcode::INLINEASM &&
+ OpC != TargetOpcode::INLINEASM_BR) {
assert(ImmToIdxMap.count(OpC) &&
"No indexed form of load or store available!");
unsigned NewOpcode = ImmToIdxMap.find(OpC)->second;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index afa872b4a5c..ddb976b47fb 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -439,7 +439,8 @@ unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
case RISCV::PseudoCALL:
case RISCV::PseudoTAIL:
return 8;
- case TargetOpcode::INLINEASM: {
+ case TargetOpcode::INLINEASM:
+ case TargetOpcode::INLINEASM_BR: {
const MachineFunction &MF = *MI.getParent()->getParent();
const auto &TM = static_cast<const RISCVTargetMachine &>(MF.getTarget());
return getInlineAsmLength(MI.getOperand(0).getSymbolName(),
diff --git a/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp b/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp
index 17ad34663da..8cff50d19ed 100644
--- a/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp
+++ b/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp
@@ -312,7 +312,7 @@ bool SparcDAGToDAGISel::tryInlineAsm(SDNode *N){
SelectInlineAsmMemoryOperands(AsmNodeOperands, SDLoc(N));
- SDValue New = CurDAG->getNode(ISD::INLINEASM, SDLoc(N),
+ SDValue New = CurDAG->getNode(N->getOpcode(), SDLoc(N),
CurDAG->getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
New->setNodeId(-1);
ReplaceNode(N, New.getNode());
@@ -328,7 +328,8 @@ void SparcDAGToDAGISel::Select(SDNode *N) {
switch (N->getOpcode()) {
default: break;
- case ISD::INLINEASM: {
+ case ISD::INLINEASM:
+ case ISD::INLINEASM_BR: {
if (tryInlineAsm(N))
return;
break;
diff --git a/llvm/lib/Target/X86/X86AsmPrinter.cpp b/llvm/lib/Target/X86/X86AsmPrinter.cpp
index 8b5ac8fd066..cd2ae824982 100644
--- a/llvm/lib/Target/X86/X86AsmPrinter.cpp
+++ b/llvm/lib/Target/X86/X86AsmPrinter.cpp
@@ -253,6 +253,11 @@ static void printOperand(X86AsmPrinter &P, const MachineInstr *MI,
printSymbolOperand(P, MO, O);
break;
}
+ case MachineOperand::MO_BlockAddress: {
+ MCSymbol *Sym = P.GetBlockAddressSymbol(MO.getBlockAddress());
+ Sym->print(O, P.MAI);
+ break;
+ }
}
}
diff --git a/llvm/lib/Target/X86/X86FloatingPoint.cpp b/llvm/lib/Target/X86/X86FloatingPoint.cpp
index bd2e8a2c063..3ef2c1b5171 100644
--- a/llvm/lib/Target/X86/X86FloatingPoint.cpp
+++ b/llvm/lib/Target/X86/X86FloatingPoint.cpp
@@ -1476,7 +1476,8 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
break;
}
- case TargetOpcode::INLINEASM: {
+ case TargetOpcode::INLINEASM:
+ case TargetOpcode::INLINEASM_BR: {
// The inline asm MachineInstr currently only *uses* FP registers for the
// 'f' constraint. These should be turned into the current ST(x) register
// in the machine instr.
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index b34b3fd1619..ab7c3a5e294 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file implements the visitCall and visitInvoke functions.
+// This file implements the visitCall, visitInvoke, and visitCallBr functions.
//
//===----------------------------------------------------------------------===//
@@ -1834,8 +1834,8 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
if (!II) return visitCallBase(CI);
- // Intrinsics cannot occur in an invoke, so handle them here instead of in
- // visitCallBase.
+ // Intrinsics cannot occur in an invoke or a callbr, so handle them here
+ // instead of in visitCallBase.
if (auto *MI = dyn_cast<AnyMemIntrinsic>(II)) {
bool Changed = false;
@@ -4017,6 +4017,11 @@ Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
return visitCallBase(II);
}
+// CallBrInst simplification
+Instruction *InstCombiner::visitCallBrInst(CallBrInst &CBI) {
+ return visitCallBase(CBI);
+}
+
/// If this cast does not affect the value passed through the varargs area, we
/// can eliminate the use of the cast.
static bool isSafeToEliminateVarargsCast(const CallBase &Call,
@@ -4145,7 +4150,7 @@ static IntrinsicInst *findInitTrampoline(Value *Callee) {
return nullptr;
}
-/// Improvements for call and invoke instructions.
+/// Improvements for call, callbr and invoke instructions.
Instruction *InstCombiner::visitCallBase(CallBase &Call) {
if (isAllocLikeFn(&Call, &TLI))
return visitAllocSite(Call);
@@ -4178,7 +4183,7 @@ Instruction *InstCombiner::visitCallBase(CallBase &Call) {
}
// If the callee is a pointer to a function, attempt to move any casts to the
- // arguments of the call/invoke.
+ // arguments of the call/callbr/invoke.
Value *Callee = Call.getCalledValue();
if (!isa<Function>(Callee) && transformConstExprCastCall(Call))
return nullptr;
@@ -4211,9 +4216,9 @@ Instruction *InstCombiner::visitCallBase(CallBase &Call) {
if (isa<CallInst>(OldCall))
return eraseInstFromFunction(*OldCall);
- // We cannot remove an invoke, because it would change the CFG, just
- // change the callee to a null pointer.
- cast<InvokeInst>(OldCall)->setCalledFunction(
+ // We cannot remove an invoke or a callbr, because it would change the
+ // CFG, just change the callee to a null pointer.
+ cast<CallBase>(OldCall)->setCalledFunction(
CalleeF->getFunctionType(),
Constant::getNullValue(CalleeF->getType()));
return nullptr;
@@ -4228,8 +4233,8 @@ Instruction *InstCombiner::visitCallBase(CallBase &Call) {
if (!Call.getType()->isVoidTy())
replaceInstUsesWith(Call, UndefValue::get(Call.getType()));
- if (isa<InvokeInst>(Call)) {
- // Can't remove an invoke because we cannot change the CFG.
+ if (Call.isTerminator()) {
+ // Can't remove an invoke or callbr because we cannot change the CFG.
return nullptr;
}
@@ -4282,7 +4287,7 @@ Instruction *InstCombiner::visitCallBase(CallBase &Call) {
}
/// If the callee is a constexpr cast of a function, attempt to move the cast to
-/// the arguments of the call/invoke.
+/// the arguments of the call/callbr/invoke.
bool InstCombiner::transformConstExprCastCall(CallBase &Call) {
auto *Callee = dyn_cast<Function>(Call.getCalledValue()->stripPointerCasts());
if (!Callee)
@@ -4333,17 +4338,21 @@ bool InstCombiner::transformConstExprCastCall(CallBase &Call) {
return false; // Attribute not compatible with transformed value.
}
- // If the callbase is an invoke instruction, and the return value is used by
- // a PHI node in a successor, we cannot change the return type of the call
- // because there is no place to put the cast instruction (without breaking
- // the critical edge). Bail out in this case.
- if (!Caller->use_empty())
+ // If the callbase is an invoke/callbr instruction, and the return value is
+ // used by a PHI node in a successor, we cannot change the return type of
+ // the call because there is no place to put the cast instruction (without
+ // breaking the critical edge). Bail out in this case.
+ if (!Caller->use_empty()) {
if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
for (User *U : II->users())
if (PHINode *PN = dyn_cast<PHINode>(U))
if (PN->getParent() == II->getNormalDest() ||
PN->getParent() == II->getUnwindDest())
return false;
+ // FIXME: Be conservative for callbr to avoid a quadratic search.
+ if (isa<CallBrInst>(Caller))
+ return false;
+ }
}
unsigned NumActualArgs = Call.arg_size();
@@ -4497,6 +4506,9 @@ bool InstCombiner::transformConstExprCastCall(CallBase &Call) {
if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
NewCall = Builder.CreateInvoke(Callee, II->getNormalDest(),
II->getUnwindDest(), Args, OpBundles);
+ } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(Caller)) {
+ NewCall = Builder.CreateCallBr(Callee, CBI->getDefaultDest(),
+ CBI->getIndirectDests(), Args, OpBundles);
} else {
NewCall = Builder.CreateCall(Callee, Args, OpBundles);
cast<CallInst>(NewCall)->setTailCallKind(
@@ -4520,11 +4532,14 @@ bool InstCombiner::transformConstExprCastCall(CallBase &Call) {
NV = NC = CastInst::CreateBitOrPointerCast(NC, OldRetTy);
NC->setDebugLoc(Caller->getDebugLoc());
- // If this is an invoke instruction, we should insert it after the first
- // non-phi, instruction in the normal successor block.
+ // If this is an invoke/callbr instruction, we should insert it after the
+ // first non-phi instruction in the normal successor block.
if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
BasicBlock::iterator I = II->getNormalDest()->getFirstInsertionPt();
InsertNewInstBefore(NC, *I);
+ } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(Caller)) {
+ BasicBlock::iterator I = CBI->getDefaultDest()->getFirstInsertionPt();
+ InsertNewInstBefore(NC, *I);
} else {
// Otherwise, it's a call, just insert cast right after the call.
InsertNewInstBefore(NC, *Caller);
@@ -4673,6 +4688,12 @@ InstCombiner::transformCallThroughTrampoline(CallBase &Call,
NewArgs, OpBundles);
cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
+ } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(&Call)) {
+ NewCaller =
+ CallBrInst::Create(NewFTy, NewCallee, CBI->getDefaultDest(),
+ CBI->getIndirectDests(), NewArgs, OpBundles);
+ cast<CallBrInst>(NewCaller)->setCallingConv(CBI->getCallingConv());
+ cast<CallBrInst>(NewCaller)->setAttributes(NewPAL);
} else {
NewCaller = CallInst::Create(NewFTy, NewCallee, NewArgs, OpBundles);
cast<CallInst>(NewCaller)->setTailCallKind(
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
index 951e0e72e9e..5b0c7fce0d1 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
+++ b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
@@ -392,6 +392,7 @@ public:
Instruction *visitSelectInst(SelectInst &SI);
Instruction *visitCallInst(CallInst &CI);
Instruction *visitInvokeInst(InvokeInst &II);
+ Instruction *visitCallBrInst(CallBrInst &CBI);
Instruction *SliceUpIllegalIntegerPHI(PHINode &PN);
Instruction *visitPHINode(PHINode &PN);
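
The corresponding definition is not shown in this hunk; presumably it funnels into the shared CallBase handling, mirroring visitInvokeInst:

    // Presumed body in InstCombineCalls.cpp (mirrors visitInvokeInst).
    Instruction *InstCombiner::visitCallBrInst(CallBrInst &CBI) {
      return visitCallBase(CBI);
    }
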
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 1f40c8f5a4a..4d04e3ff99e 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -921,8 +921,8 @@ Instruction *InstCombiner::foldOpIntoPhi(Instruction &I, PHINode *PN) {
// If the InVal is an invoke at the end of the pred block, then we can't
// insert a computation after it without breaking the edge.
- if (InvokeInst *II = dyn_cast<InvokeInst>(InVal))
- if (II->getParent() == NonConstBB)
+ if (isa<InvokeInst>(InVal) || isa<CallBrInst>(InVal))
+ if (cast<Instruction>(InVal)->getParent() == NonConstBB)
return nullptr;
// If the incoming non-constant value is in I's block, we will remove one
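
Both invoke and callbr define their result in a block they also terminate, so no computation can be inserted after them in that block. A hedged helper capturing the property (hypothetical, not in the patch):

    static bool definedByTerminator(const llvm::Value *V) {
      const auto *I = llvm::dyn_cast<llvm::Instruction>(V);
      return I && I->isTerminator(); // invoke, callbr, ...
    }
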
diff --git a/llvm/lib/Transforms/Scalar/GVN.cpp b/llvm/lib/Transforms/Scalar/GVN.cpp
index 7595ae05787..a02f32f5643 100644
--- a/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -1131,6 +1131,14 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
return false;
}
+ // FIXME: Can we support the fallthrough edge?
+ if (isa<CallBrInst>(Pred->getTerminator())) {
+ LLVM_DEBUG(
+ dbgs() << "COULD NOT PRE LOAD BECAUSE OF CALLBR CRITICAL EDGE '"
+ << Pred->getName() << "': " << *LI << '\n');
+ return false;
+ }
+
if (LoadBB->isEHPad()) {
LLVM_DEBUG(
dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD CRITICAL EDGE '"
@@ -2167,8 +2175,8 @@ bool GVN::performScalarPRE(Instruction *CurInst) {
return false;
// We don't currently value number ANY inline asm calls.
- if (CallInst *CallI = dyn_cast<CallInst>(CurInst))
- if (CallI->isInlineAsm())
+ if (auto *CallB = dyn_cast<CallBase>(CurInst))
+ if (CallB->isInlineAsm())
return false;
uint32_t ValNo = VN.lookup(CurInst);
@@ -2251,6 +2259,11 @@ bool GVN::performScalarPRE(Instruction *CurInst) {
if (isa<IndirectBrInst>(PREPred->getTerminator()))
return false;
+ // Don't do PRE across callbr.
+ // FIXME: Can we do this across the fallthrough edge?
+ if (isa<CallBrInst>(PREPred->getTerminator()))
+ return false;
+
// We can't do PRE safely on a critical edge, so instead we schedule
// the edge to be split and perform the PRE the next time we iterate
// on the function.
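
The critical edges GVN now refuses to touch typically originate from asm goto, the source construct that lowers to callbr. An illustrative example (GCC/Clang extension; the function and label names are arbitrary):

    int probe(int x) {
      // The asm body may branch to 'err'; this lowers to a callbr with one
      // fallthrough destination and one indirect (blockaddress) destination.
      asm goto("testl %0, %0\n\tjne %l[err]"
               : /* no outputs */ : "r"(x) : "cc" : err);
      return x + 1; // fallthrough edge
    err:
      return -1;    // indirect edge; PRE cannot split into it
    }
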
diff --git a/llvm/lib/Transforms/Scalar/JumpThreading.cpp b/llvm/lib/Transforms/Scalar/JumpThreading.cpp
index 7738a79425b..f74f7e28e52 100644
--- a/llvm/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/llvm/lib/Transforms/Scalar/JumpThreading.cpp
@@ -1055,7 +1055,7 @@ bool JumpThreadingPass::ProcessBlock(BasicBlock *BB) {
Condition = IB->getAddress()->stripPointerCasts();
Preference = WantBlockAddress;
} else {
- return false; // Must be an invoke.
+ return false; // Must be an invoke or callbr.
}
// Run constant folding to see if we can reduce the condition to a simple
@@ -1428,7 +1428,9 @@ bool JumpThreadingPass::SimplifyPartiallyRedundantLoad(LoadInst *LoadI) {
// Add all the unavailable predecessors to the PredsToSplit list.
for (BasicBlock *P : predecessors(LoadBB)) {
// If the predecessor is an indirect goto, we can't split the edge.
- if (isa<IndirectBrInst>(P->getTerminator()))
+ // Same for CallBr.
+ if (isa<IndirectBrInst>(P->getTerminator()) ||
+ isa<CallBrInst>(P->getTerminator()))
return false;
if (!AvailablePredSet.count(P))
@@ -1641,8 +1643,9 @@ bool JumpThreadingPass::ProcessThreadableEdges(Value *Cond, BasicBlock *BB,
++PredWithKnownDest;
// If the predecessor ends with an indirect goto, we can't change its
- // destination.
- if (isa<IndirectBrInst>(Pred->getTerminator()))
+ // destination. Same for CallBr.
+ if (isa<IndirectBrInst>(Pred->getTerminator()) ||
+ isa<CallBrInst>(Pred->getTerminator()))
continue;
PredToDestList.push_back(std::make_pair(Pred, DestBB));
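
Both bail-outs test the same property of the predecessor's terminator; a hedged helper that unifies them (not in the patch):

    static bool isUnsplittablePred(const llvm::BasicBlock *Pred) {
      const llvm::Instruction *T = Pred->getTerminator();
      return llvm::isa<llvm::IndirectBrInst>(T) ||
             llvm::isa<llvm::CallBrInst>(T);
    }
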
diff --git a/llvm/lib/Transforms/Scalar/SCCP.cpp b/llvm/lib/Transforms/Scalar/SCCP.cpp
index 5dd7f43e662..39d294f8602 100644
--- a/llvm/lib/Transforms/Scalar/SCCP.cpp
+++ b/llvm/lib/Transforms/Scalar/SCCP.cpp
@@ -638,6 +638,11 @@ private:
visitTerminator(II);
}
+ void visitCallBrInst (CallBrInst &CBI) {
+ visitCallSite(&CBI);
+ visitTerminator(CBI);
+ }
+
void visitCallSite (CallSite CS);
void visitResumeInst (ResumeInst &I) { /*returns void*/ }
void visitUnreachableInst(UnreachableInst &I) { /*returns void*/ }
@@ -733,6 +738,13 @@ void SCCPSolver::getFeasibleSuccessors(Instruction &TI,
return;
}
+ // In case of callbr, we pessimistically assume that all successors are
+ // feasible.
+ if (isa<CallBrInst>(&TI)) {
+ Succs.assign(TI.getNumSuccessors(), true);
+ return;
+ }
+
LLVM_DEBUG(dbgs() << "Unknown terminator instruction: " << TI << '\n');
llvm_unreachable("SCCP: Don't know how to handle this terminator!");
}
@@ -1597,6 +1609,7 @@ bool SCCPSolver::ResolvedUndefsIn(Function &F) {
return true;
case Instruction::Call:
case Instruction::Invoke:
+ case Instruction::CallBr:
// There are two reasons a call can have an undef result
// 1. It could be tracked.
// 2. It could be constant-foldable.
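
The pessimistic handling marks every destination feasible because the asm body may transfer control to any of its labels. The successor count it relies on is the default destination plus the blockaddress targets; a hedged sketch:

    static unsigned numCallBrSuccessors(const llvm::CallBrInst &CBI) {
      return 1 /* default dest */ + CBI.getNumIndirectDests();
    }
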
diff --git a/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp b/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
index 41ad4fefe1f..2410f652fd7 100644
--- a/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
+++ b/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
@@ -549,6 +549,8 @@ BasicBlock *llvm::SplitBlockPredecessors(BasicBlock *BB,
// all BlockAddress uses would need to be updated.
assert(!isa<IndirectBrInst>(Preds[i]->getTerminator()) &&
"Cannot split an edge from an IndirectBrInst");
+ assert(!isa<CallBrInst>(Preds[i]->getTerminator()) &&
+ "Cannot split an edge from a CallBrInst");
Preds[i]->getTerminator()->replaceUsesOfWith(BB, NewBB);
}
diff --git a/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp b/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp
index ab604a6c57c..d73fefdf9c9 100644
--- a/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp
+++ b/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp
@@ -144,6 +144,10 @@ llvm::SplitCriticalEdge(Instruction *TI, unsigned SuccNum,
// it in this generic function.
if (DestBB->isEHPad()) return nullptr;
+ // Don't split the non-fallthrough edge from a callbr.
+ if (isa<CallBrInst>(TI) && SuccNum > 0)
+ return nullptr;
+
// Create a new basic block, linking it into the CFG.
BasicBlock *NewBB = BasicBlock::Create(TI->getContext(),
TIBB->getName() + "." + DestBB->getName() + "_crit_edge");
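
Successor 0 of a callbr is the fallthrough (default) destination and successors 1..N are the blockaddress targets, so "SuccNum > 0" singles out exactly the edges that must not be split. The same check as a hedged standalone predicate:

    static bool callBrEdgeSplittable(const llvm::Instruction *TI,
                                     unsigned SuccNum) {
      return !llvm::isa<llvm::CallBrInst>(TI) || SuccNum == 0;
    }
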
diff --git a/llvm/lib/Transforms/Utils/InlineFunction.cpp b/llvm/lib/Transforms/Utils/InlineFunction.cpp
index 9015b36fa35..7443a7f9c5e 100644
--- a/llvm/lib/Transforms/Utils/InlineFunction.cpp
+++ b/llvm/lib/Transforms/Utils/InlineFunction.cpp
@@ -1504,6 +1504,10 @@ llvm::InlineResult llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
assert(TheCall->getParent() && TheCall->getFunction()
&& "Instruction not in function!");
+ // FIXME: we don't inline callbr yet.
+ if (isa<CallBrInst>(TheCall))
+ return false;
+
// If IFI has any state in it, zap it before we fill it in.
IFI.reset();
@@ -1729,6 +1733,8 @@ llvm::InlineResult llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
Instruction *NewI = nullptr;
if (isa<CallInst>(I))
NewI = CallInst::Create(cast<CallInst>(I), OpDefs, I);
+ else if (isa<CallBrInst>(I))
+ NewI = CallBrInst::Create(cast<CallBrInst>(I), OpDefs, I);
else
NewI = InvokeInst::Create(cast<InvokeInst>(I), OpDefs, I);
@@ -2031,6 +2037,8 @@ llvm::InlineResult llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
Instruction *NewInst;
if (CS.isCall())
NewInst = CallInst::Create(cast<CallInst>(I), OpBundles, I);
+ else if (CS.isCallBr())
+ NewInst = CallBrInst::Create(cast<CallBrInst>(I), OpBundles, I);
else
NewInst = InvokeInst::Create(cast<InvokeInst>(I), OpBundles, I);
NewInst->takeName(I);
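
With the early-out, inlining a callbr site reports failure cleanly instead of producing broken IR. A hedged usage sketch (CS is assumed to be a CallSite already in scope; InlineResult converts to bool):

    llvm::InlineFunctionInfo IFI;
    if (!llvm::InlineFunction(CS, IFI)) {
      // Not inlined, e.g. because CS is a callbr; the IR is left untouched.
    }
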
diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp
index 70812dc2de8..062bbcdae2c 100644
--- a/llvm/lib/Transforms/Utils/Local.cpp
+++ b/llvm/lib/Transforms/Utils/Local.cpp
@@ -996,6 +996,18 @@ bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB,
}
}
+ // We cannot fold the block if it's a branch to an already present callbr
+ // successor because that creates duplicate successors.
+ for (auto I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
+ if (auto *CBI = dyn_cast<CallBrInst>((*I)->getTerminator())) {
+ if (Succ == CBI->getDefaultDest())
+ return false;
+ for (unsigned i = 0, e = CBI->getNumIndirectDests(); i != e; ++i)
+ if (Succ == CBI->getIndirectDest(i))
+ return false;
+ }
+ }
+
LLVM_DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);
SmallVector<DominatorTree::UpdateType, 32> Updates;
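
An equivalent hedged formulation through the generic successor interface, relying on successor 0 being the default destination (behaviorally the same as the loop above, not part of the patch):

    for (llvm::BasicBlock *Pred : llvm::predecessors(BB)) {
      const llvm::Instruction *T = Pred->getTerminator();
      if (llvm::isa<llvm::CallBrInst>(T))
        for (unsigned i = 0, e = T->getNumSuccessors(); i != e; ++i)
          if (T->getSuccessor(i) == Succ)
            return false; // folding would duplicate a callbr successor
    }
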
diff --git a/llvm/lib/Transforms/Utils/LoopSimplify.cpp b/llvm/lib/Transforms/Utils/LoopSimplify.cpp
index b2aa20bc0f8..954e8038dfb 100644
--- a/llvm/lib/Transforms/Utils/LoopSimplify.cpp
+++ b/llvm/lib/Transforms/Utils/LoopSimplify.cpp
@@ -27,6 +27,9 @@
// to transform the loop and make these guarantees. Client code should check
// that these conditions are true before relying on them.
//
+// Similar complications arise from callbr instructions, particularly in
+// asm-goto where blockaddress expressions are used.
+//
// Note that the simplifycfg pass will clean up blocks which are split out but
// end up being unnecessary, so usage of this pass should not pessimize
// generated code.
@@ -123,10 +126,11 @@ BasicBlock *llvm::InsertPreheaderForLoop(Loop *L, DominatorTree *DT,
PI != PE; ++PI) {
BasicBlock *P = *PI;
if (!L->contains(P)) { // Coming in from outside the loop?
- // If the loop is branched to from an indirect branch, we won't
+ // If the loop is branched to from an indirect terminator, we won't
// be able to fully transform the loop, because it prohibits
// edge splitting.
- if (isa<IndirectBrInst>(P->getTerminator())) return nullptr;
+ if (P->getTerminator()->isIndirectTerminator())
+ return nullptr;
// Keep track of it.
OutsideBlocks.push_back(P);
@@ -235,8 +239,8 @@ static Loop *separateNestedLoop(Loop *L, BasicBlock *Preheader,
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
if (PN->getIncomingValue(i) != PN ||
!L->contains(PN->getIncomingBlock(i))) {
- // We can't split indirectbr edges.
- if (isa<IndirectBrInst>(PN->getIncomingBlock(i)->getTerminator()))
+ // We can't split indirect control flow edges.
+ if (PN->getIncomingBlock(i)->getTerminator()->isIndirectTerminator())
return nullptr;
OuterLoopPreds.push_back(PN->getIncomingBlock(i));
}
@@ -357,8 +361,8 @@ static BasicBlock *insertUniqueBackedgeBlock(Loop *L, BasicBlock *Preheader,
for (pred_iterator I = pred_begin(Header), E = pred_end(Header); I != E; ++I){
BasicBlock *P = *I;
- // Indirectbr edges cannot be split, so we must fail if we find one.
- if (isa<IndirectBrInst>(P->getTerminator()))
+ // Indirect edges cannot be split, so we must fail if we find one.
+ if (P->getTerminator()->isIndirectTerminator())
return nullptr;
if (P != Preheader) BackedgeBlocks.push_back(P);
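
isIndirectTerminator() is new API in this patch; presumably it holds exactly for indirectbr and callbr, the terminators whose outgoing edges cannot be split. A hedged stand-in for trees that predate the API:

    static bool isIndirectTerminatorCompat(const llvm::Instruction &I) {
      return llvm::isa<llvm::IndirectBrInst>(&I) ||
             llvm::isa<llvm::CallBrInst>(&I);
    }
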
diff --git a/llvm/lib/Transforms/Utils/LoopUtils.cpp b/llvm/lib/Transforms/Utils/LoopUtils.cpp
index 5e661ae8c21..5539ff12e4a 100644
--- a/llvm/lib/Transforms/Utils/LoopUtils.cpp
+++ b/llvm/lib/Transforms/Utils/LoopUtils.cpp
@@ -65,6 +65,9 @@ bool llvm::formDedicatedExitBlocks(Loop *L, DominatorTree *DT, LoopInfo *LI,
if (isa<IndirectBrInst>(PredBB->getTerminator()))
// We cannot rewrite exiting edges from an indirectbr.
return false;
+ if (isa<CallBrInst>(PredBB->getTerminator()))
+ // We cannot rewrite exiting edges from a callbr.
+ return false;
InLoopPredecessors.push_back(PredBB);
} else {
diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index 3fec17ac8cc..00bcb8479c3 100644
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -1265,8 +1265,10 @@ static bool HoistThenElseCodeToIf(BranchInst *BI,
while (isa<DbgInfoIntrinsic>(I2))
I2 = &*BB2_Itr++;
}
+ // FIXME: Can we define a safety predicate for CallBr?
if (isa<PHINode>(I1) || !I1->isIdenticalToWhenDefined(I2) ||
- (isa<InvokeInst>(I1) && !isSafeToHoistInvoke(BB1, BB2, I1, I2)))
+ (isa<InvokeInst>(I1) && !isSafeToHoistInvoke(BB1, BB2, I1, I2)) ||
+ isa<CallBrInst>(I1))
return false;
BasicBlock *BIParent = BI->getParent();
@@ -1349,9 +1351,14 @@ static bool HoistThenElseCodeToIf(BranchInst *BI,
HoistTerminator:
// It may not be possible to hoist an invoke.
+ // FIXME: Can we define a safety predicate for CallBr?
if (isa<InvokeInst>(I1) && !isSafeToHoistInvoke(BB1, BB2, I1, I2))
return Changed;
+ // TODO: callbr hoisting currently disabled pending further study.
+ if (isa<CallBrInst>(I1))
+ return Changed;
+
for (BasicBlock *Succ : successors(BB1)) {
for (PHINode &PN : Succ->phis()) {
Value *BB1V = PN.getIncomingValueForBlock(BB1);
@@ -1443,7 +1450,7 @@ static bool canSinkInstructions(
// Conservatively return false if I is an inline-asm instruction. Sinking
// and merging inline-asm instructions can potentially create arguments
// that cannot satisfy the inline-asm constraints.
- if (const auto *C = dyn_cast<CallInst>(I))
+ if (const auto *C = dyn_cast<CallBase>(I))
if (C->isInlineAsm())
return false;
@@ -1506,7 +1513,7 @@ static bool canSinkInstructions(
// We can't create a PHI from this GEP.
return false;
// Don't create indirect calls! The called value is the final operand.
- if ((isa<CallInst>(I0) || isa<InvokeInst>(I0)) && OI == OE - 1) {
+ if (isa<CallBase>(I0) && OI == OE - 1) {
// FIXME: if the call was *already* indirect, we should do this.
return false;
}
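
Switching dyn_cast<CallInst> to dyn_cast<CallBase> makes the inline-asm bail-out cover call, invoke, and callbr uniformly. The check as a hedged standalone predicate:

    static bool blocksSinking(const llvm::Instruction *I) {
      if (const auto *CB = llvm::dyn_cast<llvm::CallBase>(I))
        return CB->isInlineAsm(); // inline-asm constraints forbid merging
      return false;
    }
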