Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp   |  5
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelLowering.cpp         |  2
-rw-r--r--  llvm/lib/Target/ARM/ARMISelLowering.cpp                 |  4
-rw-r--r--  llvm/lib/Target/Mips/MipsISelLowering.cpp               |  2
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp             | 14
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXISelLowering.h               |  7
-rw-r--r--  llvm/lib/Target/PowerPC/PPCISelLowering.cpp             | 28
-rw-r--r--  llvm/lib/Target/PowerPC/PPCISelLowering.h               | 10
-rw-r--r--  llvm/lib/Target/Sparc/SparcISelLowering.cpp             |  6
-rw-r--r--  llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp |  2
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp                 |  7
11 files changed, 42 insertions(+), 45 deletions(-)
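Every hunk below applies the same mechanical change: CallLoweringInfo's CS field is now held as an ImmutableCallSite value instead of an ImmutableCallSite*, so member access switches from '->' to '.', and null-pointer guards fall back on the call site's boolean conversion. A minimal sketch of the pattern, assuming the pre-CallBase CallSite API from llvm/IR/CallSite.h (the helper names below are illustrative, not part of the patch):

#include "llvm/IR/CallSite.h"
using namespace llvm;

// Before: the call site arrived as a pointer and had to be null-checked.
static bool isMustTailOld(const ImmutableCallSite *CS) {
  return CS && CS->isMustTailCall();
}

// After: ImmutableCallSite is a thin wrapper around an Instruction*, so it
// is cheap to pass by value; an empty call site converts to false, keeping
// the guard intact while member access uses '.'.
static bool isMustTailNew(ImmutableCallSite CS) {
  return CS && CS.isMustTailCall();
}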
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 41c3f5f235e..82b0caf0c11 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -6068,7 +6068,7 @@ SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
if (MF.hasEHFunclets()) {
assert(CLI.CS);
WinEHFuncInfo *EHInfo = DAG.getMachineFunction().getWinEHFuncInfo();
- EHInfo->addIPToStateRange(cast<InvokeInst>(CLI.CS->getInstruction()),
+ EHInfo->addIPToStateRange(cast<InvokeInst>(CLI.CS.getInstruction()),
BeginLabel, EndLabel);
} else {
MF.addInvoke(FuncInfo.MBBMap[EHPadBB], BeginLabel, EndLabel);
@@ -8135,8 +8135,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
}
getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT,
- CLI.CS ? CLI.CS->getInstruction() : nullptr, ExtendKind,
- true);
+ CLI.CS.getInstruction(), ExtendKind, true);
for (unsigned j = 0; j != NumParts; ++j) {
// if it isn't first piece, alignment must be 1
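The getCopyToParts hunk above can drop the ternary because an empty ImmutableCallSite already reports a null instruction. A short sketch of the assumed behaviour (callInstrOrNull is a hypothetical helper, not LLVM API):

#include "llvm/IR/CallSite.h"

// Assumes ImmutableCallSite::getInstruction() returns nullptr when the call
// site is empty, which is what makes the explicit "CS ? ... : nullptr"
// guard redundant.
static const llvm::Instruction *callInstrOrNull(llvm::ImmutableCallSite CS) {
  return CS.getInstruction();  // same as: CS ? CS.getInstruction() : nullptr
}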
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 440f35038d3..08028337bc9 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -3187,7 +3187,7 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
// Check if it's really possible to do a tail call.
IsTailCall = isEligibleForTailCallOptimization(
Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG);
- if (!IsTailCall && CLI.CS && CLI.CS->isMustTailCall())
+ if (!IsTailCall && CLI.CS && CLI.CS.isMustTailCall())
report_fatal_error("failed to perform tail call elimination on a call "
"site marked musttail");
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index a6317957b68..73ba1671df4 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -1785,7 +1785,7 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
isVarArg, isStructRet, MF.getFunction()->hasStructRetAttr(),
Outs, OutVals, Ins, DAG);
- if (!isTailCall && CLI.CS && CLI.CS->isMustTailCall())
+ if (!isTailCall && CLI.CS && CLI.CS.isMustTailCall())
report_fatal_error("failed to perform tail call elimination on a call "
"site marked musttail");
// We don't support GuaranteedTailCallOpt for ARM, only automatically
@@ -2032,7 +2032,7 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// more times in this block, we can improve codesize by calling indirectly
// as BLXr has a 16-bit encoding.
auto *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
- auto *BB = CLI.CS->getParent();
+ auto *BB = CLI.CS.getParent();
bool PreferIndirect =
Subtarget->isThumb() && MF.getFunction()->optForMinSize() &&
count_if(GV->users(), [&BB](const User *U) {
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
index c4077a96a1d..c78f491ce4b 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -2985,7 +2985,7 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
G->getGlobal()->hasProtectedVisibility());
}
}
- if (!IsTailCall && CLI.CS && CLI.CS->isMustTailCall())
+ if (!IsTailCall && CLI.CS && CLI.CS.isMustTailCall())
report_fatal_error("failed to perform tail call elimination on a call "
"site marked musttail");
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 70070c259c6..150e67a833f 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -1260,7 +1260,7 @@ NVPTXTargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
std::string NVPTXTargetLowering::getPrototype(
const DataLayout &DL, Type *retTy, const ArgListTy &Args,
const SmallVectorImpl<ISD::OutputArg> &Outs, unsigned retAlignment,
- const ImmutableCallSite *CS) const {
+ ImmutableCallSite CS) const {
auto PtrVT = getPointerTy(DL);
bool isABI = (STI.getSmVersion() >= 20);
@@ -1294,7 +1294,7 @@ std::string NVPTXTargetLowering::getPrototype(
} else if (isa<PointerType>(retTy)) {
O << ".param .b" << PtrVT.getSizeInBits() << " _";
} else if (retTy->isAggregateType() || retTy->isVectorTy() || retTy->isIntegerTy(128)) {
- auto &DL = CS->getCalledFunction()->getParent()->getDataLayout();
+ auto &DL = CS.getCalledFunction()->getParent()->getDataLayout();
O << ".param .align " << retAlignment << " .b8 _["
<< DL.getTypeAllocSize(retTy) << "]";
} else {
@@ -1317,7 +1317,7 @@ std::string NVPTXTargetLowering::getPrototype(
if (!Outs[OIdx].Flags.isByVal()) {
if (Ty->isAggregateType() || Ty->isVectorTy() || Ty->isIntegerTy(128)) {
unsigned align = 0;
- const CallInst *CallI = cast<CallInst>(CS->getInstruction());
+ const CallInst *CallI = cast<CallInst>(CS.getInstruction());
// +1 because index 0 is reserved for return type alignment
if (!getAlign(*CallI, i + 1, align))
align = DL.getABITypeAlignment(Ty);
@@ -1370,7 +1370,7 @@ std::string NVPTXTargetLowering::getPrototype(
}
unsigned NVPTXTargetLowering::getArgumentAlignment(SDValue Callee,
- const ImmutableCallSite *CS,
+ ImmutableCallSite CS,
Type *Ty, unsigned Idx,
const DataLayout &DL) const {
if (!CS) {
@@ -1379,12 +1379,12 @@ unsigned NVPTXTargetLowering::getArgumentAlignment(SDValue Callee,
}
unsigned Align = 0;
- const Value *DirectCallee = CS->getCalledFunction();
+ const Value *DirectCallee = CS.getCalledFunction();
if (!DirectCallee) {
// We don't have a direct function symbol, but that may be because of
// constant cast instructions in the call.
- const Instruction *CalleeI = CS->getInstruction();
+ const Instruction *CalleeI = CS.getInstruction();
assert(CalleeI && "Call target is not a function or derived value?");
// With bitcast'd call targets, the instruction will be the call
@@ -1433,7 +1433,7 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
bool &isTailCall = CLI.IsTailCall;
ArgListTy &Args = CLI.getArgs();
Type *RetTy = CLI.RetTy;
- ImmutableCallSite *CS = CLI.CS;
+ ImmutableCallSite CS = CLI.CS;
const DataLayout &DL = DAG.getDataLayout();
bool isABI = (STI.getSmVersion() >= 20);
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.h b/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
index cbe2dba3e66..362c3ba65e4 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
@@ -491,7 +491,7 @@ public:
std::string getPrototype(const DataLayout &DL, Type *, const ArgListTy &,
const SmallVectorImpl<ISD::OutputArg> &,
unsigned retAlignment,
- const ImmutableCallSite *CS) const;
+ ImmutableCallSite CS) const;
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
@@ -571,9 +571,8 @@ private:
SelectionDAG &DAG) const override;
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
- unsigned getArgumentAlignment(SDValue Callee, const ImmutableCallSite *CS,
- Type *Ty, unsigned Idx,
- const DataLayout &DL) const;
+ unsigned getArgumentAlignment(SDValue Callee, ImmutableCallSite CS, Type *Ty,
+ unsigned Idx, const DataLayout &DL) const;
};
} // namespace llvm
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index bf3430524bf..3fa2c76673c 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -4335,12 +4335,12 @@ needStackSlotPassParameters(const PPCSubtarget &Subtarget,
}
static bool
-hasSameArgumentList(const Function *CallerFn, ImmutableCallSite *CS) {
- if (CS->arg_size() != CallerFn->arg_size())
+hasSameArgumentList(const Function *CallerFn, ImmutableCallSite CS) {
+ if (CS.arg_size() != CallerFn->arg_size())
return false;
- ImmutableCallSite::arg_iterator CalleeArgIter = CS->arg_begin();
- ImmutableCallSite::arg_iterator CalleeArgEnd = CS->arg_end();
+ ImmutableCallSite::arg_iterator CalleeArgIter = CS.arg_begin();
+ ImmutableCallSite::arg_iterator CalleeArgEnd = CS.arg_end();
Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();
for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
@@ -4367,7 +4367,7 @@ bool
PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
SDValue Callee,
CallingConv::ID CalleeCC,
- ImmutableCallSite *CS,
+ ImmutableCallSite CS,
bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<ISD::InputArg> &Ins,
@@ -4676,7 +4676,7 @@ PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, SDValue &Chain,
bool isPatchPoint, bool hasNest,
SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass,
SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys,
- ImmutableCallSite *CS, const PPCSubtarget &Subtarget) {
+ ImmutableCallSite CS, const PPCSubtarget &Subtarget) {
bool isPPC64 = Subtarget.isPPC64();
bool isSVR4ABI = Subtarget.isSVR4ABI();
bool isELFv2ABI = Subtarget.isELFv2ABI();
@@ -4787,7 +4787,7 @@ PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, SDValue &Chain,
MachineMemOperand::MOInvariant)
: MachineMemOperand::MONone;
- MachinePointerInfo MPI(CS ? CS->getCalledValue() : nullptr);
+ MachinePointerInfo MPI(CS ? CS.getCalledValue() : nullptr);
SDValue LoadFuncPtr = DAG.getLoad(MVT::i64, dl, LDChain, Callee, MPI,
/* Alignment = */ 8, MMOFlags);
@@ -4917,7 +4917,7 @@ SDValue PPCTargetLowering::FinishCall(
SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue InFlag,
SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff,
unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins,
- SmallVectorImpl<SDValue> &InVals, ImmutableCallSite *CS) const {
+ SmallVectorImpl<SDValue> &InVals, ImmutableCallSite CS) const {
std::vector<EVT> NodeTys;
SmallVector<SDValue, 8> Ops;
unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, CallSeqStart, dl,
@@ -5025,10 +5025,10 @@ PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
CallingConv::ID CallConv = CLI.CallConv;
bool isVarArg = CLI.IsVarArg;
bool isPatchPoint = CLI.IsPatchPoint;
- ImmutableCallSite *CS = CLI.CS;
+ ImmutableCallSite CS = CLI.CS;
if (isTailCall) {
- if (Subtarget.useLongCalls() && !(CS && CS->isMustTailCall()))
+ if (Subtarget.useLongCalls() && !(CS && CS.isMustTailCall()))
isTailCall = false;
else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
isTailCall =
@@ -5056,7 +5056,7 @@ PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
}
}
- if (!isTailCall && CS && CS->isMustTailCall())
+ if (!isTailCall && CS && CS.isMustTailCall())
report_fatal_error("failed to perform tail call elimination on a call "
"site marked musttail");
@@ -5090,7 +5090,7 @@ SDValue PPCTargetLowering::LowerCall_32SVR4(
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
- ImmutableCallSite *CS) const {
+ ImmutableCallSite CS) const {
// See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
// of the 32-bit SVR4 ABI stack frame layout.
@@ -5324,7 +5324,7 @@ SDValue PPCTargetLowering::LowerCall_64SVR4(
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
- ImmutableCallSite *CS) const {
+ ImmutableCallSite CS) const {
bool isELFv2ABI = Subtarget.isELFv2ABI();
bool isLittleEndian = Subtarget.isLittleEndian();
unsigned NumOps = Outs.size();
@@ -5974,7 +5974,7 @@ SDValue PPCTargetLowering::LowerCall_Darwin(
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
- ImmutableCallSite *CS) const {
+ ImmutableCallSite CS) const {
unsigned NumOps = Outs.size();
EVT PtrVT = getPointerTy(DAG.getDataLayout());
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h
index 8f97fb4f6ec..e2f3812e2ff 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -907,7 +907,7 @@ namespace llvm {
IsEligibleForTailCallOptimization_64SVR4(
SDValue Callee,
CallingConv::ID CalleeCC,
- ImmutableCallSite *CS,
+ ImmutableCallSite CS,
bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<ISD::InputArg> &Ins,
@@ -973,7 +973,7 @@ namespace llvm {
SDValue &Callee, int SPDiff, unsigned NumBytes,
const SmallVectorImpl<ISD::InputArg> &Ins,
SmallVectorImpl<SDValue> &InVals,
- ImmutableCallSite *CS) const;
+ ImmutableCallSite CS) const;
SDValue
LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
@@ -1024,7 +1024,7 @@ namespace llvm {
const SmallVectorImpl<ISD::InputArg> &Ins,
const SDLoc &dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals,
- ImmutableCallSite *CS) const;
+ ImmutableCallSite CS) const;
SDValue LowerCall_64SVR4(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
bool isTailCall, bool isPatchPoint,
@@ -1033,7 +1033,7 @@ namespace llvm {
const SmallVectorImpl<ISD::InputArg> &Ins,
const SDLoc &dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals,
- ImmutableCallSite *CS) const;
+ ImmutableCallSite CS) const;
SDValue LowerCall_32SVR4(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
bool isTailCall, bool isPatchPoint,
@@ -1042,7 +1042,7 @@ namespace llvm {
const SmallVectorImpl<ISD::InputArg> &Ins,
const SDLoc &dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals,
- ImmutableCallSite *CS) const;
+ ImmutableCallSite CS) const;
SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index 40485bfb606..422f5aba8da 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -692,9 +692,9 @@ SparcTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
}
static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee,
- ImmutableCallSite *CS) {
+ ImmutableCallSite CS) {
if (CS)
- return CS->hasFnAttr(Attribute::ReturnsTwice);
+ return CS.hasFnAttr(Attribute::ReturnsTwice);
const Function *CalleeFn = nullptr;
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
@@ -1334,7 +1334,7 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
// Set inreg flag manually for codegen generated library calls that
// return float.
- if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && CLI.CS == nullptr)
+ if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && !CLI.CS)
CLI.Ins[0].Flags.setInReg();
RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64);
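The Sparc hunk above rewrites the pointer comparison 'CLI.CS == nullptr' as '!CLI.CS': with a by-value call site there is no pointer to compare, so the check leans on CallSite's explicit operator bool. A hedged sketch (hasNoIRCallSite is a hypothetical name):

#include "llvm/IR/CallSite.h"

// True when lowering a codegen-generated library call, i.e. when no IR call
// site exists; assumes CallSite's operator bool tests the wrapped
// Instruction*.
static bool hasNoIRCallSite(llvm::ImmutableCallSite CS) {
  return !CS;  // formerly written as: CS == nullptr, when CS was a pointer
}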
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
index 21df22c7a6d..7724a75bec2 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -314,7 +314,7 @@ SDValue WebAssemblyTargetLowering::LowerCall(
// required, fail. Otherwise, just disable them.
if ((CallConv == CallingConv::Fast && CLI.IsTailCall &&
MF.getTarget().Options.GuaranteedTailCallOpt) ||
- (CLI.CS && CLI.CS->isMustTailCall()))
+ (CLI.CS && CLI.CS.isMustTailCall()))
fail(DL, DAG, "WebAssembly doesn't support tail call yet");
CLI.IsTailCall = false;
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 24a12c3ccc7..d488ed7d124 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -3335,8 +3335,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
bool IsSibcall = false;
X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
auto Attr = MF.getFunction()->getFnAttribute("disable-tail-calls");
- const CallInst *CI =
- CLI.CS ? dyn_cast<CallInst>(CLI.CS->getInstruction()) : nullptr;
+ const auto *CI = dyn_cast_or_null<CallInst>(CLI.CS.getInstruction());
const Function *Fn = CI ? CI->getCalledFunction() : nullptr;
bool HasNCSR = (CI && CI->hasFnAttr("no_caller_saved_registers")) ||
(Fn && Fn->hasFnAttribute("no_caller_saved_registers"));
@@ -3360,7 +3359,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
isTailCall = false;
}
- bool IsMustTail = CLI.CS && CLI.CS->isMustTailCall();
+ bool IsMustTail = CLI.CS && CLI.CS.isMustTailCall();
if (IsMustTail) {
// Force this to be a tail call. The verifier rules are enough to ensure
// that we can lower this successfully without moving the return address
@@ -3764,7 +3763,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// is thrown, the runtime will not restore CSRs.
// FIXME: Model this more precisely so that we can register allocate across
// the normal edge and spill and fill across the exceptional edge.
- if (!Is64Bit && CLI.CS && CLI.CS->isInvoke()) {
+ if (!Is64Bit && CLI.CS && CLI.CS.isInvoke()) {
const Function *CallerFn = MF.getFunction();
EHPersonality Pers =
CallerFn->hasPersonalityFn()