author     Saleem Abdulrasool <compnerd@compnerd.org>   2016-05-31 01:48:07 +0000
committer  Saleem Abdulrasool <compnerd@compnerd.org>   2016-05-31 01:48:07 +0000
commit     d2f705ddf942467911b2038d64bc04c7fddfdcdd
tree       8c95f986123b640124472491c3fc06c54cbf34a1
parent     8df24c3cfd12b25415169ea8ae69ba8d64a2254f
X86: permit using SjLj EH on x86 targets as an option
This adds support in the backend for using SjLj EH as an exception model. This
is *NOT* the default model, and requires explicitly opting into it from the
frontend. GCC supports this model; for MinGW it can still be enabled via the
`--using-sjlj-exceptions` option.
Addresses PR27749!
llvm-svn: 271244
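
For readers who want to try the new model, here is a minimal sketch of opting in
from llc. The `-exception-model` values and the `_Unwind_SjLj_Resume` libcall
rename come from the patch below; the file name and the function names
(@may_throw, @caller) are made up for illustration, and the committed test
llvm/test/CodeGen/X86/sjlj-eh.ll remains the authoritative example.

    ; sjlj-opt-in.ll (hypothetical file name); lower with the SjLj model:
    ;   llc -mtriple i386-windows-gnu -exception-model sjlj -filetype asm -o - sjlj-opt-in.ll
    declare void @may_throw()
    declare i32 @__gxx_personality_sj0(...)

    define void @caller() personality i8* bitcast (i32 (...)* @__gxx_personality_sj0 to i8*) {
    entry:
      invoke void @may_throw()
              to label %cont unwind label %lpad

    lpad:
      ; Cleanup-only landing pad. Under -exception-model sjlj the resume below is
      ; expected to end up calling _Unwind_SjLj_Resume rather than _Unwind_Resume,
      ; per the setLibcallName change in X86ISelLowering.cpp.
      %ehinfo = landingpad { i8*, i32 }
              cleanup
      resume { i8*, i32 } %ehinfo

    cont:
      ret void
    }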
llvm/include/llvm/Analysis/EHPersonalities.h             |   2
llvm/include/llvm/CodeGen/CommandFlags.h                 |  17
llvm/include/llvm/MC/MCAsmInfo.h                         |   4
llvm/lib/Analysis/EHPersonalities.cpp                    |   2
llvm/lib/CodeGen/LLVMTargetMachine.cpp                   |   3
llvm/lib/Target/X86/X86ISelLowering.cpp                  | 261
llvm/lib/Target/X86/X86ISelLowering.h                    |  10
llvm/lib/Target/X86/X86InstrInfo.td                      |   7
llvm/lib/Transforms/InstCombine/InstructionCombining.cpp |   2
llvm/test/CodeGen/X86/sjlj-eh.ll                         |  72
10 files changed, 379 insertions(+), 1 deletion(-)
diff --git a/llvm/include/llvm/Analysis/EHPersonalities.h b/llvm/include/llvm/Analysis/EHPersonalities.h
index c647a4932b6..a26c575cfe1 100644
--- a/llvm/include/llvm/Analysis/EHPersonalities.h
+++ b/llvm/include/llvm/Analysis/EHPersonalities.h
@@ -23,7 +23,9 @@ enum class EHPersonality {
   Unknown,
   GNU_Ada,
   GNU_C,
+  GNU_C_SjLj,
   GNU_CXX,
+  GNU_CXX_SjLj,
   GNU_ObjC,
   MSVC_X86SEH,
   MSVC_Win64SEH,
diff --git a/llvm/include/llvm/CodeGen/CommandFlags.h b/llvm/include/llvm/CodeGen/CommandFlags.h
index 9bc1442b258..defc219ce94 100644
--- a/llvm/include/llvm/CodeGen/CommandFlags.h
+++ b/llvm/include/llvm/CodeGen/CommandFlags.h
@@ -90,6 +90,22 @@ CMModel("code-model",
                          "Large code model"),
               clEnumValEnd));
 
+cl::opt<llvm::ExceptionHandling>
+ExceptionModel("exception-model",
+               cl::desc("exception model"),
+               cl::init(ExceptionHandling::None),
+               cl::values(clEnumValN(ExceptionHandling::None, "default",
+                                     "default exception handling model"),
+                          clEnumValN(ExceptionHandling::DwarfCFI, "dwarf",
+                                     "DWARF-like CFI based exception handling"),
+                          clEnumValN(ExceptionHandling::SjLj, "sjlj",
+                                     "SjLj exception handling"),
+                          clEnumValN(ExceptionHandling::ARM, "arm",
+                                     "ARM EHABI exceptions"),
+                          clEnumValN(ExceptionHandling::WinEH, "wineh",
+                                     "Windows exception model"),
+                          clEnumValEnd));
+
 cl::opt<TargetMachine::CodeGenFileType>
 FileType("filetype", cl::init(TargetMachine::CGFT_AssemblyFile),
   cl::desc("Choose a file type (not all types are supported by all targets):"),
@@ -293,6 +309,7 @@ static inline TargetOptions InitTargetOptionsFromCodeGenFlags() {
   Options.FunctionSections = FunctionSections;
   Options.UniqueSectionNames = UniqueSectionNames;
   Options.EmulatedTLS = EmulatedTLS;
+  Options.ExceptionModel = ExceptionModel;
 
   Options.MCOptions = InitMCTargetOptionsFromFlags();
   Options.JTType = JTableType;
diff --git a/llvm/include/llvm/MC/MCAsmInfo.h b/llvm/include/llvm/MC/MCAsmInfo.h
index b879e6d43e2..82652838742 100644
--- a/llvm/include/llvm/MC/MCAsmInfo.h
+++ b/llvm/include/llvm/MC/MCAsmInfo.h
@@ -535,6 +535,10 @@ public:
   ExceptionHandling getExceptionHandlingType() const { return ExceptionsType; }
   WinEH::EncodingType getWinEHEncodingType() const { return WinEHEncodingType; }
 
+  void setExceptionsType(ExceptionHandling EH) {
+    ExceptionsType = EH;
+  }
+
   /// Returns true if the exception handling method for the platform uses call
   /// frame information to unwind.
   bool usesCFIForEH() const {
diff --git a/llvm/lib/Analysis/EHPersonalities.cpp b/llvm/lib/Analysis/EHPersonalities.cpp
index 90aa8f58f35..5f951f5112e 100644
--- a/llvm/lib/Analysis/EHPersonalities.cpp
+++ b/llvm/lib/Analysis/EHPersonalities.cpp
@@ -27,7 +27,9 @@ EHPersonality llvm::classifyEHPersonality(const Value *Pers) {
   return StringSwitch<EHPersonality>(F->getName())
     .Case("__gnat_eh_personality", EHPersonality::GNU_Ada)
     .Case("__gxx_personality_v0", EHPersonality::GNU_CXX)
+    .Case("__gxx_personality_sj0", EHPersonality::GNU_CXX_SjLj)
     .Case("__gcc_personality_v0", EHPersonality::GNU_C)
+    .Case("__gcc_personality_sj0", EHPersonality::GNU_C_SjLj)
    .Case("__objc_personality_v0", EHPersonality::GNU_ObjC)
    .Case("_except_handler3", EHPersonality::MSVC_X86SEH)
    .Case("_except_handler4", EHPersonality::MSVC_X86SEH)
diff --git a/llvm/lib/CodeGen/LLVMTargetMachine.cpp b/llvm/lib/CodeGen/LLVMTargetMachine.cpp
index 346555a3deb..b164cad7d14 100644
--- a/llvm/lib/CodeGen/LLVMTargetMachine.cpp
+++ b/llvm/lib/CodeGen/LLVMTargetMachine.cpp
@@ -75,6 +75,9 @@ void LLVMTargetMachine::initAsmInfo() {
 
   TmpAsmInfo->setRelaxELFRelocations(Options.RelaxELFRelocations);
 
+  if (Options.ExceptionModel != ExceptionHandling::None)
+    TmpAsmInfo->setExceptionsType(Options.ExceptionModel);
+
   AsmInfo = TmpAsmInfo;
 }
 
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index f3b4c70b712..12240ff5407 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -405,6 +405,9 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
   // LLVM/Clang supports zero-cost DWARF exception handling.
   setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
   setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
+  setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
+  if (TM.Options.ExceptionModel == ExceptionHandling::SjLj)
+    setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
 
   // Darwin ABI issue.
   for (auto VT : { MVT::i32, MVT::i64 }) {
@@ -448,7 +451,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
 
   // FIXME - use subtarget debug flags
   if (!Subtarget.isTargetDarwin() && !Subtarget.isTargetELF() &&
-      !Subtarget.isTargetCygMing() && !Subtarget.isTargetWin64()) {
+      !Subtarget.isTargetCygMing() && !Subtarget.isTargetWin64() &&
+      TM.Options.ExceptionModel != ExceptionHandling::SjLj) {
     setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
   }
 
@@ -17841,6 +17845,16 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget &Subtarget
     return DAG.getNode(Opcode, dl, VTs, NewOps);
   }
 
+  case Intrinsic::eh_sjlj_lsda: {
+    MachineFunction &MF = DAG.getMachineFunction();
+    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+    MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
+    auto &Context = MF.getMMI().getContext();
+    MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
+                                            Twine(MF.getFunctionNumber()));
+    return DAG.getNode(X86ISD::Wrapper, dl, VT, DAG.getMCSymbol(S, PtrVT));
+  }
+
   case Intrinsic::x86_seh_lsda: {
     // Compute the symbol for the LSDA. We know it'll get emitted later.
     MachineFunction &MF = DAG.getMachineFunction();
@@ -18487,6 +18501,13 @@ SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
                      Op.getOperand(0), Op.getOperand(1));
 }
 
+SDValue X86TargetLowering::lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
+                                                       SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  return DAG.getNode(X86ISD::EH_SJLJ_SETUP_DISPATCH, DL, MVT::Other,
+                     Op.getOperand(0));
+}
+
 static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
   return Op.getOperand(0);
 }
@@ -21401,6 +21422,8 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
   case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
   case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
   case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
+  case ISD::EH_SJLJ_SETUP_DISPATCH:
+    return lowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
   case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
   case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
   case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
@@ -21827,6 +21850,8 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
   case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
   case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP";
   case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP";
+  case X86ISD::EH_SJLJ_SETUP_DISPATCH:
+    return "X86ISD::EH_SJLJ_SETUP_DISPATCH";
   case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
   case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
   case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
@@ -23613,6 +23638,237 @@ X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
   return MBB;
 }
 
+void X86TargetLowering::SetupEntryBlockForSjLj(MachineInstr *MI,
+                                               MachineBasicBlock *MBB,
+                                               MachineBasicBlock *DispatchBB,
+                                               int FI) const {
+  DebugLoc DL = MI->getDebugLoc();
+  MachineFunction *MF = MBB->getParent();
+  MachineRegisterInfo *MRI = &MF->getRegInfo();
+  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
+
+  MVT PVT = getPointerTy(MF->getDataLayout());
+  assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");
+
+  unsigned Op = 0;
+  unsigned VR = 0;
+
+  Reloc::Model RM = MF->getTarget().getRelocationModel();
+  bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
+                     (RM == Reloc::Static || RM == Reloc::DynamicNoPIC);
+
+  if (UseImmLabel) {
+    Op = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
+  } else {
+    const TargetRegisterClass *TRC =
+        (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
+    VR = MRI->createVirtualRegister(TRC);
+    Op = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
+
+    /* const X86InstrInfo *XII = static_cast<const X86InstrInfo *>(TII); */
+
+    if (Subtarget.is64Bit())
+      BuildMI(*MBB, MI, DL, TII->get(X86::LEA64r), VR)
+          .addReg(X86::RIP)
+          .addImm(1)
+          .addReg(0)
+          .addMBB(DispatchBB)
+          .addReg(0);
+    else
+      BuildMI(*MBB, MI, DL, TII->get(X86::LEA32r), VR)
+          .addReg(0) /* XII->getGlobalBaseReg(MF) */
+          .addImm(1)
+          .addReg(0)
+          .addMBB(DispatchBB, Subtarget.classifyBlockAddressReference())
+          .addReg(0);
+  }
+
+  MachineInstrBuilder MIB = BuildMI(*MBB, MI, DL, TII->get(Op));
+  addFrameReference(MIB, FI, 36);
+  if (UseImmLabel)
+    MIB.addMBB(DispatchBB);
+  else
+    MIB.addReg(VR);
+}
+
+MachineBasicBlock *
+X86TargetLowering::EmitSjLjDispatchBlock(MachineInstr *MI,
+                                         MachineBasicBlock *BB) const {
+  DebugLoc DL = MI->getDebugLoc();
+  MachineFunction *MF = BB->getParent();
+  MachineModuleInfo *MMI = &MF->getMMI();
+  MachineFrameInfo *MFI = MF->getFrameInfo();
+  MachineRegisterInfo *MRI = &MF->getRegInfo();
+  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
+  int FI = MFI->getFunctionContextIndex();
+
+  // Get a mapping of the call site numbers to all of the landing pads they're
+  // associated with.
+  DenseMap<unsigned, SmallVector<MachineBasicBlock *, 2>> CallSiteNumToLPad;
+  unsigned MaxCSNum = 0;
+  for (auto &MBB : *MF) {
+    if (!MBB.isEHPad())
+      continue;
+
+    MCSymbol *Sym = nullptr;
+    for (const auto &MI : MBB) {
+      if (MI.isDebugValue())
+        continue;
+
+      assert(MI.isEHLabel() && "expected EH_LABEL");
+      Sym = MI.getOperand(0).getMCSymbol();
+      break;
+    }
+
+    if (!MMI->hasCallSiteLandingPad(Sym))
+      continue;
+
+    for (unsigned CSI : MMI->getCallSiteLandingPad(Sym)) {
+      CallSiteNumToLPad[CSI].push_back(&MBB);
+      MaxCSNum = std::max(MaxCSNum, CSI);
+    }
+  }
+
+  // Get an ordered list of the machine basic blocks for the jump table.
+  std::vector<MachineBasicBlock *> LPadList;
+  SmallPtrSet<MachineBasicBlock *, 32> InvokeBBs;
+  LPadList.reserve(CallSiteNumToLPad.size());
+
+  for (unsigned CSI = 1; CSI <= MaxCSNum; ++CSI) {
+    for (auto &LP : CallSiteNumToLPad[CSI]) {
+      LPadList.push_back(LP);
+      InvokeBBs.insert(LP->pred_begin(), LP->pred_end());
+    }
+  }
+
+  assert(!LPadList.empty() &&
+         "No landing pad destinations for the dispatch jump table!");
+
+  // Create the MBBs for the dispatch code.
+
+  // Shove the dispatch's address into the return slot in the function context.
+  MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
+  DispatchBB->setIsEHPad(true);
+
+  MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
+  BuildMI(TrapBB, DL, TII->get(X86::TRAP));
+  DispatchBB->addSuccessor(TrapBB);
+
+  MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
+  DispatchBB->addSuccessor(DispContBB);
+
+  // Insert MBBs.
+  MF->push_back(DispatchBB);
+  MF->push_back(DispContBB);
+  MF->push_back(TrapBB);
+
+  // Insert code into the entry block that creates and registers the function
+  // context.
+  SetupEntryBlockForSjLj(MI, BB, DispatchBB, FI);
+
+  // Create the jump table and associated information
+  MachineJumpTableInfo *JTI =
+      MF->getOrCreateJumpTableInfo(getJumpTableEncoding());
+  unsigned MJTI = JTI->createJumpTableIndex(LPadList);
+
+  const X86InstrInfo *XII = static_cast<const X86InstrInfo *>(TII);
+  const X86RegisterInfo &RI = XII->getRegisterInfo();
+
+  // Add a register mask with no preserved registers.  This results in all
+  // registers being marked as clobbered.
+  if (RI.hasBasePointer(*MF)) {
+    const bool FPIs64Bit =
+        Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
+    X86MachineFunctionInfo *MFI = MF->getInfo<X86MachineFunctionInfo>();
+    MFI->setRestoreBasePointer(MF);
+
+    unsigned FP = RI.getFrameRegister(*MF);
+    unsigned BP = RI.getBaseRegister();
+    unsigned Op = FPIs64Bit ? X86::MOV64rm : X86::MOV32rm;
+    addRegOffset(BuildMI(DispatchBB, DL, TII->get(Op), BP), FP, true,
+                 MFI->getRestoreBasePointerOffset())
+        .addRegMask(RI.getNoPreservedMask());
+  } else {
+    BuildMI(DispatchBB, DL, TII->get(X86::NOOP))
+        .addRegMask(RI.getNoPreservedMask());
+  }
+
+  unsigned IReg = MRI->createVirtualRegister(&X86::GR32RegClass);
+  addFrameReference(BuildMI(DispatchBB, DL, TII->get(X86::MOV32rm), IReg), FI,
+                    4);
+  BuildMI(DispatchBB, DL, TII->get(X86::CMP32ri))
+      .addReg(IReg)
+      .addImm(LPadList.size());
+  BuildMI(DispatchBB, DL, TII->get(X86::JA_1)).addMBB(TrapBB);
+
+  unsigned JReg = MRI->createVirtualRegister(&X86::GR32RegClass);
+  BuildMI(DispContBB, DL, TII->get(X86::SUB32ri), JReg)
+      .addReg(IReg)
+      .addImm(1);
+  BuildMI(DispContBB, DL,
+          TII->get(Subtarget.is64Bit() ? X86::JMP64m : X86::JMP32m))
+      .addReg(0)
+      .addImm(Subtarget.is64Bit() ? 8 : 4)
+      .addReg(JReg)
+      .addJumpTableIndex(MJTI)
+      .addReg(0);
+
+  // Add the jump table entries as successors to the MBB.
+  SmallPtrSet<MachineBasicBlock *, 8> SeenMBBs;
+  for (auto &LP : LPadList)
+    if (SeenMBBs.insert(LP).second)
+      DispContBB->addSuccessor(LP);
+
+  // N.B. the order the invoke BBs are processed in doesn't matter here.
+  SmallVector<MachineBasicBlock *, 64> MBBLPads;
+  const MCPhysReg *SavedRegs =
+      Subtarget.getRegisterInfo()->getCalleeSavedRegs(MF);
+  for (MachineBasicBlock *MBB : InvokeBBs) {
+    // Remove the landing pad successor from the invoke block and replace it
+    // with the new dispatch block
+    for (auto MBBS : make_range(MBB->succ_rbegin(), MBB->succ_rend())) {
+      if (MBBS->isEHPad()) {
+        MBB->removeSuccessor(MBBS);
+        MBBLPads.push_back(MBBS);
+      }
+    }
+
+    MBB->addSuccessor(DispatchBB);
+
+    // Find the invoke call and mark all of the callee-saved registers as
+    // 'implicit defined' so that they're spilled.  This prevents code from
+    // moving instructions to before the EH block, where they will never be
+    // executed.
+    for (auto &II : make_range(MBB->rbegin(), MBB->rend())) {
+      if (!II.isCall())
+        continue;
+
+      DenseMap<unsigned, bool> DefRegs;
+      for (auto &MOp : II.operands())
+        if (MOp.isReg())
+          DefRegs[MOp.getReg()] = true;
+
+      MachineInstrBuilder MIB(*MF, &II);
+      for (unsigned RI = 0; SavedRegs[RI]; ++RI) {
+        unsigned Reg = SavedRegs[RI];
+        if (!DefRegs[Reg])
+          MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
+      }
+
+      break;
+    }
+  }
+
+  // Mark all former landing pads as non-landing pads.  The dispatch is the
+  // only landing pad now.
+  for (auto &LP : MBBLPads)
+    LP->setIsEHPad(false);
+
+  // The instruction is gone now.
+  MI->eraseFromParent();
+  return BB;
+}
+
 // Replace 213-type (isel default) FMA3 instructions with 231-type for
 // accumulator loops. Writing back to the accumulator allows the coalescer
 // to remove extra copies in the loop.
@@ -23917,6 +24173,9 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
   case X86::EH_SjLj_LongJmp64:
     return emitEHSjLjLongJmp(MI, BB);
 
+  case X86::Int_eh_sjlj_setup_dispatch:
+    return EmitSjLjDispatchBlock(MI, BB);
+
   case TargetOpcode::STATEPOINT:
     // As an implementation detail, STATEPOINT shares the STACKMAP format at
     // this point in the process. We diverge later.
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index 44125061a6c..ab7cf955cce 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -272,6 +272,9 @@ namespace llvm {
       // SjLj exception handling longjmp.
       EH_SJLJ_LONGJMP,
 
+      // SjLj exception handling dispatch.
+      EH_SJLJ_SETUP_DISPATCH,
+
       /// Tail call return. See X86TargetLowering::LowerCall for
       /// the list of operands.
       TC_RETURN,
@@ -1093,6 +1096,7 @@ namespace llvm {
     SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
     SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
     SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
+    SDValue lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const;
@@ -1148,6 +1152,9 @@ namespace llvm {
 
     bool needsCmpXchgNb(Type *MemType) const;
 
+    void SetupEntryBlockForSjLj(MachineInstr *MI, MachineBasicBlock *MBB,
+                                MachineBasicBlock *DispatchBB, int FI) const;
+
     // Utility function to emit the low-level va_arg code for X86-64.
     MachineBasicBlock *EmitVAARG64WithCustomInserter(
                        MachineInstr *MI,
@@ -1188,6 +1195,9 @@ namespace llvm {
     MachineBasicBlock *emitFMA3Instr(MachineInstr *MI,
                                      MachineBasicBlock *MBB) const;
 
+    MachineBasicBlock *EmitSjLjDispatchBlock(MachineInstr *MI,
+                                             MachineBasicBlock *MBB) const;
+
     /// Emit nodes that will be selected as "test Op0,Op0", or something
     /// equivalent, for use with the given x86 condition code.
     SDValue EmitTest(SDValue Op0, unsigned X86CC, SDLoc dl,
diff --git a/llvm/lib/Target/X86/X86InstrInfo.td b/llvm/lib/Target/X86/X86InstrInfo.td
index 10a8d0efa7f..0598dd8c2e9 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.td
+++ b/llvm/lib/Target/X86/X86InstrInfo.td
@@ -232,6 +232,9 @@ def X86eh_sjlj_setjmp  : SDNode<"X86ISD::EH_SJLJ_SETJMP",
 def X86eh_sjlj_longjmp : SDNode<"X86ISD::EH_SJLJ_LONGJMP",
                                 SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>,
                                 [SDNPHasChain, SDNPSideEffect]>;
+def X86eh_sjlj_setup_dispatch : SDNode<"X86ISD::EH_SJLJ_SETUP_DISPATCH",
+                                       SDTypeProfile<0, 0, []>,
+                                       [SDNPHasChain, SDNPSideEffect]>;
 
 def X86tcret : SDNode<"X86ISD::TC_RETURN", SDT_X86TCRET,
                       [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
@@ -1086,6 +1089,10 @@ def LEAVE64  : I<0xC9, RawFrm,
 // Miscellaneous Instructions.
 //
 
+let isBarrier = 1, hasSideEffects = 1, usesCustomInserter = 1 in
+  def Int_eh_sjlj_setup_dispatch
+      : PseudoI<(outs), (ins), [(X86eh_sjlj_setup_dispatch)]>;
+
 let Defs = [ESP], Uses = [ESP], hasSideEffects=0 in {
 let mayLoad = 1, SchedRW = [WriteLoad] in {
 def POP16r  : I<0x58, AddRegFrm, (outs GR16:$reg), (ins), "pop{w}\t$reg", [],
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 7a5964519a7..9e97d7b2cc0 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2388,6 +2388,7 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
 static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo) {
   switch (Personality) {
   case EHPersonality::GNU_C:
+  case EHPersonality::GNU_C_SjLj:
   case EHPersonality::Rust:
     // The GCC C EH and Rust personality only exists to support cleanups, so
     // it's not clear what the semantics of catch clauses are.
@@ -2399,6 +2400,7 @@ static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo) {
     // match foreign exceptions (or didn't, before gcc-4.7).
     return false;
   case EHPersonality::GNU_CXX:
+  case EHPersonality::GNU_CXX_SjLj:
   case EHPersonality::GNU_ObjC:
   case EHPersonality::MSVC_X86SEH:
   case EHPersonality::MSVC_Win64SEH:
diff --git a/llvm/test/CodeGen/X86/sjlj-eh.ll b/llvm/test/CodeGen/X86/sjlj-eh.ll
new file mode 100644
index 00000000000..4d2e4e821f4
--- /dev/null
+++ b/llvm/test/CodeGen/X86/sjlj-eh.ll
@@ -0,0 +1,72 @@
+; RUN: llc -mtriple i386-windows-gnu -exception-model sjlj -filetype asm -o - %s | FileCheck %s
+
+declare void @_Z20function_that_throwsv()
+declare i32 @__gxx_personality_sj0(...)
+declare i8* @__cxa_begin_catch(i8*)
+declare void @__cxa_end_catch()
+
+define void @_Z8functionv() personality i8* bitcast (i32 (...)* @__gxx_personality_sj0 to i8*) {
+entry:
+  invoke void @_Z20function_that_throwsv()
+          to label %try.cont unwind label %lpad
+
+lpad:
+  %0 = landingpad { i8*, i32 }
+          catch i8* null
+  %1 = extractvalue { i8*, i32 } %0, 0
+  %2 = tail call i8* @__cxa_begin_catch(i8* %1)
+  tail call void @__cxa_end_catch()
+  br label %try.cont
+
+try.cont:
+  ret void
+}
+
+; struct _Unwind_FunctionContext {
+;   +00 struct _Unwind_FunctionContext *prev;    -64(%ebp)
+;   +04 uintptr_t __callsite;                    -60(%ebp)
+;   +08 uintptr_t __buffer[4];                   -44(%ebp)
+;   +28 __personality_routine __personality;     -40(%ebp)
+;   +32 uintptr_t __lsda;                        -36(%ebp)
+;   +36 void *__jbuf[];                          -32(%ebp)
+; };
+
+
+; CHECK-LABEL: __Z8functionv:
+;     struct _Unwind_FunctionContext UFC;
+;
+;     UFC.__personality = __gxx_personality_sj0
+; CHECK: movl $___gxx_personality_sj0, -40(%ebp)
+;     UFC.__lsda = $LSDA
+; CHECK: movl $[[LSDA:GCC_except_table[0-9]+]], -36(%ebp)
+;     UFC.__jbuf[0] = $EBP
+; CHECK: movl %ebp, -32(%ebp)
+;     UFC.__jbuf[2] = $ESP
+; CHECK: movl %esp, -24(%ebp)
+;     UFC.__jbuf[1] = $EIP
+; CHECK: movl $[[RESUME:LBB[0-9]+_[0-9]+]], -28(%ebp)
+;     UFC.__callsite = 1
+; CHECK: movl $1, -60(%ebp)
+;     _Unwind_SjLj_Register(&UFC);
+; CHECK: leal -64(%ebp), %eax
+; CHECK: pushl %eax
+; CHECK: calll __Unwind_SjLj_Register
+; CHECK: addl $4, %esp
+;     function_that_throws();
+; CHECK: calll __Z20function_that_throwsv
+;     _Unwind_SjLj_Unregister(&UFC);
+; CHECK: leal -64(%ebp), %eax
+; CHECK: calll __Unwind_SjLj_Unregister
+;
+; CHECK: [[RESUME]]:
+; CHECK: leal -64(%ebp), %esi
+;     assert(UFC.__callsite <= 1);
+; CHECK: movl -60(%ebp), %eax
+; CHECK: cmpl $1, %eax
+; CHECK: jbe [[CONT:LBB[0-9]+_[0-9]+]]
+; CHECK: ud2
+; CHECK: [[CONT]]:
+;     *Handlers[--UFC.__callsite]
+; CHECK: subl $1, %eax
+; CHECK: jmpl *LJTI
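
One part of the patch the test above does not exercise is the plain C SjLj
personality: classifyEHPersonality now maps __gcc_personality_sj0 to
EHPersonality::GNU_C_SjLj, and instcombine groups it with GNU_C when folding
landingpad clauses. A small illustrative sketch, not part of the commit, with a
hypothetical @do_work callee:

    declare i32 @__gcc_personality_sj0(...)
    declare void @do_work()

    ; With this patch the personality below is classified as
    ; EHPersonality::GNU_C_SjLj instead of EHPersonality::Unknown.
    define void @c_cleanup() personality i8* bitcast (i32 (...)* @__gcc_personality_sj0 to i8*) {
    entry:
      invoke void @do_work()
              to label %done unwind label %lpad

    lpad:
      ; cleanup-only landing pad; the GCC C personality exists to run cleanups
      %ehinfo = landingpad { i8*, i32 }
              cleanup
      resume { i8*, i32 } %ehinfo

    done:
      ret void
    }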