Diffstat (limited to 'llvm/lib')
 llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp | 19
 llvm/lib/Target/X86/X86ISelLowering.cpp               | 49
 llvm/lib/Target/X86/X86ISelLowering.h                 |  3
 llvm/lib/Target/X86/X86InstrInfo.cpp                  | 29
 4 files changed, 90 insertions(+), 10 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 7bb1929401a..b1d247e74fd 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -6783,6 +6783,21 @@ SelectionDAGBuilder::LowerCallOperands(const CallInst &CI, unsigned ArgIdx,
 
 /// \brief Add a stack map intrinsic call's live variable operands to a stackmap
 /// or patchpoint target node's operand list.
+///
+/// Constants are converted to TargetConstants purely as an optimization to
+/// avoid constant materialization and register allocation.
+///
+/// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not
+/// generate address computation nodes, and so ExpandISelPseudo can convert the
+/// TargetFrameIndex into a DirectMemRefOp StackMap location. This avoids
+/// address materialization and register allocation, but may also be required
+/// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an
+/// alloca in the entry block, then the runtime may assume that the alloca's
+/// StackMap location can be read immediately after compilation and that the
+/// location is valid at any point during execution (this is similar to the
+/// assumption made by the llvm.gcroot intrinsic). If the alloca's location were
+/// only available in a register, then the runtime would need to trap when
+/// execution reaches the StackMap in order to read the alloca's location.
 static void addStackMapLiveVars(const CallInst &CI, unsigned StartIdx,
                                 SmallVectorImpl<SDValue> &Ops,
                                 SelectionDAGBuilder &Builder) {
@@ -6793,6 +6808,10 @@ static void addStackMapLiveVars(const CallInst &CI, unsigned StartIdx,
         Builder.DAG.getTargetConstant(StackMaps::ConstantOp, MVT::i64));
       Ops.push_back(
         Builder.DAG.getTargetConstant(C->getSExtValue(), MVT::i64));
+    } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(OpVal)) {
+      const TargetLowering &TLI = Builder.DAG.getTargetLoweringInfo();
+      Ops.push_back(
+        Builder.DAG.getTargetFrameIndex(FI->getIndex(), TLI.getPointerTy()));
     } else
       Ops.push_back(OpVal);
   }
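Note: a minimal frontend-side sketch of the case the new doc comment describes, a stackmap that directly uses an entry-block alloca. This is illustrative only (the function name and the ID/shadow values are invented), but it uses only standard IRBuilder and Intrinsic APIs:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/IR/Module.h"
    using namespace llvm;

    // Emits: %slot = alloca i64
    //        call void @llvm.experimental.stackmap(i64 42, i32 0, i64* %slot)
    // With this patch, %slot reaches the STACKMAP node as a TargetFrameIndex
    // and is recorded as a direct stack location instead of being
    // materialized into a register.
    static void emitStackMapOverAlloca(Module &M, IRBuilder<> &B) {
      Value *Slot = B.CreateAlloca(B.getInt64Ty(), nullptr, "slot");
      Value *Args[] = { B.getInt64(42), // stackmap ID, arbitrary here
                        B.getInt32(0),  // no shadow bytes
                        Slot };
      B.CreateCall(Intrinsic::getDeclaration(&M,
                                             Intrinsic::experimental_stackmap),
                   Args);
    }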
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index f4c926ec8b6..fc52c0cd669 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -15811,6 +15811,51 @@ X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
   return MBB;
 }
 
+/// Convert any TargetFrameIndex operands into the x86-specific pattern of five
+/// memory operands that is recognized by PrologEpilogInserter.
+MachineBasicBlock *
+X86TargetLowering::emitPatchPoint(MachineInstr *MI,
+                                  MachineBasicBlock *MBB) const {
+  const TargetMachine &TM = getTargetMachine();
+  const X86InstrInfo *TII = static_cast<const X86InstrInfo*>(TM.getInstrInfo());
+
+  // MI changes inside this loop as we grow operands.
+  for (unsigned OperIdx = 0; OperIdx != MI->getNumOperands(); ++OperIdx) {
+    MachineOperand &MO = MI->getOperand(OperIdx);
+    if (!MO.isFI())
+      continue;
+
+    // foldMemoryOperand builds a new MI after replacing a single FI operand
+    // with the canonical set of five x86 addressing-mode operands.
+    int FI = MO.getIndex();
+    MachineFunction &MF = *MBB->getParent();
+    SmallVector<unsigned, 1> FIOps(1, OperIdx);
+    MachineInstr *NewMI = TII->foldMemoryOperandImpl(MF, MI, FIOps, FI);
+    assert(NewMI && "Cannot fold frame index operand into stackmap.");
+
+    // Inherit previous memory operands.
+    NewMI->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+    assert(NewMI->mayLoad() && "Folded a stackmap use to a non-load!");
+
+    // Add a new memory operand for this FI.
+    const MachineFrameInfo &MFI = *MF.getFrameInfo();
+    assert(MFI.getObjectOffset(FI) != -1);
+    MachineMemOperand *MMO =
+      MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
+                              MachineMemOperand::MOLoad,
+                              TM.getDataLayout()->getPointerSize(),
+                              MFI.getObjectAlignment(FI));
+    NewMI->addMemOperand(MF, MMO);
+
+    // Replace the instruction and update the operand index.
+    MBB->insert(MachineBasicBlock::iterator(MI), NewMI);
+    OperIdx += (NewMI->getNumOperands() - MI->getNumOperands()) - 1;
+    MI->eraseFromParent();
+    MI = NewMI;
+  }
+  return MBB;
+}
+
 MachineBasicBlock *
 X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                MachineBasicBlock *BB) const {
@@ -16038,6 +16083,10 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
   case X86::EH_SjLj_LongJmp32:
   case X86::EH_SjLj_LongJmp64:
     return emitEHSjLjLongJmp(MI, BB);
+
+  case TargetOpcode::STACKMAP:
+  case TargetOpcode::PATCHPOINT:
+    return emitPatchPoint(MI, BB);
   }
 }
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index bc3dd608da5..6231e253d21 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -973,6 +973,9 @@ namespace llvm {
     MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr *MI,
                                          MachineBasicBlock *MBB) const;
 
+    MachineBasicBlock *emitPatchPoint(MachineInstr *MI,
+                                      MachineBasicBlock *MBB) const;
+
     /// Emit nodes that will be selected as "test Op0,Op0", or something
     /// equivalent, for use with the given x86 condition code.
     SDValue EmitTest(SDValue Op0, unsigned X86CC, SelectionDAG &DAG) const;
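Note: the "five memory operands" in emitPatchPoint's comment are LLVM's x86 addressing quintet: base, scale, index, displacement, segment. A sketch of the pattern foldMemoryOperandImpl substitutes for the single frame-index operand; this mirrors what addFrameReference in X86InstrBuilder.h emits, though the helper name below is invented:

    #include "llvm/CodeGen/MachineInstrBuilder.h"
    using namespace llvm;

    // Expand one frame-index operand into the canonical five-operand x86
    // memory reference that PrologEpilogInserter knows how to rewrite.
    static void addFrameQuintet(MachineInstrBuilder &MIB, int FI) {
      MIB.addFrameIndex(FI) // base: the stack object itself
         .addImm(1)         // scale
         .addReg(0)         // index register: none
         .addImm(0)         // displacement
         .addReg(0);        // segment register: none
    }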
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 24617737420..ad46c10c7d0 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -4237,18 +4237,27 @@ static MachineInstr* foldPatchpoint(MachineFunction &MF,
   for (unsigned i = StartIdx; i < MI->getNumOperands(); ++i) {
     MachineOperand &MO = MI->getOperand(i);
     if (std::find(Ops.begin(), Ops.end(), i) != Ops.end()) {
-      assert(MO.getReg() && "patchpoint can only fold a vreg operand");
-      // Compute the spill slot size and offset.
-      const TargetRegisterClass *RC = MF.getRegInfo().getRegClass(MO.getReg());
       unsigned SpillSize;
       unsigned SpillOffset;
-      bool Valid = TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize,
-                                         SpillOffset, &MF.getTarget());
-      if (!Valid)
-        report_fatal_error("cannot spill patchpoint subregister operand");
-
-      MIB.addOperand(MachineOperand::CreateImm(StackMaps::IndirectMemRefOp));
-      MIB.addOperand(MachineOperand::CreateImm(SpillSize));
+      if (MO.isReg()) {
+        // Compute the spill slot size and offset.
+        const TargetRegisterClass *RC =
+          MF.getRegInfo().getRegClass(MO.getReg());
+        bool Valid = TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize,
+                                           SpillOffset, &MF.getTarget());
+        if (!Valid)
+          report_fatal_error("cannot spill patchpoint subregister operand");
+        MIB.addOperand(MachineOperand::CreateImm(StackMaps::IndirectMemRefOp));
+        MIB.addOperand(MachineOperand::CreateImm(SpillSize));
+      }
+      else {
+        // ExpandISelPseudos is converting a simple frame index into a
+        // 5-operand frame index.
+        assert(MO.isFI() && MO.getIndex() == FrameIndex &&
+               "patchpoint can only fold a vreg operand or frame index");
+        SpillOffset = 0;
+        MIB.addOperand(MachineOperand::CreateImm(StackMaps::DirectMemRefOp));
+      }
       MIB.addOperand(MachineOperand::CreateFI(FrameIndex));
       addOffset(MIB, SpillOffset);
     }
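Note: the net effect appears in the emitted stackmap records. A register operand spilled by foldPatchpoint becomes an IndirectMemRefOp location, meaning the value lives in memory at [Reg + Offset], while a folded frame index becomes a DirectMemRefOp location, meaning Reg + Offset is itself the alloca's address. A consumer-side sketch of the distinction; the Location struct and kind values here are an assumption modeled on the documented stackmap format, not an LLVM API:

    #include <cstdint>

    // Hypothetical parsed stackmap location (kinds per the stackmap format
    // docs: Register = 0x1, Direct = 0x2, Indirect = 0x3, Constant = 0x4).
    struct Location { uint8_t Kind; uint16_t DwarfRegNum; int32_t Offset; };
    enum : uint8_t { Direct = 0x2, Indirect = 0x3 };

    // Recover a variable's value given the runtime contents of its register.
    // Only the two memory-reference kinds are handled in this sketch.
    static uint64_t readLocation(const Location &L, uint64_t RegValue) {
      uint64_t Addr = RegValue + (int64_t)L.Offset;
      if (L.Kind == Indirect) // spill slot: load the value from memory
        return *reinterpret_cast<const uint64_t *>(Addr);
      return Addr;            // Direct: the alloca's address is the value
    }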