Diffstat (limited to 'llvm/lib/Target')
 llvm/lib/Target/X86/X86ISelLowering.cpp | 49
 llvm/lib/Target/X86/X86ISelLowering.h   |  3
 llvm/lib/Target/X86/X86InstrInfo.cpp    | 29
 3 files changed, 71 insertions(+), 10 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index f4c926ec8b6..fc52c0cd669 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -15811,6 +15811,51 @@ X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
   return MBB;
 }
 
+/// Convert any TargetFrameIndex operands into the x86-specific pattern of five
+/// memory operands that is recognized by PrologEpilogInserter.
+MachineBasicBlock *
+X86TargetLowering::emitPatchPoint(MachineInstr *MI,
+                                  MachineBasicBlock *MBB) const {
+  const TargetMachine &TM = getTargetMachine();
+  const X86InstrInfo *TII = static_cast<const X86InstrInfo*>(TM.getInstrInfo());
+
+  // MI changes inside this loop as we grow operands.
+  for (unsigned OperIdx = 0; OperIdx != MI->getNumOperands(); ++OperIdx) {
+    MachineOperand &MO = MI->getOperand(OperIdx);
+    if (!MO.isFI())
+      continue;
+
+    // foldMemoryOperand builds a new MI after replacing a single FI operand
+    // with the canonical set of five x86 addressing-mode operands.
+    int FI = MO.getIndex();
+    MachineFunction &MF = *MBB->getParent();
+    SmallVector<unsigned, 1> FIOps(1, OperIdx);
+    MachineInstr *NewMI = TII->foldMemoryOperandImpl(MF, MI, FIOps, FI);
+    assert(NewMI && "Cannot fold frame index operand into stackmap.");
+
+    // Inherit previous memory operands.
+    NewMI->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+    assert(NewMI->mayLoad() && "Folded a stackmap use to a non-load!");
+
+    // Add a new memory operand for this FI.
+    const MachineFrameInfo &MFI = *MF.getFrameInfo();
+    assert(MFI.getObjectOffset(FI) != -1);
+    MachineMemOperand *MMO =
+      MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
+                              MachineMemOperand::MOLoad,
+                              TM.getDataLayout()->getPointerSize(),
+                              MFI.getObjectAlignment(FI));
+    NewMI->addMemOperand(MF, MMO);
+
+    // Replace the instruction and update the operand index.
+    MBB->insert(MachineBasicBlock::iterator(MI), NewMI);
+    OperIdx += (NewMI->getNumOperands() - MI->getNumOperands()) - 1;
+    MI->eraseFromParent();
+    MI = NewMI;
+  }
+  return MBB;
+}
+
 MachineBasicBlock *
 X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                MachineBasicBlock *BB) const {
@@ -16038,6 +16083,10 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
   case X86::EH_SjLj_LongJmp32:
   case X86::EH_SjLj_LongJmp64:
     return emitEHSjLjLongJmp(MI, BB);
+
+  case TargetOpcode::STACKMAP:
+  case TargetOpcode::PATCHPOINT:
+    return emitPatchPoint(MI, BB);
   }
 }
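
Note: the "five memory operands" in the doc comment above are the canonical x86
addressing-mode components Base, Scale, Index, Displacement, and Segment. Below
is a minimal illustrative sketch (not part of the commit) of the pattern that
foldMemoryOperandImpl produces for a frame index; the helper name
addFrameIndexOperands is hypothetical, while the builder calls are the standard
ones from llvm/include/llvm/CodeGen/MachineInstrBuilder.h:

    #include "llvm/CodeGen/MachineInstrBuilder.h"
    using namespace llvm;

    // Illustrative only: the five-operand x86 memory reference that replaces
    // a single frame-index operand. PrologEpilogInserter later rewrites the
    // Base frame index into a real register plus displacement.
    static void addFrameIndexOperands(const MachineInstrBuilder &MIB, int FI,
                                      int64_t Disp) {
      MIB.addFrameIndex(FI) // Base: the frame index being expanded.
         .addImm(1)         // Scale: multiplier for the Index register.
         .addReg(0)         // Index: no index register.
         .addImm(Disp)      // Displacement: constant byte offset.
         .addReg(0);        // Segment: no segment override.
    }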
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index bc3dd608da5..6231e253d21 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -973,6 +973,9 @@ namespace llvm {
     MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr *MI,
                                          MachineBasicBlock *MBB) const;
 
+    MachineBasicBlock *emitPatchPoint(MachineInstr *MI,
+                                      MachineBasicBlock *MBB) const;
+
     /// Emit nodes that will be selected as "test Op0,Op0", or something
     /// equivalent, for use with the given x86 condition code.
     SDValue EmitTest(SDValue Op0, unsigned X86CC, SelectionDAG &DAG) const;
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 24617737420..ad46c10c7d0 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -4237,18 +4237,27 @@ static MachineInstr* foldPatchpoint(MachineFunction &MF,
   for (unsigned i = StartIdx; i < MI->getNumOperands(); ++i) {
     MachineOperand &MO = MI->getOperand(i);
     if (std::find(Ops.begin(), Ops.end(), i) != Ops.end()) {
-      assert(MO.getReg() && "patchpoint can only fold a vreg operand");
-      // Compute the spill slot size and offset.
-      const TargetRegisterClass *RC = MF.getRegInfo().getRegClass(MO.getReg());
       unsigned SpillSize;
       unsigned SpillOffset;
-      bool Valid = TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize,
-                                         SpillOffset, &MF.getTarget());
-      if (!Valid)
-        report_fatal_error("cannot spill patchpoint subregister operand");
-
-      MIB.addOperand(MachineOperand::CreateImm(StackMaps::IndirectMemRefOp));
-      MIB.addOperand(MachineOperand::CreateImm(SpillSize));
+      if (MO.isReg()) {
+        // Compute the spill slot size and offset.
+        const TargetRegisterClass *RC =
+          MF.getRegInfo().getRegClass(MO.getReg());
+        bool Valid = TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize,
+                                           SpillOffset, &MF.getTarget());
+        if (!Valid)
+          report_fatal_error("cannot spill patchpoint subregister operand");
+        MIB.addOperand(MachineOperand::CreateImm(StackMaps::IndirectMemRefOp));
+        MIB.addOperand(MachineOperand::CreateImm(SpillSize));
+      }
+      else {
+        // ExpandISelPseudos is converting a simple frame index into a
+        // 5-operand frame index.
+        assert(MO.isFI() && MO.getIndex() == FrameIndex &&
+               "patchpoint can only fold a vreg operand or frame index");
+        SpillOffset = 0;
+        MIB.addOperand(MachineOperand::CreateImm(StackMaps::DirectMemRefOp));
+      }
       MIB.addOperand(MachineOperand::CreateFI(FrameIndex));
       addOffset(MIB, SpillOffset);
     }
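
Note: the two branches above emit the direct and indirect memory-reference
encodings that StackMaps records (see llvm/include/llvm/CodeGen/StackMaps.h).
Below is a minimal sketch of the two operand layouts, simplified to omit the
extra x86 addressing operands that addOffset appends in the actual patch; the
helper names addIndirectRef/addDirectRef are hypothetical:

    #include "llvm/CodeGen/MachineInstrBuilder.h"
    #include "llvm/CodeGen/StackMaps.h"
    using namespace llvm;

    // Spilled vreg: the value lives in the stack slot and must be loaded,
    // so the spill size is recorded alongside the slot:
    //   <IndirectMemRefOp>, <SpillSize>, <FrameIndex>, <Offset>
    static void addIndirectRef(const MachineInstrBuilder &MIB,
                               unsigned SpillSize, int FI, int64_t Offset) {
      MIB.addImm(StackMaps::IndirectMemRefOp)
         .addImm(SpillSize)
         .addFrameIndex(FI)
         .addImm(Offset);
    }

    // Frame-index operand: the slot's address itself is the value, so no
    // load size is needed and the offset is zero:
    //   <DirectMemRefOp>, <FrameIndex>, <Offset>
    static void addDirectRef(const MachineInstrBuilder &MIB, int FI) {
      MIB.addImm(StackMaps::DirectMemRefOp).addFrameIndex(FI).addImm(0);
    }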