Diffstat (limited to 'llvm/lib/Target/X86')
-rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.cpp    | 142
-rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.h      |  19
-rw-r--r--  llvm/lib/Target/X86/X86RegisterInfo.cpp | 122
-rw-r--r--  llvm/lib/Target/X86/X86RegisterInfo.h   |  28
4 files changed, 203 insertions(+), 108 deletions(-)
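
The patch moves the register spill/reload helpers (storeRegToStackSlot, storeRegToAddr, loadRegFromStackSlot, loadRegFromAddr) from X86RegisterInfo into X86InstrInfo, so callers reach them through the instruction-info object rather than the register-info object. A minimal sketch of how a caller would invoke the relocated hooks, using only the signatures declared in this patch; the function spillAndReload and its local names are illustrative, not part of the change:

// Hypothetical caller: spill a register to its stack slot and reload it,
// using the hooks as declared in X86InstrInfo.h after this patch.
void spillAndReload(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                    const X86InstrInfo &TII, unsigned Reg, int FrameIdx,
                    const TargetRegisterClass *RC) {
  // Store Reg to its stack slot; 'true' marks the source register as killed.
  TII.storeRegToStackSlot(MBB, MI, Reg, /*isKill=*/true, FrameIdx, RC);

  // ... code that clobbers Reg would go here ...

  // Reload the value from the same slot into Reg.
  TII.loadRegFromStackSlot(MBB, MI, Reg, FrameIdx, RC);
}
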
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 10e594763e0..1c79209bf1b 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -756,6 +756,29 @@ unsigned X86InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
return 2;
}
+static const MachineInstrBuilder &X86InstrAddOperand(MachineInstrBuilder &MIB,
+ MachineOperand &MO) {
+ if (MO.isRegister())
+ MIB = MIB.addReg(MO.getReg(), MO.isDef(), MO.isImplicit(),
+ false, false, MO.getSubReg());
+ else if (MO.isImmediate())
+ MIB = MIB.addImm(MO.getImm());
+ else if (MO.isFrameIndex())
+ MIB = MIB.addFrameIndex(MO.getIndex());
+ else if (MO.isGlobalAddress())
+ MIB = MIB.addGlobalAddress(MO.getGlobal(), MO.getOffset());
+ else if (MO.isConstantPoolIndex())
+ MIB = MIB.addConstantPoolIndex(MO.getIndex(), MO.getOffset());
+ else if (MO.isJumpTableIndex())
+ MIB = MIB.addJumpTableIndex(MO.getIndex());
+ else if (MO.isExternalSymbol())
+ MIB = MIB.addExternalSymbol(MO.getSymbolName());
+ else
+ assert(0 && "Unknown operand for X86InstrAddOperand!");
+
+ return MIB;
+}
+
unsigned
X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
@@ -852,6 +875,125 @@ void X86InstrInfo::copyRegToReg(MachineBasicBlock &MBB,
BuildMI(MBB, MI, get(Opc), DestReg).addReg(SrcReg);
}
+static unsigned getStoreRegOpcode(const TargetRegisterClass *RC,
+ unsigned StackAlign) {
+ unsigned Opc = 0;
+ if (RC == &X86::GR64RegClass) {
+ Opc = X86::MOV64mr;
+ } else if (RC == &X86::GR32RegClass) {
+ Opc = X86::MOV32mr;
+ } else if (RC == &X86::GR16RegClass) {
+ Opc = X86::MOV16mr;
+ } else if (RC == &X86::GR8RegClass) {
+ Opc = X86::MOV8mr;
+ } else if (RC == &X86::GR32_RegClass) {
+ Opc = X86::MOV32_mr;
+ } else if (RC == &X86::GR16_RegClass) {
+ Opc = X86::MOV16_mr;
+ } else if (RC == &X86::RFP80RegClass) {
+ Opc = X86::ST_FpP80m; // pops
+ } else if (RC == &X86::RFP64RegClass) {
+ Opc = X86::ST_Fp64m;
+ } else if (RC == &X86::RFP32RegClass) {
+ Opc = X86::ST_Fp32m;
+ } else if (RC == &X86::FR32RegClass) {
+ Opc = X86::MOVSSmr;
+ } else if (RC == &X86::FR64RegClass) {
+ Opc = X86::MOVSDmr;
+ } else if (RC == &X86::VR128RegClass) {
+ // FIXME: Use movaps once we are capable of selectively
+ // aligning functions that spill SSE registers on 16-byte boundaries.
+ Opc = StackAlign >= 16 ? X86::MOVAPSmr : X86::MOVUPSmr;
+ } else if (RC == &X86::VR64RegClass) {
+ Opc = X86::MMX_MOVQ64mr;
+ } else {
+ assert(0 && "Unknown regclass");
+ abort();
+ }
+
+ return Opc;
+}
+
+void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned SrcReg, bool isKill, int FrameIdx,
+ const TargetRegisterClass *RC) const {
+ unsigned Opc = getStoreRegOpcode(RC, RI.getStackAlignment());
+ addFrameReference(BuildMI(MBB, MI, get(Opc)), FrameIdx)
+ .addReg(SrcReg, false, false, isKill);
+}
+
+void X86InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
+ bool isKill,
+ SmallVectorImpl<MachineOperand> &Addr,
+ const TargetRegisterClass *RC,
+ SmallVectorImpl<MachineInstr*> &NewMIs) const {
+ unsigned Opc = getStoreRegOpcode(RC, RI.getStackAlignment());
+ MachineInstrBuilder MIB = BuildMI(get(Opc));
+ for (unsigned i = 0, e = Addr.size(); i != e; ++i)
+ MIB = X86InstrAddOperand(MIB, Addr[i]);
+ MIB.addReg(SrcReg, false, false, isKill);
+ NewMIs.push_back(MIB);
+}
+
+static unsigned getLoadRegOpcode(const TargetRegisterClass *RC,
+ unsigned StackAlign) {
+ unsigned Opc = 0;
+ if (RC == &X86::GR64RegClass) {
+ Opc = X86::MOV64rm;
+ } else if (RC == &X86::GR32RegClass) {
+ Opc = X86::MOV32rm;
+ } else if (RC == &X86::GR16RegClass) {
+ Opc = X86::MOV16rm;
+ } else if (RC == &X86::GR8RegClass) {
+ Opc = X86::MOV8rm;
+ } else if (RC == &X86::GR32_RegClass) {
+ Opc = X86::MOV32_rm;
+ } else if (RC == &X86::GR16_RegClass) {
+ Opc = X86::MOV16_rm;
+ } else if (RC == &X86::RFP80RegClass) {
+ Opc = X86::LD_Fp80m;
+ } else if (RC == &X86::RFP64RegClass) {
+ Opc = X86::LD_Fp64m;
+ } else if (RC == &X86::RFP32RegClass) {
+ Opc = X86::LD_Fp32m;
+ } else if (RC == &X86::FR32RegClass) {
+ Opc = X86::MOVSSrm;
+ } else if (RC == &X86::FR64RegClass) {
+ Opc = X86::MOVSDrm;
+ } else if (RC == &X86::VR128RegClass) {
+ // FIXME: Use movaps once we are capable of selectively
+ // aligning functions that spill SSE registers on 16-byte boundaries.
+ Opc = StackAlign >= 16 ? X86::MOVAPSrm : X86::MOVUPSrm;
+ } else if (RC == &X86::VR64RegClass) {
+ Opc = X86::MMX_MOVQ64rm;
+ } else {
+ assert(0 && "Unknown regclass");
+ abort();
+ }
+
+ return Opc;
+}
+
+void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned DestReg, int FrameIdx,
+ const TargetRegisterClass *RC) const{
+ unsigned Opc = getLoadRegOpcode(RC, RI.getStackAlignment());
+ addFrameReference(BuildMI(MBB, MI, get(Opc), DestReg), FrameIdx);
+}
+
+void X86InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
+ SmallVectorImpl<MachineOperand> &Addr,
+ const TargetRegisterClass *RC,
+ SmallVectorImpl<MachineInstr*> &NewMIs) const {
+ unsigned Opc = getLoadRegOpcode(RC, RI.getStackAlignment());
+ MachineInstrBuilder MIB = BuildMI(get(Opc), DestReg);
+ for (unsigned i = 0, e = Addr.size(); i != e; ++i)
+ MIB = X86InstrAddOperand(MIB, Addr[i]);
+ NewMIs.push_back(MIB);
+}
+
bool X86InstrInfo::BlockHasNoFallThrough(MachineBasicBlock &MBB) const {
if (MBB.empty()) return false;
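
The storeRegToAddr/loadRegFromAddr variants build detached MachineInstrs from an explicit address operand list instead of inserting into a block; the unfoldMemoryOperand changes in X86RegisterInfo.cpp below call them exactly this way. A hedged sketch of that usage, assuming AddrOps has already been populated with the memory operands of a folded load/store and that MBB and InsertPt are supplied by the caller:

// Sketch only: reload Reg through an arbitrary address, then emit the
// resulting instructions into MBB at InsertPt. AddrOps, MBB, InsertPt,
// Reg, and RC are assumed to be set up by the surrounding code.
SmallVector<MachineOperand, 4> AddrOps;   // filled in by the caller
SmallVector<MachineInstr*, 4> NewMIs;
TII.loadRegFromAddr(MF, Reg, AddrOps, RC, NewMIs);
for (unsigned i = 0, e = NewMIs.size(); i != e; ++i)
  MBB.insert(InsertPt, NewMIs[i]);
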
diff --git a/llvm/lib/Target/X86/X86InstrInfo.h b/llvm/lib/Target/X86/X86InstrInfo.h
index e6ca781714f..4a6a3a085c9 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/llvm/lib/Target/X86/X86InstrInfo.h
@@ -284,6 +284,25 @@ public:
unsigned DestReg, unsigned SrcReg,
const TargetRegisterClass *DestRC,
const TargetRegisterClass *SrcRC) const;
+ virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned SrcReg, bool isKill, int FrameIndex,
+ const TargetRegisterClass *RC) const;
+
+ virtual void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
+ SmallVectorImpl<MachineOperand> &Addr,
+ const TargetRegisterClass *RC,
+ SmallVectorImpl<MachineInstr*> &NewMIs) const;
+
+ virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned DestReg, int FrameIndex,
+ const TargetRegisterClass *RC) const;
+
+ virtual void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
+ SmallVectorImpl<MachineOperand> &Addr,
+ const TargetRegisterClass *RC,
+ SmallVectorImpl<MachineInstr*> &NewMIs) const;
virtual bool BlockHasNoFallThrough(MachineBasicBlock &MBB) const;
virtual bool ReverseBranchCondition(std::vector<MachineOperand> &Cond) const;
diff --git a/llvm/lib/Target/X86/X86RegisterInfo.cpp b/llvm/lib/Target/X86/X86RegisterInfo.cpp
index b3203662b34..f86d5a00c55 100644
--- a/llvm/lib/Target/X86/X86RegisterInfo.cpp
+++ b/llvm/lib/Target/X86/X86RegisterInfo.cpp
@@ -836,86 +836,6 @@ static unsigned getStoreRegOpcode(const TargetRegisterClass *RC,
return Opc;
}
-void X86RegisterInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned SrcReg, bool isKill, int FrameIdx,
- const TargetRegisterClass *RC) const {
- unsigned Opc = getStoreRegOpcode(RC, StackAlign);
- addFrameReference(BuildMI(MBB, MI, TII.get(Opc)), FrameIdx)
- .addReg(SrcReg, false, false, isKill);
-}
-
-void X86RegisterInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
- bool isKill,
- SmallVectorImpl<MachineOperand> &Addr,
- const TargetRegisterClass *RC,
- SmallVectorImpl<MachineInstr*> &NewMIs) const {
- unsigned Opc = getStoreRegOpcode(RC, StackAlign);
- MachineInstrBuilder MIB = BuildMI(TII.get(Opc));
- for (unsigned i = 0, e = Addr.size(); i != e; ++i)
- MIB = X86InstrAddOperand(MIB, Addr[i]);
- MIB.addReg(SrcReg, false, false, isKill);
- NewMIs.push_back(MIB);
-}
-
-static unsigned getLoadRegOpcode(const TargetRegisterClass *RC,
- unsigned StackAlign) {
- unsigned Opc = 0;
- if (RC == &X86::GR64RegClass) {
- Opc = X86::MOV64rm;
- } else if (RC == &X86::GR32RegClass) {
- Opc = X86::MOV32rm;
- } else if (RC == &X86::GR16RegClass) {
- Opc = X86::MOV16rm;
- } else if (RC == &X86::GR8RegClass) {
- Opc = X86::MOV8rm;
- } else if (RC == &X86::GR32_RegClass) {
- Opc = X86::MOV32_rm;
- } else if (RC == &X86::GR16_RegClass) {
- Opc = X86::MOV16_rm;
- } else if (RC == &X86::RFP80RegClass) {
- Opc = X86::LD_Fp80m;
- } else if (RC == &X86::RFP64RegClass) {
- Opc = X86::LD_Fp64m;
- } else if (RC == &X86::RFP32RegClass) {
- Opc = X86::LD_Fp32m;
- } else if (RC == &X86::FR32RegClass) {
- Opc = X86::MOVSSrm;
- } else if (RC == &X86::FR64RegClass) {
- Opc = X86::MOVSDrm;
- } else if (RC == &X86::VR128RegClass) {
- // FIXME: Use movaps once we are capable of selectively
- // aligning functions that spill SSE registers on 16-byte boundaries.
- Opc = StackAlign >= 16 ? X86::MOVAPSrm : X86::MOVUPSrm;
- } else if (RC == &X86::VR64RegClass) {
- Opc = X86::MMX_MOVQ64rm;
- } else {
- assert(0 && "Unknown regclass");
- abort();
- }
-
- return Opc;
-}
-
-void X86RegisterInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned DestReg, int FrameIdx,
- const TargetRegisterClass *RC) const{
- unsigned Opc = getLoadRegOpcode(RC, StackAlign);
- addFrameReference(BuildMI(MBB, MI, TII.get(Opc), DestReg), FrameIdx);
-}
-
-void X86RegisterInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
- SmallVectorImpl<MachineOperand> &Addr,
- const TargetRegisterClass *RC,
- SmallVectorImpl<MachineInstr*> &NewMIs) const {
- unsigned Opc = getLoadRegOpcode(RC, StackAlign);
- MachineInstrBuilder MIB = BuildMI(TII.get(Opc), DestReg);
- for (unsigned i = 0, e = Addr.size(); i != e; ++i)
- MIB = X86InstrAddOperand(MIB, Addr[i]);
- NewMIs.push_back(MIB);
-}
-
const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
if (RC == &X86::CCRRegClass)
@@ -1229,7 +1149,7 @@ bool X86RegisterInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
// Emit the load instruction.
if (UnfoldLoad) {
- loadRegFromAddr(MF, Reg, AddrOps, RC, NewMIs);
+ TII.loadRegFromAddr(MF, Reg, AddrOps, RC, NewMIs);
if (UnfoldStore) {
// Address operands cannot be marked isKill.
for (unsigned i = 1; i != 5; ++i) {
@@ -1286,12 +1206,50 @@ bool X86RegisterInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
const TargetOperandInfo &DstTOI = TID.OpInfo[0];
const TargetRegisterClass *DstRC = (DstTOI.Flags & M_LOOK_UP_PTR_REG_CLASS)
? TII.getPointerRegClass() : getRegClass(DstTOI.RegClass);
- storeRegToAddr(MF, Reg, true, AddrOps, DstRC, NewMIs);
+ TII.storeRegToAddr(MF, Reg, true, AddrOps, DstRC, NewMIs);
}
return true;
}
+static unsigned getLoadRegOpcode(const TargetRegisterClass *RC,
+ unsigned StackAlign) {
+ unsigned Opc = 0;
+ if (RC == &X86::GR64RegClass) {
+ Opc = X86::MOV64rm;
+ } else if (RC == &X86::GR32RegClass) {
+ Opc = X86::MOV32rm;
+ } else if (RC == &X86::GR16RegClass) {
+ Opc = X86::MOV16rm;
+ } else if (RC == &X86::GR8RegClass) {
+ Opc = X86::MOV8rm;
+ } else if (RC == &X86::GR32_RegClass) {
+ Opc = X86::MOV32_rm;
+ } else if (RC == &X86::GR16_RegClass) {
+ Opc = X86::MOV16_rm;
+ } else if (RC == &X86::RFP80RegClass) {
+ Opc = X86::LD_Fp80m;
+ } else if (RC == &X86::RFP64RegClass) {
+ Opc = X86::LD_Fp64m;
+ } else if (RC == &X86::RFP32RegClass) {
+ Opc = X86::LD_Fp32m;
+ } else if (RC == &X86::FR32RegClass) {
+ Opc = X86::MOVSSrm;
+ } else if (RC == &X86::FR64RegClass) {
+ Opc = X86::MOVSDrm;
+ } else if (RC == &X86::VR128RegClass) {
+ // FIXME: Use movaps once we are capable of selectively
+ // aligning functions that spill SSE registers on 16-byte boundaries.
+ Opc = StackAlign >= 16 ? X86::MOVAPSrm : X86::MOVUPSrm;
+ } else if (RC == &X86::VR64RegClass) {
+ Opc = X86::MMX_MOVQ64rm;
+ } else {
+ assert(0 && "Unknown regclass");
+ abort();
+ }
+
+ return Opc;
+}
bool
X86RegisterInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
diff --git a/llvm/lib/Target/X86/X86RegisterInfo.h b/llvm/lib/Target/X86/X86RegisterInfo.h
index 17c22bfdb82..0695b3fe616 100644
--- a/llvm/lib/Target/X86/X86RegisterInfo.h
+++ b/llvm/lib/Target/X86/X86RegisterInfo.h
@@ -85,6 +85,8 @@ public:
/// register identifier.
unsigned getX86RegNum(unsigned RegNo);
+ unsigned getStackAlignment() const { return StackAlign; }
+
/// getDwarfRegNum - allows modification of X86GenRegisterInfo::getDwarfRegNum
/// (created by TableGen) for target dependencies.
int getDwarfRegNum(unsigned RegNum, bool isEH) const;
@@ -98,32 +100,6 @@ public:
bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI) const;
-
- void storeRegToStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned SrcReg, bool isKill, int FrameIndex,
- const TargetRegisterClass *RC) const;
-
- void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
- SmallVectorImpl<MachineOperand> &Addr,
- const TargetRegisterClass *RC,
- SmallVectorImpl<MachineInstr*> &NewMIs) const;
-
- void loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned DestReg, int FrameIndex,
- const TargetRegisterClass *RC) const;
-
- void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
- SmallVectorImpl<MachineOperand> &Addr,
- const TargetRegisterClass *RC,
- SmallVectorImpl<MachineInstr*> &NewMIs) const;
-
- void copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC) const;
const TargetRegisterClass *
getCrossCopyRegClass(const TargetRegisterClass *RC) const;