author    Chandler Carruth <chandlerc@gmail.com>    2018-08-16 21:30:05 +0000
committer Chandler Carruth <chandlerc@gmail.com>    2018-08-16 21:30:05 +0000
commit    c73c0307fe71a4f1a98d99dbc5d7852d44c30fff
tree      fd4bce21f4d2d9e151c95e321832cf4f36695ba6 /llvm/lib/Target/X86
parent    66cf14d06b1c5d20417e312fabd14ffaf4314ae3
[MI] Change the array of `MachineMemOperand` pointers to be
a generically extensible collection of extra info attached to
a `MachineInstr`.

The primary change here is cleaning up the APIs used for setting and
manipulating the `MachineMemOperand` pointer arrays so that we can
change how they are allocated.

Then we introduce an extra info object that uses the trailing object
pattern to attach some number of MMOs but also other extra info. The
design of this is specifically so that this extra info has a fixed
necessary cost (the header tracking what extra info is included) and
everything else can be tail allocated. This pattern works especially
well with a `BumpPtrAllocator` which we use here.

I've also added the basic scaffolding for putting interesting pointers
into this, namely pre- and post-instruction symbols. These aren't used
anywhere yet, they're just there to ensure I've actually gotten the
data structure types correct. I'll flesh out support for these in a
subsequent patch (MIR dumping, parsing, the works).

Finally, I've included an optimization where we store any single
pointer inline in the `MachineInstr` to avoid the allocation overhead.
This is expected to be the overwhelmingly common case and so should
avoid any memory usage growth due to slightly less clever / dense
allocation when dealing with >1 MMO. This did require several ergonomic
improvements to `PointerSumType` to reasonably support the various
usage models.

This also has a side effect of freeing up 8 bits within the
`MachineInstr` which could be repurposed for something else.

The suggested direction here came largely from Hal Finkel. I hope it
was worth it. ;] It does hopefully clear a path for subsequent
extensions w/o nearly as much leg work. Lots of thanks to Reid and
Justin for careful reviews and ideas about how to do all of this.

Differential Revision: https://reviews.llvm.org/D50701

llvm-svn: 339940
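
The two ideas in the message above — a fixed-cost header with
tail-allocated payloads, and a tagged pointer that keeps a lone MMO
inline — can be sketched in a few dozen lines. This is a minimal
stand-in in plain C++17, not LLVM's implementation: the real code uses
`TrailingObjects`, `PointerSumType`, and `BumpPtrAllocator`, and every
name below (`Arena`, `ExtraInfo::create`, `InstrInfoSlot`) is
illustrative only.

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <memory>
#include <new>
#include <utility>
#include <vector>

// Stand-ins for the real LLVM types; illustrative only.
struct MachineMemOperand { unsigned Alignment = 1; };
struct MCSymbol {};

// Trivial arena standing in for BumpPtrAllocator: allocations live as
// long as the arena and are never individually freed, which is the
// lifetime model tail-allocated extra info wants.
class Arena {
  std::vector<std::unique_ptr<char[]>> Slabs;
public:
  void *allocate(std::size_t Bytes) {
    Slabs.emplace_back(new char[Bytes]); // suitably aligned for pointers
    return Slabs.back().get();
  }
};

// Fixed-cost header recording which payloads follow; the payloads are
// tail-allocated directly after it, so adding a new kind of extra info
// costs nothing for instructions that don't carry it. (Here the header
// pads to 8 bytes, keeping the trailing pointers pointer-aligned.)
struct ExtraInfo {
  unsigned NumMMOs;
  bool HasPreInstrSymbol, HasPostInstrSymbol;

  MachineMemOperand **mmos() {
    return reinterpret_cast<MachineMemOperand **>(this + 1);
  }
  MCSymbol **symbols() { // pre symbol first (if present), then post
    return reinterpret_cast<MCSymbol **>(mmos() + NumMMOs);
  }

  static ExtraInfo *create(Arena &A,
                           const std::vector<MachineMemOperand *> &MMOs,
                           MCSymbol *Pre, MCSymbol *Post) {
    std::size_t Bytes = sizeof(ExtraInfo) +
        sizeof(void *) * (MMOs.size() + (Pre != nullptr) + (Post != nullptr));
    auto *EI = new (A.allocate(Bytes)) ExtraInfo{
        static_cast<unsigned>(MMOs.size()), Pre != nullptr, Post != nullptr};
    std::copy(MMOs.begin(), MMOs.end(), EI->mmos());
    unsigned I = 0;
    if (Pre)
      EI->symbols()[I++] = Pre;
    if (Post)
      EI->symbols()[I] = Post;
    return EI;
  }
};

// The instruction stores one tagged pointer (PointerSumType in LLVM):
// nothing, a single MMO held inline, or a pointer to a full ExtraInfo.
struct InstrInfoSlot {
  enum Kind { Empty, OneMMO, Extra };
  Kind K = Empty;
  void *P = nullptr;

  void setOneMMO(MachineMemOperand *MMO) { K = OneMMO; P = MMO; }
  void setExtra(ExtraInfo *EI) { K = Extra; P = EI; }

  // memoperands() as a (pointer, count) view over either representation;
  // the OneMMO case views the inline slot itself as a one-element array.
  std::pair<MachineMemOperand **, unsigned> memOperands() {
    if (K == OneMMO)
      return {reinterpret_cast<MachineMemOperand **>(&P), 1u};
    if (K == Extra) {
      auto *EI = static_cast<ExtraInfo *>(P);
      return {EI->mmos(), EI->NumMMOs};
    }
    return {nullptr, 0u};
  }
};

int main() {
  Arena A;
  MachineMemOperand M0, M1;
  MCSymbol Pre;

  InstrInfoSlot Slot;
  Slot.setOneMMO(&M0);            // common case: one MMO, no allocation
  assert(Slot.memOperands().second == 1);

  Slot.setExtra(ExtraInfo::create(A, {&M0, &M1}, &Pre, nullptr));
  assert(Slot.memOperands().second == 2);
  assert(Slot.memOperands().first[1] == &M1);
}

The payoff shows in the call sites below: reading the memoperands is a
cheap view regardless of representation, and an instruction carrying a
single MMO never touches the allocator.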
Diffstat (limited to 'llvm/lib/Target/X86')
-rw-r--r--  llvm/lib/Target/X86/X86FixupBWInsts.cpp       2
-rw-r--r--  llvm/lib/Target/X86/X86FlagsCopyLowering.cpp  2
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp      42
-rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.cpp         43
-rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.h            6
5 files changed, 43 insertions(+), 52 deletions(-)
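
Every hunk below makes the same mechanical change: call sites stop
carrying a `(MMOBegin, MMOEnd)` iterator pair and pass a single
`ArrayRef<MachineMemOperand *>` instead. A minimal sketch of the view
type involved — illustrative only; `llvm/ADT/ArrayRef.h` is the real
thing, and `ArrayRefSketch` is a hypothetical name:

#include <cstddef>
#include <vector>

// Non-owning (pointer, length) view in the spirit of llvm::ArrayRef.
// The call sites in this diff need only this surface: implicit
// construction from a contiguous container, empty(), front(), and
// range iteration.
template <typename T> class ArrayRefSketch {
  const T *Data = nullptr;
  std::size_t Length = 0;

public:
  ArrayRefSketch() = default;
  template <typename Container>
  ArrayRefSketch(const Container &C) : Data(C.data()), Length(C.size()) {}

  bool empty() const { return Length == 0; }
  const T &front() const { return Data[0]; }
  const T *begin() const { return Data; }
  const T *end() const { return Data + Length; }
  std::size_t size() const { return Length; }
};

int main() {
  std::vector<int> V{7, 8, 9};
  ArrayRefSketch<int> R = V; // implicit, like SmallVector -> ArrayRef
  return (!R.empty() && R.front() == 7) ? 0 : 1;
}

With a view like this, the alignment check in the X86InstrInfo.cpp
hunks collapses from `MMOBegin != MMOEnd && (*MMOBegin)->getAlignment()
>= Alignment` to `!MMOs.empty() && MMOs.front()->getAlignment() >=
Alignment`, and a `SmallVector` of MMOs converts to the parameter type
implicitly.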
diff --git a/llvm/lib/Target/X86/X86FixupBWInsts.cpp b/llvm/lib/Target/X86/X86FixupBWInsts.cpp
index d9bf60c2c9f..ed297e67820 100644
--- a/llvm/lib/Target/X86/X86FixupBWInsts.cpp
+++ b/llvm/lib/Target/X86/X86FixupBWInsts.cpp
@@ -288,7 +288,7 @@ MachineInstr *FixupBWInstPass::tryReplaceLoad(unsigned New32BitOpcode,
for (unsigned i = 1; i < NumArgs; ++i)
MIB.add(MI->getOperand(i));
- MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ MIB.setMemRefs(MI->memoperands());
return MIB;
}
diff --git a/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp b/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp
index c17c51a7aea..0a4914d338e 100644
--- a/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp
+++ b/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp
@@ -1048,7 +1048,7 @@ void X86FlagsCopyLoweringPass::rewriteSetCC(MachineBasicBlock &TestMBB,
MIB.addReg(CondReg);
- MIB->setMemRefs(SetCCI.memoperands_begin(), SetCCI.memoperands_end());
+ MIB.setMemRefs(SetCCI.memoperands());
SetCCI.eraseFromParent();
return;
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 2042ee4c920..204238a6350 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -26795,8 +26795,8 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
// Memory Reference
assert(MI.hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
- MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
- MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
+ SmallVector<MachineMemOperand *, 1> MMOs(MI.memoperands_begin(),
+ MI.memoperands_end());
// Machine Information
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
@@ -26894,7 +26894,7 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
.add(Index)
.addDisp(Disp, UseFPOffset ? 4 : 0)
.add(Segment)
- .setMemRefs(MMOBegin, MMOEnd);
+ .setMemRefs(MMOs);
// Check if there is enough room left to pull this argument.
BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
@@ -26919,7 +26919,7 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
.add(Index)
.addDisp(Disp, 16)
.add(Segment)
- .setMemRefs(MMOBegin, MMOEnd);
+ .setMemRefs(MMOs);
// Zero-extend the offset
unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
@@ -26947,7 +26947,7 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
.addDisp(Disp, UseFPOffset ? 4 : 0)
.add(Segment)
.addReg(NextOffsetReg)
- .setMemRefs(MMOBegin, MMOEnd);
+ .setMemRefs(MMOs);
// Jump to endMBB
BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
@@ -26966,7 +26966,7 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
.add(Index)
.addDisp(Disp, 8)
.add(Segment)
- .setMemRefs(MMOBegin, MMOEnd);
+ .setMemRefs(MMOs);
// If we need to align it, do so. Otherwise, just copy the address
// to OverflowDestReg.
@@ -27003,7 +27003,7 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
.addDisp(Disp, 8)
.add(Segment)
.addReg(NextAddrReg)
- .setMemRefs(MMOBegin, MMOEnd);
+ .setMemRefs(MMOs);
// If we branched, emit the PHI to the front of endMBB.
if (offsetMBB) {
@@ -27977,8 +27977,8 @@ void X86TargetLowering::emitSetJmpShadowStackFix(MachineInstr &MI,
MachineInstrBuilder MIB;
// Memory Reference.
- MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
- MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
+ SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
+ MI.memoperands_end());
// Initialize a register with zero.
MVT PVT = getPointerTy(MF->getDataLayout());
@@ -28007,7 +28007,7 @@ void X86TargetLowering::emitSetJmpShadowStackFix(MachineInstr &MI,
MIB.add(MI.getOperand(MemOpndSlot + i));
}
MIB.addReg(SSPCopyReg);
- MIB.setMemRefs(MMOBegin, MMOEnd);
+ MIB.setMemRefs(MMOs);
}
MachineBasicBlock *
@@ -28023,8 +28023,8 @@ X86TargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
MachineFunction::iterator I = ++MBB->getIterator();
// Memory Reference
- MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
- MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
+ SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
+ MI.memoperands_end());
unsigned DstReg;
unsigned MemOpndSlot = 0;
@@ -28118,7 +28118,7 @@ X86TargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
MIB.addReg(LabelReg);
else
MIB.addMBB(restoreMBB);
- MIB.setMemRefs(MMOBegin, MMOEnd);
+ MIB.setMemRefs(MMOs);
if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
emitSetJmpShadowStackFix(MI, thisMBB);
@@ -28179,8 +28179,8 @@ X86TargetLowering::emitLongJmpShadowStackFix(MachineInstr &MI,
MachineRegisterInfo &MRI = MF->getRegInfo();
// Memory Reference
- MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
- MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
+ SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
+ MI.memoperands_end());
MVT PVT = getPointerTy(MF->getDataLayout());
const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
@@ -28267,7 +28267,7 @@ X86TargetLowering::emitLongJmpShadowStackFix(MachineInstr &MI,
else
MIB.add(MI.getOperand(i));
}
- MIB.setMemRefs(MMOBegin, MMOEnd);
+ MIB.setMemRefs(MMOs);
// Subtract the current SSP from the previous SSP.
unsigned SspSubReg = MRI.createVirtualRegister(PtrRC);
@@ -28351,8 +28351,8 @@ X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
MachineRegisterInfo &MRI = MF->getRegInfo();
// Memory Reference
- MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
- MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
+ SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
+ MI.memoperands_end());
MVT PVT = getPointerTy(MF->getDataLayout());
assert((PVT == MVT::i64 || PVT == MVT::i32) &&
@@ -28385,7 +28385,7 @@ X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), FP);
for (unsigned i = 0; i < X86::AddrNumOperands; ++i)
MIB.add(MI.getOperand(i));
- MIB.setMemRefs(MMOBegin, MMOEnd);
+ MIB.setMemRefs(MMOs);
// Reload IP
MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
@@ -28395,7 +28395,7 @@ X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
else
MIB.add(MI.getOperand(i));
}
- MIB.setMemRefs(MMOBegin, MMOEnd);
+ MIB.setMemRefs(MMOs);
// Reload SP
MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), SP);
@@ -28405,7 +28405,7 @@ X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
else
MIB.add(MI.getOperand(i));
}
- MIB.setMemRefs(MMOBegin, MMOEnd);
+ MIB.setMemRefs(MMOs);
// Jump
BuildMI(*thisMBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 4073f455a81..513011a1cf6 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -3308,24 +3308,21 @@ void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
.addReg(SrcReg, getKillRegState(isKill));
}
-void X86InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
- bool isKill,
- SmallVectorImpl<MachineOperand> &Addr,
- const TargetRegisterClass *RC,
- MachineInstr::mmo_iterator MMOBegin,
- MachineInstr::mmo_iterator MMOEnd,
- SmallVectorImpl<MachineInstr*> &NewMIs) const {
+void X86InstrInfo::storeRegToAddr(
+ MachineFunction &MF, unsigned SrcReg, bool isKill,
+ SmallVectorImpl<MachineOperand> &Addr, const TargetRegisterClass *RC,
+ ArrayRef<MachineMemOperand *> MMOs,
+ SmallVectorImpl<MachineInstr *> &NewMIs) const {
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
- bool isAligned = MMOBegin != MMOEnd &&
- (*MMOBegin)->getAlignment() >= Alignment;
+ bool isAligned = !MMOs.empty() && MMOs.front()->getAlignment() >= Alignment;
unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget);
DebugLoc DL;
MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc));
for (unsigned i = 0, e = Addr.size(); i != e; ++i)
MIB.add(Addr[i]);
MIB.addReg(SrcReg, getKillRegState(isKill));
- (*MIB).setMemRefs(MMOBegin, MMOEnd);
+ MIB.setMemRefs(MMOs);
NewMIs.push_back(MIB);
}
@@ -3345,22 +3342,20 @@ void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
addFrameReference(BuildMI(MBB, MI, DL, get(Opc), DestReg), FrameIdx);
}
-void X86InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
- SmallVectorImpl<MachineOperand> &Addr,
- const TargetRegisterClass *RC,
- MachineInstr::mmo_iterator MMOBegin,
- MachineInstr::mmo_iterator MMOEnd,
- SmallVectorImpl<MachineInstr*> &NewMIs) const {
+void X86InstrInfo::loadRegFromAddr(
+ MachineFunction &MF, unsigned DestReg,
+ SmallVectorImpl<MachineOperand> &Addr, const TargetRegisterClass *RC,
+ ArrayRef<MachineMemOperand *> MMOs,
+ SmallVectorImpl<MachineInstr *> &NewMIs) const {
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
- bool isAligned = MMOBegin != MMOEnd &&
- (*MMOBegin)->getAlignment() >= Alignment;
+ bool isAligned = !MMOs.empty() && MMOs.front()->getAlignment() >= Alignment;
unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget);
DebugLoc DL;
MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg);
for (unsigned i = 0, e = Addr.size(); i != e; ++i)
MIB.add(Addr[i]);
- (*MIB).setMemRefs(MMOBegin, MMOEnd);
+ MIB.setMemRefs(MMOs);
NewMIs.push_back(MIB);
}
@@ -5450,9 +5445,8 @@ bool X86InstrInfo::unfoldMemoryOperand(
// Emit the load instruction.
if (UnfoldLoad) {
- std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator> MMOs =
- MF.extractLoadMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- loadRegFromAddr(MF, Reg, AddrOps, RC, MMOs.first, MMOs.second, NewMIs);
+ auto MMOs = extractLoadMMOs(MI.memoperands(), MF);
+ loadRegFromAddr(MF, Reg, AddrOps, RC, MMOs, NewMIs);
if (UnfoldStore) {
// Address operands cannot be marked isKill.
for (unsigned i = 1; i != 1 + X86::AddrNumOperands; ++i) {
@@ -5517,9 +5511,8 @@ bool X86InstrInfo::unfoldMemoryOperand(
// Emit the store instruction.
if (UnfoldStore) {
const TargetRegisterClass *DstRC = getRegClass(MCID, 0, &RI, MF);
- std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator> MMOs =
- MF.extractStoreMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- storeRegToAddr(MF, Reg, true, AddrOps, DstRC, MMOs.first, MMOs.second, NewMIs);
+ auto MMOs = extractStoreMMOs(MI.memoperands(), MF);
+ storeRegToAddr(MF, Reg, true, AddrOps, DstRC, MMOs, NewMIs);
}
return true;
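
For context on the `unfoldMemoryOperand` hunks above: `extractLoadMMOs`
and `extractStoreMMOs` replace `MachineFunction::extractLoadMemRefs` /
`extractStoreMemRefs`, filtering an instruction's MMOs down to the load
or store half when a folded memory operand is unfolded into separate
instructions. A simplified, hypothetical sketch of the load side (the
real helper also handles an MMO that is both a load and a store by
building a load-only copy via the MachineFunction's allocator):

#include <vector>

struct MachineMemOperand {
  bool Load = false, Store = false;
  bool isLoad() const { return Load; }
  bool isStore() const { return Store; }
};

// Keep the MMOs that describe plain loads; an MMO that is both a load
// and a store would need to be rebuilt as load-only rather than reused.
std::vector<MachineMemOperand *>
extractLoadMMOsSketch(const std::vector<MachineMemOperand *> &MMOs) {
  std::vector<MachineMemOperand *> Loads;
  for (MachineMemOperand *MMO : MMOs)
    if (MMO->isLoad() && !MMO->isStore())
      Loads.push_back(MMO);
  return Loads;
}

int main() {
  MachineMemOperand Load{true, false}, Store{false, true};
  std::vector<MachineMemOperand *> All{&Load, &Store};
  return extractLoadMMOsSketch(All).size() == 1 ? 0 : 1;
}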
diff --git a/llvm/lib/Target/X86/X86InstrInfo.h b/llvm/lib/Target/X86/X86InstrInfo.h
index b1ceb767cce..4cad655ad52 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/llvm/lib/Target/X86/X86InstrInfo.h
@@ -359,8 +359,7 @@ public:
void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
SmallVectorImpl<MachineOperand> &Addr,
const TargetRegisterClass *RC,
- MachineInstr::mmo_iterator MMOBegin,
- MachineInstr::mmo_iterator MMOEnd,
+ ArrayRef<MachineMemOperand *> MMOs,
SmallVectorImpl<MachineInstr *> &NewMIs) const;
void loadRegFromStackSlot(MachineBasicBlock &MBB,
@@ -371,8 +370,7 @@ public:
void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
SmallVectorImpl<MachineOperand> &Addr,
const TargetRegisterClass *RC,
- MachineInstr::mmo_iterator MMOBegin,
- MachineInstr::mmo_iterator MMOEnd,
+ ArrayRef<MachineMemOperand *> MMOs,
SmallVectorImpl<MachineInstr *> &NewMIs) const;
bool expandPostRAPseudo(MachineInstr &MI) const override;