summaryrefslogtreecommitdiffstats
path: root/llvm/lib
diff options
context:
space:
mode:
Diffstat (limited to 'llvm/lib')
-rw-r--r--llvm/lib/CodeGen/BranchFolding.cpp2
-rw-r--r--llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp5
-rw-r--r--llvm/lib/CodeGen/ImplicitNullChecks.cpp2
-rw-r--r--llvm/lib/CodeGen/MIRParser/MIParser.cpp5
-rw-r--r--llvm/lib/CodeGen/MachineFunction.cpp77
-rw-r--r--llvm/lib/CodeGen/MachineInstr.cpp216
-rw-r--r--llvm/lib/CodeGen/MachineOutliner.cpp2
-rw-r--r--llvm/lib/CodeGen/MachinePipeliner.cpp16
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp9
-rw-r--r--llvm/lib/CodeGen/StackColoring.cpp12
-rw-r--r--llvm/lib/CodeGen/TargetInstrInfo.cpp6
-rw-r--r--llvm/lib/CodeGen/TargetLoweringBase.cpp2
-rw-r--r--llvm/lib/Target/AArch64/AArch64FrameLowering.cpp2
-rw-r--r--llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp8
-rw-r--r--llvm/lib/Target/AMDGPU/SIISelLowering.cpp2
-rw-r--r--llvm/lib/Target/AMDGPU/SIInstrInfo.cpp26
-rw-r--r--llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp42
-rw-r--r--llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp19
-rw-r--r--llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp15
-rw-r--r--llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp24
-rw-r--r--llvm/lib/Target/ARM/ARMInstrInfo.cpp2
-rw-r--r--llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp8
-rw-r--r--llvm/lib/Target/ARM/Thumb2SizeReduction.cpp4
-rw-r--r--llvm/lib/Target/AVR/AVRExpandPseudoInsts.cpp52
-rw-r--r--llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp6
-rw-r--r--llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp4
-rw-r--r--llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp9
-rw-r--r--llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp56
-rw-r--r--llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp21
-rw-r--r--llvm/lib/Target/Lanai/LanaiMemAluCombiner.cpp3
-rw-r--r--llvm/lib/Target/Mips/MipsInstrInfo.cpp3
-rw-r--r--llvm/lib/Target/PowerPC/PPCISelLowering.cpp38
-rw-r--r--llvm/lib/Target/SystemZ/SystemZElimCompare.cpp7
-rw-r--r--llvm/lib/Target/SystemZ/SystemZISelLowering.cpp2
-rw-r--r--llvm/lib/Target/X86/X86FixupBWInsts.cpp2
-rw-r--r--llvm/lib/Target/X86/X86FlagsCopyLowering.cpp2
-rw-r--r--llvm/lib/Target/X86/X86ISelLowering.cpp42
-rw-r--r--llvm/lib/Target/X86/X86InstrInfo.cpp43
-rw-r--r--llvm/lib/Target/X86/X86InstrInfo.h6
39 files changed, 399 insertions, 403 deletions
diff --git a/llvm/lib/CodeGen/BranchFolding.cpp b/llvm/lib/CodeGen/BranchFolding.cpp
index c7a0c645716..fa027eedd4d 100644
--- a/llvm/lib/CodeGen/BranchFolding.cpp
+++ b/llvm/lib/CodeGen/BranchFolding.cpp
@@ -865,7 +865,7 @@ mergeOperations(MachineBasicBlock::iterator MBBIStartPos,
// Merge MMOs from memory operations in the common block.
if (MBBICommon->mayLoad() || MBBICommon->mayStore())
- MBBICommon->setMemRefs(MBBICommon->mergeMemRefsWith(*MBBI));
+ MBBICommon->cloneMergedMemRefs(*MBB->getParent(), {&*MBBICommon, &*MBBI});
// Drop undef flags if they aren't present in all merged instructions.
for (unsigned I = 0, E = MBBICommon->getNumOperands(); I != E; ++I) {
MachineOperand &MO = MBBICommon->getOperand(I);
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 8559d6055cc..74f51cc7292 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -706,13 +706,12 @@ void IRTranslator::getStackGuard(unsigned DstReg,
return;
MachinePointerInfo MPInfo(Global);
- MachineInstr::mmo_iterator MemRefs = MF->allocateMemRefsArray(1);
auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
MachineMemOperand::MODereferenceable;
- *MemRefs =
+ MachineMemOperand *MemRef =
MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
DL->getPointerABIAlignment(0));
- MIB.setMemRefs(MemRefs, MemRefs + 1);
+ MIB.setMemRefs({MemRef});
}
bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
diff --git a/llvm/lib/CodeGen/ImplicitNullChecks.cpp b/llvm/lib/CodeGen/ImplicitNullChecks.cpp
index 0a447bc613b..558dbb08b22 100644
--- a/llvm/lib/CodeGen/ImplicitNullChecks.cpp
+++ b/llvm/lib/CodeGen/ImplicitNullChecks.cpp
@@ -651,7 +651,7 @@ MachineInstr *ImplicitNullChecks::insertFaultingInstr(
}
}
- MIB.setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ MIB.setMemRefs(MI->memoperands());
return MIB;
}
diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
index a61e7872f1a..86d4a380ac7 100644
--- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
@@ -797,10 +797,7 @@ bool MIParser::parse(MachineInstr *&MI) {
return true;
if (MemOperands.empty())
return false;
- MachineInstr::mmo_iterator MemRefs =
- MF.allocateMemRefsArray(MemOperands.size());
- std::copy(MemOperands.begin(), MemOperands.end(), MemRefs);
- MI->setMemRefs(MemRefs, MemRefs + MemOperands.size());
+ MI->setMemRefs(MF, MemOperands);
return false;
}
diff --git a/llvm/lib/CodeGen/MachineFunction.cpp b/llvm/lib/CodeGen/MachineFunction.cpp
index dd668bcf619..1b15819cd50 100644
--- a/llvm/lib/CodeGen/MachineFunction.cpp
+++ b/llvm/lib/CodeGen/MachineFunction.cpp
@@ -406,77 +406,12 @@ MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
MMO->getOrdering(), MMO->getFailureOrdering());
}
-MachineInstr::mmo_iterator
-MachineFunction::allocateMemRefsArray(unsigned long Num) {
- return Allocator.Allocate<MachineMemOperand *>(Num);
-}
-
-std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator>
-MachineFunction::extractLoadMemRefs(MachineInstr::mmo_iterator Begin,
- MachineInstr::mmo_iterator End) {
- // Count the number of load mem refs.
- unsigned Num = 0;
- for (MachineInstr::mmo_iterator I = Begin; I != End; ++I)
- if ((*I)->isLoad())
- ++Num;
-
- // Allocate a new array and populate it with the load information.
- MachineInstr::mmo_iterator Result = allocateMemRefsArray(Num);
- unsigned Index = 0;
- for (MachineInstr::mmo_iterator I = Begin; I != End; ++I) {
- if ((*I)->isLoad()) {
- if (!(*I)->isStore())
- // Reuse the MMO.
- Result[Index] = *I;
- else {
- // Clone the MMO and unset the store flag.
- MachineMemOperand *JustLoad =
- getMachineMemOperand((*I)->getPointerInfo(),
- (*I)->getFlags() & ~MachineMemOperand::MOStore,
- (*I)->getSize(), (*I)->getBaseAlignment(),
- (*I)->getAAInfo(), nullptr,
- (*I)->getSyncScopeID(), (*I)->getOrdering(),
- (*I)->getFailureOrdering());
- Result[Index] = JustLoad;
- }
- ++Index;
- }
- }
- return std::make_pair(Result, Result + Num);
-}
-
-std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator>
-MachineFunction::extractStoreMemRefs(MachineInstr::mmo_iterator Begin,
- MachineInstr::mmo_iterator End) {
- // Count the number of load mem refs.
- unsigned Num = 0;
- for (MachineInstr::mmo_iterator I = Begin; I != End; ++I)
- if ((*I)->isStore())
- ++Num;
-
- // Allocate a new array and populate it with the store information.
- MachineInstr::mmo_iterator Result = allocateMemRefsArray(Num);
- unsigned Index = 0;
- for (MachineInstr::mmo_iterator I = Begin; I != End; ++I) {
- if ((*I)->isStore()) {
- if (!(*I)->isLoad())
- // Reuse the MMO.
- Result[Index] = *I;
- else {
- // Clone the MMO and unset the load flag.
- MachineMemOperand *JustStore =
- getMachineMemOperand((*I)->getPointerInfo(),
- (*I)->getFlags() & ~MachineMemOperand::MOLoad,
- (*I)->getSize(), (*I)->getBaseAlignment(),
- (*I)->getAAInfo(), nullptr,
- (*I)->getSyncScopeID(), (*I)->getOrdering(),
- (*I)->getFailureOrdering());
- Result[Index] = JustStore;
- }
- ++Index;
- }
- }
- return std::make_pair(Result, Result + Num);
+MachineInstr::ExtraInfo *
+MachineFunction::createMIExtraInfo(ArrayRef<MachineMemOperand *> MMOs,
+ MCSymbol *PreInstrSymbol,
+ MCSymbol *PostInstrSymbol) {
+ return MachineInstr::ExtraInfo::create(Allocator, MMOs, PreInstrSymbol,
+ PostInstrSymbol);
}
const char *MachineFunction::createExternalSymbolName(StringRef Name) {
diff --git a/llvm/lib/CodeGen/MachineInstr.cpp b/llvm/lib/CodeGen/MachineInstr.cpp
index 96fcfdb72ad..fb1d392e01b 100644
--- a/llvm/lib/CodeGen/MachineInstr.cpp
+++ b/llvm/lib/CodeGen/MachineInstr.cpp
@@ -131,8 +131,7 @@ MachineInstr::MachineInstr(MachineFunction &MF, const MCInstrDesc &tid,
/// MachineInstr ctor - Copies MachineInstr arg exactly
///
MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
- : MCID(&MI.getDesc()), NumMemRefs(MI.NumMemRefs), MemRefs(MI.MemRefs),
- debugLoc(MI.getDebugLoc()) {
+ : MCID(&MI.getDesc()), Info(MI.Info), debugLoc(MI.getDebugLoc()) {
assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
CapOperands = OperandCapacity::get(MI.getNumOperands());
@@ -315,71 +314,178 @@ void MachineInstr::RemoveOperand(unsigned OpNo) {
--NumOperands;
}
-/// addMemOperand - Add a MachineMemOperand to the machine instruction.
-/// This function should be used only occasionally. The setMemRefs function
-/// is the primary method for setting up a MachineInstr's MemRefs list.
+void MachineInstr::dropMemRefs(MachineFunction &MF) {
+ if (memoperands_empty())
+ return;
+
+ // See if we can just drop all of our extra info.
+ if (!getPreInstrSymbol() && !getPostInstrSymbol()) {
+ Info.clear();
+ return;
+ }
+ if (!getPostInstrSymbol()) {
+ Info.set<EIIK_PreInstrSymbol>(getPreInstrSymbol());
+ return;
+ }
+ if (!getPreInstrSymbol()) {
+ Info.set<EIIK_PostInstrSymbol>(getPostInstrSymbol());
+ return;
+ }
+
+ // Otherwise allocate a fresh extra info with just these symbols.
+ Info.set<EIIK_OutOfLine>(
+ MF.createMIExtraInfo({}, getPreInstrSymbol(), getPostInstrSymbol()));
+}
+
+void MachineInstr::setMemRefs(MachineFunction &MF,
+ ArrayRef<MachineMemOperand *> MMOs) {
+ if (MMOs.empty()) {
+ dropMemRefs(MF);
+ return;
+ }
+
+ // Try to store a single MMO inline.
+ if (MMOs.size() == 1 && !getPreInstrSymbol() && !getPostInstrSymbol()) {
+ Info.set<EIIK_MMO>(MMOs[0]);
+ return;
+ }
+
+ // Otherwise create an extra info struct with all of our info.
+ Info.set<EIIK_OutOfLine>(
+ MF.createMIExtraInfo(MMOs, getPreInstrSymbol(), getPostInstrSymbol()));
+}
+
void MachineInstr::addMemOperand(MachineFunction &MF,
MachineMemOperand *MO) {
- mmo_iterator OldMemRefs = MemRefs;
- unsigned OldNumMemRefs = NumMemRefs;
+ SmallVector<MachineMemOperand *, 2> MMOs;
+ MMOs.append(memoperands_begin(), memoperands_end());
+ MMOs.push_back(MO);
+ setMemRefs(MF, MMOs);
+}
- unsigned NewNum = NumMemRefs + 1;
- mmo_iterator NewMemRefs = MF.allocateMemRefsArray(NewNum);
+void MachineInstr::cloneMemRefs(MachineFunction &MF, const MachineInstr &MI) {
+ if (this == &MI)
+ // Nothing to do for a self-clone!
+ return;
+
+ assert(&MF == MI.getMF() &&
+         "Invalid machine functions when cloning memory references!");
+ // See if we can just steal the extra info already allocated for the
+ // instruction. We can do this whenever the pre- and post-instruction symbols
+ // are the same (including null).
+ if (getPreInstrSymbol() == MI.getPreInstrSymbol() &&
+ getPostInstrSymbol() == MI.getPostInstrSymbol()) {
+ Info = MI.Info;
+ return;
+ }
- std::copy(OldMemRefs, OldMemRefs + OldNumMemRefs, NewMemRefs);
- NewMemRefs[NewNum - 1] = MO;
- setMemRefs(NewMemRefs, NewMemRefs + NewNum);
+ // Otherwise, fall back on a copy-based clone.
+ setMemRefs(MF, MI.memoperands());
}
/// Check to see if the MMOs pointed to by the two MemRefs arrays are
/// identical.
-static bool hasIdenticalMMOs(const MachineInstr &MI1, const MachineInstr &MI2) {
- auto I1 = MI1.memoperands_begin(), E1 = MI1.memoperands_end();
- auto I2 = MI2.memoperands_begin(), E2 = MI2.memoperands_end();
- if ((E1 - I1) != (E2 - I2))
+static bool hasIdenticalMMOs(ArrayRef<MachineMemOperand *> LHS,
+ ArrayRef<MachineMemOperand *> RHS) {
+ if (LHS.size() != RHS.size())
return false;
- for (; I1 != E1; ++I1, ++I2) {
- if (**I1 != **I2)
- return false;
+
+ auto LHSPointees = make_pointee_range(LHS);
+ auto RHSPointees = make_pointee_range(RHS);
+ return std::equal(LHSPointees.begin(), LHSPointees.end(),
+ RHSPointees.begin());
+}
+
+void MachineInstr::cloneMergedMemRefs(MachineFunction &MF,
+ ArrayRef<const MachineInstr *> MIs) {
+ // Try handling easy numbers of MIs with simpler mechanisms.
+ if (MIs.empty()) {
+ dropMemRefs(MF);
+ return;
}
- return true;
+ if (MIs.size() == 1) {
+ cloneMemRefs(MF, *MIs[0]);
+ return;
+ }
+ // Because an empty memoperands list provides *no* information and must be
+ // handled conservatively (assuming the instruction can do anything), the only
+ // way to merge with it is to drop all other memoperands.
+ if (MIs[0]->memoperands_empty()) {
+ dropMemRefs(MF);
+ return;
+ }
+
+ // Handle the general case.
+ SmallVector<MachineMemOperand *, 2> MergedMMOs;
+ // Start with the first instruction.
+ assert(&MF == MIs[0]->getMF() &&
+ "Invalid machine functions when cloning memory references!");
+ MergedMMOs.append(MIs[0]->memoperands_begin(), MIs[0]->memoperands_end());
+ // Now walk all the other instructions and accumulate any different MMOs.
+ for (const MachineInstr &MI : make_pointee_range(MIs.slice(1))) {
+ assert(&MF == MI.getMF() &&
+ "Invalid machine functions when cloning memory references!");
+
+ // Skip MIs with identical operands to the first. This is a somewhat
+ // arbitrary hack but will catch common cases without being quadratic.
+ // TODO: We could fully implement merge semantics here if needed.
+ if (hasIdenticalMMOs(MIs[0]->memoperands(), MI.memoperands()))
+ continue;
+
+ // Because an empty memoperands list provides *no* information and must be
+ // handled conservatively (assuming the instruction can do anything), the
+ // only way to merge with it is to drop all other memoperands.
+ if (MI.memoperands_empty()) {
+ dropMemRefs(MF);
+ return;
+ }
+
+ // Otherwise accumulate these into our temporary buffer of the merged state.
+ MergedMMOs.append(MI.memoperands_begin(), MI.memoperands_end());
+ }
+
+ setMemRefs(MF, MergedMMOs);
}
-std::pair<MachineInstr::mmo_iterator, unsigned>
-MachineInstr::mergeMemRefsWith(const MachineInstr& Other) {
-
- // If either of the incoming memrefs are empty, we must be conservative and
- // treat this as if we've exhausted our space for memrefs and dropped them.
- if (memoperands_empty() || Other.memoperands_empty())
- return std::make_pair(nullptr, 0);
-
- // If both instructions have identical memrefs, we don't need to merge them.
- // Since many instructions have a single memref, and we tend to merge things
- // like pairs of loads from the same location, this catches a large number of
- // cases in practice.
- if (hasIdenticalMMOs(*this, Other))
- return std::make_pair(MemRefs, NumMemRefs);
-
- // TODO: consider uniquing elements within the operand lists to reduce
- // space usage and fall back to conservative information less often.
- size_t CombinedNumMemRefs = NumMemRefs + Other.NumMemRefs;
-
- // If we don't have enough room to store this many memrefs, be conservative
- // and drop them. Otherwise, we'd fail asserts when trying to add them to
- // the new instruction.
- if (CombinedNumMemRefs != uint8_t(CombinedNumMemRefs))
- return std::make_pair(nullptr, 0);
-
- MachineFunction *MF = getMF();
- mmo_iterator MemBegin = MF->allocateMemRefsArray(CombinedNumMemRefs);
- mmo_iterator MemEnd = std::copy(memoperands_begin(), memoperands_end(),
- MemBegin);
- MemEnd = std::copy(Other.memoperands_begin(), Other.memoperands_end(),
- MemEnd);
- assert(MemEnd - MemBegin == (ptrdiff_t)CombinedNumMemRefs &&
- "missing memrefs");
-
- return std::make_pair(MemBegin, CombinedNumMemRefs);
+MCSymbol *MachineInstr::getOrCreatePreInstrTempSymbol(MCContext &MCCtx) {
+ MCSymbol *S = getPreInstrSymbol();
+ if (S)
+ return S;
+
+ // Create a new temp symbol.
+ S = MCCtx.createTempSymbol();
+
+ if (!Info) {
+ // If we don't have any other extra info, we can store this inline.
+ Info.set<EIIK_PreInstrSymbol>(S);
+ return S;
+ }
+
+  // Otherwise, allocate a full set of extra info.
+ Info.set<EIIK_OutOfLine>(
+ getMF()->createMIExtraInfo(memoperands(), S, getPostInstrSymbol()));
+
+ return S;
+}
+
+MCSymbol *MachineInstr::getOrCreatePostInstrTempSymbol(MCContext &MCCtx) {
+ MCSymbol *S = getPostInstrSymbol();
+ if (S)
+ return S;
+
+ // Create a new temp symbol.
+ S = MCCtx.createTempSymbol();
+
+ if (!Info) {
+ // If we don't have any other extra info, we can store this inline.
+ Info.set<EIIK_PostInstrSymbol>(S);
+ return S;
+ }
+
+  // Otherwise, allocate a full set of extra info.
+ Info.set<EIIK_OutOfLine>(
+ getMF()->createMIExtraInfo(memoperands(), getPreInstrSymbol(), S));
+ return S;
}
uint16_t MachineInstr::mergeFlagsWith(const MachineInstr &Other) const {
diff --git a/llvm/lib/CodeGen/MachineOutliner.cpp b/llvm/lib/CodeGen/MachineOutliner.cpp
index a712afec095..b5a4dbf6af6 100644
--- a/llvm/lib/CodeGen/MachineOutliner.cpp
+++ b/llvm/lib/CodeGen/MachineOutliner.cpp
@@ -1197,7 +1197,7 @@ MachineOutliner::createOutlinedFunction(Module &M, const OutlinedFunction &OF,
for (unsigned Str : OF.Sequence) {
MachineInstr *NewMI =
MF.CloneMachineInstr(Mapper.IntegerInstructionMap.find(Str)->second);
- NewMI->dropMemRefs();
+ NewMI->dropMemRefs(MF);
// Don't keep debug information for outlined instructions.
NewMI->setDebugLoc(DebugLoc());
diff --git a/llvm/lib/CodeGen/MachinePipeliner.cpp b/llvm/lib/CodeGen/MachinePipeliner.cpp
index 9bb00aaef86..943a2b42200 100644
--- a/llvm/lib/CodeGen/MachinePipeliner.cpp
+++ b/llvm/lib/CodeGen/MachinePipeliner.cpp
@@ -3175,28 +3175,26 @@ void SwingSchedulerDAG::updateMemOperands(MachineInstr &NewMI,
return;
// If the instruction has memory operands, then adjust the offset
// when the instruction appears in different stages.
- unsigned NumRefs = NewMI.memoperands_end() - NewMI.memoperands_begin();
- if (NumRefs == 0)
+ if (NewMI.memoperands_empty())
return;
- MachineInstr::mmo_iterator NewMemRefs = MF.allocateMemRefsArray(NumRefs);
- unsigned Refs = 0;
+ SmallVector<MachineMemOperand *, 2> NewMMOs;
for (MachineMemOperand *MMO : NewMI.memoperands()) {
if (MMO->isVolatile() || (MMO->isInvariant() && MMO->isDereferenceable()) ||
(!MMO->getValue())) {
- NewMemRefs[Refs++] = MMO;
+ NewMMOs.push_back(MMO);
continue;
}
unsigned Delta;
if (Num != UINT_MAX && computeDelta(OldMI, Delta)) {
int64_t AdjOffset = Delta * Num;
- NewMemRefs[Refs++] =
- MF.getMachineMemOperand(MMO, AdjOffset, MMO->getSize());
+ NewMMOs.push_back(
+ MF.getMachineMemOperand(MMO, AdjOffset, MMO->getSize()));
} else {
- NewMI.dropMemRefs();
+ NewMI.dropMemRefs(MF);
return;
}
}
- NewMI.setMemRefs(NewMemRefs, NewMemRefs + NumRefs);
+ NewMI.setMemRefs(MF, NewMMOs);
}
/// Clone the instruction for the new pipelined loop and update the
diff --git a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index 1b0dac76613..6b843f9db59 100644
--- a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -886,12 +886,9 @@ EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
MIB.addReg(ScratchRegs[i], RegState::ImplicitDefine |
RegState::EarlyClobber);
- // Transfer all of the memory reference descriptions of this instruction.
- ArrayRef<MachineMemOperand *> SDNodeMemRefs =
- cast<MachineSDNode>(Node)->memoperands();
- MachineMemOperand **MemRefs = MF->allocateMemRefsArray(SDNodeMemRefs.size());
- std::copy(SDNodeMemRefs.begin(), SDNodeMemRefs.end(), MemRefs);
- MIB.setMemRefs({MemRefs, SDNodeMemRefs.size()});
+ // Set the memory reference descriptions of this instruction now that it is
+ // part of the function.
+ MIB.setMemRefs(cast<MachineSDNode>(Node)->memoperands());
// Insert the instruction into position in the block. This needs to
// happen before any custom inserter hook is called so that the
diff --git a/llvm/lib/CodeGen/StackColoring.cpp b/llvm/lib/CodeGen/StackColoring.cpp
index ff7dc211025..33733661ea6 100644
--- a/llvm/lib/CodeGen/StackColoring.cpp
+++ b/llvm/lib/CodeGen/StackColoring.cpp
@@ -1022,9 +1022,7 @@ void StackColoring::remapInstructions(DenseMap<int, int> &SlotRemap) {
}
// We adjust AliasAnalysis information for merged stack slots.
- MachineInstr::mmo_iterator NewMemOps =
- MF->allocateMemRefsArray(I.getNumMemOperands());
- unsigned MemOpIdx = 0;
+ SmallVector<MachineMemOperand *, 2> NewMMOs;
bool ReplaceMemOps = false;
for (MachineMemOperand *MMO : I.memoperands()) {
// If this memory location can be a slot remapped here,
@@ -1051,17 +1049,17 @@ void StackColoring::remapInstructions(DenseMap<int, int> &SlotRemap) {
}
}
if (MayHaveConflictingAAMD) {
- NewMemOps[MemOpIdx++] = MF->getMachineMemOperand(MMO, AAMDNodes());
+ NewMMOs.push_back(MF->getMachineMemOperand(MMO, AAMDNodes()));
ReplaceMemOps = true;
+ } else {
+ NewMMOs.push_back(MMO);
}
- else
- NewMemOps[MemOpIdx++] = MMO;
}
// If any memory operand is updated, set memory references of
// this instruction.
if (ReplaceMemOps)
- I.setMemRefs(std::make_pair(NewMemOps, I.getNumMemOperands()));
+ I.setMemRefs(*MF, NewMMOs);
}
// Update the location of C++ catch objects for the MSVC personality routine.
diff --git a/llvm/lib/CodeGen/TargetInstrInfo.cpp b/llvm/lib/CodeGen/TargetInstrInfo.cpp
index 034ad9a4982..19670c24ae8 100644
--- a/llvm/lib/CodeGen/TargetInstrInfo.cpp
+++ b/llvm/lib/CodeGen/TargetInstrInfo.cpp
@@ -583,7 +583,7 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
}
if (NewMI) {
- NewMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ NewMI->setMemRefs(MF, MI.memoperands());
// Add a memory operand, foldMemoryOperandImpl doesn't do that.
assert((!(Flags & MachineMemOperand::MOStore) ||
NewMI->mayStore()) &&
@@ -653,10 +653,10 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
// Copy the memoperands from the load to the folded instruction.
if (MI.memoperands_empty()) {
- NewMI->setMemRefs(LoadMI.memoperands_begin(), LoadMI.memoperands_end());
+ NewMI->setMemRefs(MF, LoadMI.memoperands());
} else {
// Handle the rare case of folding multiple loads.
- NewMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ NewMI->setMemRefs(MF, MI.memoperands());
for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
E = LoadMI.memoperands_end();
I != E; ++I) {
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index 7b1b76821da..aeb321f4a42 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -968,7 +968,7 @@ TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
MIB.add(MI->getOperand(i));
// Inherit previous memory operands.
- MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ MIB.cloneMemRefs(*MI);
assert(MIB->mayLoad() && "Folded a stackmap use to a non-load!");
// Add a new memory operand for this FI.
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index 6dc5d19862a..9a3f4d9f2f3 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -479,7 +479,7 @@ static MachineBasicBlock::iterator convertCalleeSaveRestoreToSPPrePostIncDec(
MIB.addImm(CSStackSizeIncImm);
MIB.setMIFlags(MBBI->getFlags());
- MIB.setMemRefs(MBBI->memoperands_begin(), MBBI->memoperands_end());
+ MIB.setMemRefs(MBBI->memoperands());
return std::prev(MBB.erase(MBBI));
}
diff --git a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index 4a19ecd6910..aa732a99469 100644
--- a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -702,7 +702,7 @@ AArch64LoadStoreOpt::mergeNarrowZeroStores(MachineBasicBlock::iterator I,
.addReg(isNarrowStore(Opc) ? AArch64::WZR : AArch64::XZR)
.add(BaseRegOp)
.addImm(OffsetImm)
- .setMemRefs(I->mergeMemRefsWith(*MergeMI))
+ .cloneMergedMemRefs({&*I, &*MergeMI})
.setMIFlags(I->mergeFlagsWith(*MergeMI));
(void)MIB;
@@ -819,7 +819,7 @@ AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
.add(RegOp1)
.add(BaseRegOp)
.addImm(OffsetImm)
- .setMemRefs(I->mergeMemRefsWith(*Paired))
+ .cloneMergedMemRefs({&*I, &*Paired})
.setMIFlags(I->mergeFlagsWith(*Paired));
(void)MIB;
@@ -1338,7 +1338,7 @@ AArch64LoadStoreOpt::mergeUpdateInsn(MachineBasicBlock::iterator I,
.add(getLdStRegOp(*I))
.add(getLdStBaseOp(*I))
.addImm(Value)
- .setMemRefs(I->memoperands_begin(), I->memoperands_end())
+ .setMemRefs(I->memoperands())
.setMIFlags(I->mergeFlagsWith(*Update));
} else {
// Paired instruction.
@@ -1349,7 +1349,7 @@ AArch64LoadStoreOpt::mergeUpdateInsn(MachineBasicBlock::iterator I,
.add(getLdStRegOp(*I, 1))
.add(getLdStBaseOp(*I))
.addImm(Value / Scale)
- .setMemRefs(I->memoperands_begin(), I->memoperands_end())
+ .setMemRefs(I->memoperands())
.setMIFlags(I->mergeFlagsWith(*Update));
}
(void)MIB;
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 3e7ca62e8b8..c73e6b57ee0 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -3419,7 +3419,7 @@ MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I)
MIB.add(MI.getOperand(I));
- MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIB.cloneMemRefs(MI);
MI.eraseFromParent();
return BB;
}
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index f3745382a6f..a543f0e1c31 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -3735,7 +3735,7 @@ void SIInstrInfo::legalizeOperands(MachineInstr &MI) const {
MIB.addImm(TFE->getImm());
}
- MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIB.cloneMemRefs(MI);
Addr64 = MIB;
} else {
// Atomics with return.
@@ -3749,7 +3749,7 @@ void SIInstrInfo::legalizeOperands(MachineInstr &MI) const {
.add(*SOffset)
.add(*Offset)
.addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc))
- .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ .cloneMemRefs(MI);
}
MI.removeFromParent();
@@ -3955,17 +3955,17 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
}
MachineInstr *NewInstr =
- BuildMI(*MBB, Inst, Inst.getDebugLoc(),
- get(AMDGPU::BUFFER_LOAD_DWORD_OFFEN), VDst)
- .add(*VAddr) // vaddr
- .add(*getNamedOperand(Inst, AMDGPU::OpName::sbase)) // srsrc
- .addImm(0) // soffset
- .addImm(Offset) // offset
- .addImm(getNamedOperand(Inst, AMDGPU::OpName::glc)->getImm())
- .addImm(0) // slc
- .addImm(0) // tfe
- .setMemRefs(Inst.memoperands_begin(), Inst.memoperands_end())
- .getInstr();
+ BuildMI(*MBB, Inst, Inst.getDebugLoc(),
+ get(AMDGPU::BUFFER_LOAD_DWORD_OFFEN), VDst)
+ .add(*VAddr) // vaddr
+ .add(*getNamedOperand(Inst, AMDGPU::OpName::sbase)) // srsrc
+ .addImm(0) // soffset
+ .addImm(Offset) // offset
+ .addImm(getNamedOperand(Inst, AMDGPU::OpName::glc)->getImm())
+ .addImm(0) // slc
+ .addImm(0) // tfe
+ .cloneMemRefs(Inst)
+ .getInstr();
MRI.replaceRegWith(getNamedOperand(Inst, AMDGPU::OpName::sdst)->getReg(),
VDst);
diff --git a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
index 4b537540046..3e1da95e2d2 100644
--- a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
@@ -528,13 +528,12 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair(
.addReg(AddrReg->getReg());
}
- MachineInstrBuilder Read2 =
- BuildMI(*MBB, CI.Paired, DL, Read2Desc, DestReg)
- .addReg(BaseReg, BaseRegFlags) // addr
- .addImm(NewOffset0) // offset0
- .addImm(NewOffset1) // offset1
- .addImm(0) // gds
- .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));
+ MachineInstrBuilder Read2 = BuildMI(*MBB, CI.Paired, DL, Read2Desc, DestReg)
+ .addReg(BaseReg, BaseRegFlags) // addr
+ .addImm(NewOffset0) // offset0
+ .addImm(NewOffset1) // offset1
+ .addImm(0) // gds
+ .cloneMergedMemRefs({&*CI.I, &*CI.Paired});
(void)Read2;
@@ -616,15 +615,14 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
.addReg(AddrReg->getReg());
}
- MachineInstrBuilder Write2 =
- BuildMI(*MBB, CI.Paired, DL, Write2Desc)
- .addReg(BaseReg, BaseRegFlags) // addr
- .add(*Data0) // data0
- .add(*Data1) // data1
- .addImm(NewOffset0) // offset0
- .addImm(NewOffset1) // offset1
- .addImm(0) // gds
- .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));
+ MachineInstrBuilder Write2 = BuildMI(*MBB, CI.Paired, DL, Write2Desc)
+ .addReg(BaseReg, BaseRegFlags) // addr
+ .add(*Data0) // data0
+ .add(*Data1) // data1
+ .addImm(NewOffset0) // offset0
+ .addImm(NewOffset1) // offset1
+ .addImm(0) // gds
+ .cloneMergedMemRefs({&*CI.I, &*CI.Paired});
moveInstsAfter(Write2, CI.InstsToMove);
@@ -652,7 +650,7 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeSBufferLoadImmPair(
.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::sbase))
.addImm(MergedOffset) // offset
.addImm(CI.GLC0) // glc
- .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));
+ .cloneMergedMemRefs({&*CI.I, &*CI.Paired});
unsigned SubRegIdx0 = CI.IsX2 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
unsigned SubRegIdx1 = CI.IsX2 ? AMDGPU::sub2_sub3 : AMDGPU::sub1;
@@ -711,7 +709,7 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeBufferLoadPair(
.addImm(CI.GLC0) // glc
.addImm(CI.SLC0) // slc
.addImm(0) // tfe
- .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));
+ .cloneMergedMemRefs({&*CI.I, &*CI.Paired});
unsigned SubRegIdx0 = CI.IsX2 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
unsigned SubRegIdx1 = CI.IsX2 ? AMDGPU::sub2_sub3 : AMDGPU::sub1;
@@ -811,10 +809,10 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeBufferStorePair(
MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
.addImm(std::min(CI.Offset0, CI.Offset1)) // offset
- .addImm(CI.GLC0) // glc
- .addImm(CI.SLC0) // slc
- .addImm(0) // tfe
- .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));
+ .addImm(CI.GLC0) // glc
+ .addImm(CI.SLC0) // slc
+ .addImm(0) // tfe
+ .cloneMergedMemRefs({&*CI.I, &*CI.Paired});
moveInstsAfter(MIB, CI.InstsToMove);
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
index 624607f6ea5..669a60fadd4 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -495,15 +495,16 @@ static bool buildMUBUFOffsetLoadStore(const SIInstrInfo *TII,
return false;
const MachineOperand *Reg = TII->getNamedOperand(*MI, AMDGPU::OpName::vdata);
- MachineInstrBuilder NewMI = BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
- .add(*Reg)
- .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc))
- .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::soffset))
- .addImm(Offset)
- .addImm(0) // glc
- .addImm(0) // slc
- .addImm(0) // tfe
- .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ MachineInstrBuilder NewMI =
+ BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
+ .add(*Reg)
+ .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc))
+ .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::soffset))
+ .addImm(Offset)
+ .addImm(0) // glc
+ .addImm(0) // slc
+ .addImm(0) // tfe
+ .cloneMemRefs(*MI);
const MachineOperand *VDataIn = TII->getNamedOperand(*MI,
AMDGPU::OpName::vdata_in);
diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
index b1c2031c7d7..a49a83ab9c5 100644
--- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -1590,11 +1590,10 @@ void ARMBaseInstrInfo::reMaterialize(MachineBasicBlock &MBB,
MachineFunction &MF = *MBB.getParent();
unsigned CPI = Orig.getOperand(1).getIndex();
unsigned PCLabelId = duplicateCPV(MF, CPI);
- MachineInstrBuilder MIB =
- BuildMI(MBB, I, Orig.getDebugLoc(), get(Opcode), DestReg)
- .addConstantPoolIndex(CPI)
- .addImm(PCLabelId);
- MIB->setMemRefs(Orig.memoperands_begin(), Orig.memoperands_end());
+ BuildMI(MBB, I, Orig.getDebugLoc(), get(Opcode), DestReg)
+ .addConstantPoolIndex(CPI)
+ .addImm(PCLabelId)
+ .cloneMemRefs(Orig);
break;
}
}
@@ -4534,9 +4533,9 @@ void ARMBaseInstrInfo::expandLoadStackGuardBase(MachineBasicBlock::iterator MI,
MIB = BuildMI(MBB, MI, DL, get(LoadOpc), Reg);
MIB.addReg(Reg, RegState::Kill)
- .addImm(0)
- .setMemRefs(MI->memoperands_begin(), MI->memoperands_end())
- .add(predOps(ARMCC::AL));
+ .addImm(0)
+ .cloneMemRefs(*MI)
+ .add(predOps(ARMCC::AL));
}
bool
diff --git a/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp b/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
index 5dac6ec0b79..b35a16b8a1e 100644
--- a/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
@@ -570,7 +570,7 @@ void ARMExpandPseudo::ExpandVLD(MachineBasicBlock::iterator &MBBI) {
TransferImpOps(MI, MIB, MIB);
// Transfer memoperands.
- MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIB.cloneMemRefs(MI);
MI.eraseFromParent();
}
@@ -645,7 +645,7 @@ void ARMExpandPseudo::ExpandVST(MachineBasicBlock::iterator &MBBI) {
TransferImpOps(MI, MIB, MIB);
// Transfer memoperands.
- MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIB.cloneMemRefs(MI);
MI.eraseFromParent();
}
@@ -735,7 +735,7 @@ void ARMExpandPseudo::ExpandLaneOp(MachineBasicBlock::iterator &MBBI) {
MIB.addReg(DstReg, RegState::ImplicitDefine | getDeadRegState(DstIsDead));
TransferImpOps(MI, MIB, MIB);
// Transfer memoperands.
- MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIB.cloneMemRefs(MI);
MI.eraseFromParent();
}
@@ -848,8 +848,8 @@ void ARMExpandPseudo::ExpandMOV32BitImm(MachineBasicBlock &MBB,
unsigned SOImmValV2 = ARM_AM::getSOImmTwoPartSecond(ImmVal);
LO16 = LO16.addImm(SOImmValV1);
HI16 = HI16.addImm(SOImmValV2);
- LO16->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- HI16->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ LO16.cloneMemRefs(MI);
+ HI16.cloneMemRefs(MI);
LO16.addImm(Pred).addReg(PredReg).add(condCodeOp());
HI16.addImm(Pred).addReg(PredReg).add(condCodeOp());
if (isCC)
@@ -899,8 +899,8 @@ void ARMExpandPseudo::ExpandMOV32BitImm(MachineBasicBlock &MBB,
}
}
- LO16->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- HI16->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ LO16.cloneMemRefs(MI);
+ HI16.cloneMemRefs(MI);
LO16.addImm(Pred).addReg(PredReg);
HI16.addImm(Pred).addReg(PredReg);
@@ -1425,7 +1425,7 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
MIB.addExternalSymbol("__aeabi_read_tp", 0);
}
- MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIB.cloneMemRefs(MI);
TransferImpOps(MI, MIB, MIB);
MI.eraseFromParent();
return true;
@@ -1440,7 +1440,7 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewLdOpc), DstReg)
.add(MI.getOperand(1))
.add(predOps(ARMCC::AL));
- MIB1->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIB1.cloneMemRefs(MI);
MachineInstrBuilder MIB2 =
BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::tPICADD))
.addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
@@ -1544,7 +1544,7 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
if (isARM) {
MIB3.add(predOps(ARMCC::AL));
if (Opcode == ARM::MOV_ga_pcrel_ldr)
- MIB3->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIB3.cloneMemRefs(MI);
}
TransferImpOps(MI, MIB1, MIB3);
MI.eraseFromParent();
@@ -1596,7 +1596,7 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
// Add an implicit def for the super-register.
MIB.addReg(DstReg, RegState::ImplicitDefine | getDeadRegState(DstIsDead));
TransferImpOps(MI, MIB, MIB);
- MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIB.cloneMemRefs(MI);
MI.eraseFromParent();
return true;
}
@@ -1629,7 +1629,7 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
MIB->addRegisterKilled(SrcReg, TRI, true);
TransferImpOps(MI, MIB, MIB);
- MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIB.cloneMemRefs(MI);
MI.eraseFromParent();
return true;
}
diff --git a/llvm/lib/Target/ARM/ARMInstrInfo.cpp b/llvm/lib/Target/ARM/ARMInstrInfo.cpp
index 397c9dadb4a..0e3b42314f6 100644
--- a/llvm/lib/Target/ARM/ARMInstrInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMInstrInfo.cpp
@@ -132,7 +132,7 @@ void ARMInstrInfo::expandLoadStackGuard(MachineBasicBlock::iterator MI) const {
BuildMI(MBB, MI, DL, get(ARM::LDRi12), Reg)
.addReg(Reg, RegState::Kill)
.addImm(0)
- .setMemRefs(MI->memoperands_begin(), MI->memoperands_end())
+ .cloneMemRefs(*MI)
.add(predOps(ARMCC::AL));
}
diff --git a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index db5f28480e9..bb79783e15b 100644
--- a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -1303,7 +1303,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLSMultiple(MachineInstr *MI) {
MIB.add(MI->getOperand(OpNum));
// Transfer memoperands.
- MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ MIB.setMemRefs(MI->memoperands());
MBB.erase(MBBI);
return true;
@@ -1527,7 +1527,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLSDouble(MachineInstr &MI) const {
// Transfer implicit operands.
for (const MachineOperand &MO : MI.implicit_operands())
MIB.add(MO);
- MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIB.setMemRefs(MI.memoperands());
MBB.erase(MBBI);
return true;
@@ -2290,7 +2290,7 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
if (!isT2)
MIB.addReg(0);
MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
- MIB.setMemRefs(Op0->mergeMemRefsWith(*Op1));
+ MIB.cloneMergedMemRefs({Op0, Op1});
LLVM_DEBUG(dbgs() << "Formed " << *MIB << "\n");
++NumLDRDFormed;
} else {
@@ -2304,7 +2304,7 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
if (!isT2)
MIB.addReg(0);
MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
- MIB.setMemRefs(Op0->mergeMemRefsWith(*Op1));
+ MIB.cloneMergedMemRefs({Op0, Op1});
LLVM_DEBUG(dbgs() << "Formed " << *MIB << "\n");
++NumSTRDFormed;
}
diff --git a/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp b/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
index abf54ba7e87..2f510c52640 100644
--- a/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
+++ b/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
@@ -485,7 +485,7 @@ Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
.addReg(Rt, IsStore ? 0 : RegState::Define);
// Transfer memoperands.
- MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ MIB.setMemRefs(MI->memoperands());
// Transfer MI flags.
MIB.setMIFlags(MI->getFlags());
@@ -605,7 +605,7 @@ Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
MIB.add(MI->getOperand(OpNum));
// Transfer memoperands.
- MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ MIB.setMemRefs(MI->memoperands());
// Transfer MI flags.
MIB.setMIFlags(MI->getFlags());
diff --git a/llvm/lib/Target/AVR/AVRExpandPseudoInsts.cpp b/llvm/lib/Target/AVR/AVRExpandPseudoInsts.cpp
index fec7080081d..53ae649cf6b 100644
--- a/llvm/lib/Target/AVR/AVRExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/AVR/AVRExpandPseudoInsts.cpp
@@ -568,8 +568,8 @@ bool AVRExpandPseudo::expand<AVR::LDSWRdK>(Block &MBB, BlockIt MBBI) {
llvm_unreachable("Unknown operand type!");
}
- MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIBLO.setMemRefs(MI.memoperands());
+ MIBHI.setMemRefs(MI.memoperands());
MI.eraseFromParent();
return true;
@@ -617,8 +617,8 @@ bool AVRExpandPseudo::expand<AVR::LDWRdPtr>(Block &MBB, BlockIt MBBI) {
buildMI(MBB, MBBI, AVR::POPRd).addReg(DstLoReg);
}
- MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIBLO.setMemRefs(MI.memoperands());
+ MIBHI.setMemRefs(MI.memoperands());
MI.eraseFromParent();
return true;
@@ -648,8 +648,8 @@ bool AVRExpandPseudo::expand<AVR::LDWRdPtrPi>(Block &MBB, BlockIt MBBI) {
.addReg(SrcReg, RegState::Define | getDeadRegState(SrcIsDead))
.addReg(SrcReg, RegState::Kill);
- MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIBLO.setMemRefs(MI.memoperands());
+ MIBHI.setMemRefs(MI.memoperands());
MI.eraseFromParent();
return true;
@@ -679,8 +679,8 @@ bool AVRExpandPseudo::expand<AVR::LDWRdPtrPd>(Block &MBB, BlockIt MBBI) {
.addReg(SrcReg, RegState::Define | getDeadRegState(SrcIsDead))
.addReg(SrcReg, RegState::Kill);
- MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIBLO.setMemRefs(MI.memoperands());
+ MIBHI.setMemRefs(MI.memoperands());
MI.eraseFromParent();
return true;
@@ -734,8 +734,8 @@ bool AVRExpandPseudo::expand<AVR::LDDWRdPtrQ>(Block &MBB, BlockIt MBBI) {
buildMI(MBB, MBBI, AVR::POPRd).addReg(DstLoReg);
}
- MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIBLO.setMemRefs(MI.memoperands());
+ MIBHI.setMemRefs(MI.memoperands());
MI.eraseFromParent();
return true;
@@ -782,8 +782,8 @@ bool AVRExpandPseudo::expand<AVR::LPMWRdZ>(Block &MBB, BlockIt MBBI) {
buildMI(MBB, MBBI, AVR::POPRd).addReg(DstLoReg);
}
- MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIBLO.setMemRefs(MI.memoperands());
+ MIBHI.setMemRefs(MI.memoperands());
MI.eraseFromParent();
return true;
@@ -1003,8 +1003,8 @@ bool AVRExpandPseudo::expand<AVR::STSWKRr>(Block &MBB, BlockIt MBBI) {
MIBLO.addReg(SrcLoReg, getKillRegState(SrcIsKill));
MIBHI.addReg(SrcHiReg, getKillRegState(SrcIsKill));
- MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIBLO.setMemRefs(MI.memoperands());
+ MIBHI.setMemRefs(MI.memoperands());
MI.eraseFromParent();
return true;
@@ -1031,8 +1031,8 @@ bool AVRExpandPseudo::expand<AVR::STWPtrRr>(Block &MBB, BlockIt MBBI) {
.addImm(1)
.addReg(SrcHiReg, getKillRegState(SrcIsKill));
- MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIBLO.setMemRefs(MI.memoperands());
+ MIBHI.setMemRefs(MI.memoperands());
MI.eraseFromParent();
return true;
@@ -1065,8 +1065,8 @@ bool AVRExpandPseudo::expand<AVR::STWPtrPiRr>(Block &MBB, BlockIt MBBI) {
.addReg(SrcHiReg, getKillRegState(SrcIsKill))
.addImm(Imm);
- MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIBLO.setMemRefs(MI.memoperands());
+ MIBHI.setMemRefs(MI.memoperands());
MI.eraseFromParent();
return true;
@@ -1099,8 +1099,8 @@ bool AVRExpandPseudo::expand<AVR::STWPtrPdRr>(Block &MBB, BlockIt MBBI) {
.addReg(SrcLoReg, getKillRegState(SrcIsKill))
.addImm(Imm);
- MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIBLO.setMemRefs(MI.memoperands());
+ MIBHI.setMemRefs(MI.memoperands());
MI.eraseFromParent();
return true;
@@ -1133,8 +1133,8 @@ bool AVRExpandPseudo::expand<AVR::STDWPtrQRr>(Block &MBB, BlockIt MBBI) {
.addImm(Imm + 1)
.addReg(SrcHiReg, getKillRegState(SrcIsKill));
- MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIBLO.setMemRefs(MI.memoperands());
+ MIBHI.setMemRefs(MI.memoperands());
MI.eraseFromParent();
return true;
@@ -1163,8 +1163,8 @@ bool AVRExpandPseudo::expand<AVR::INWRdA>(Block &MBB, BlockIt MBBI) {
.addReg(DstHiReg, RegState::Define | getDeadRegState(DstIsDead))
.addImm(Imm + 1);
- MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIBLO.setMemRefs(MI.memoperands());
+ MIBHI.setMemRefs(MI.memoperands());
MI.eraseFromParent();
return true;
@@ -1194,8 +1194,8 @@ bool AVRExpandPseudo::expand<AVR::OUTWARr>(Block &MBB, BlockIt MBBI) {
.addImm(Imm)
.addReg(SrcLoReg, getKillRegState(SrcIsKill));
- MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIBLO.setMemRefs(MI.memoperands());
+ MIBHI.setMemRefs(MI.memoperands());
MI.eraseFromParent();
return true;
diff --git a/llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp b/llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp
index cbce61bc63c..35b0ff905e4 100644
--- a/llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp
@@ -1629,7 +1629,7 @@ bool HCE::replaceInstrExact(const ExtDesc &ED, Register ExtR) {
else
MIB.add(MachineOperand(ExtR));
}
- MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIB.cloneMemRefs(MI);
MBB.erase(MI);
return true;
}
@@ -1680,7 +1680,7 @@ bool HCE::replaceInstrExact(const ExtDesc &ED, Register ExtR) {
// Add the stored value for stores.
if (MI.mayStore())
MIB.add(getStoredValueOp(MI));
- MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIB.cloneMemRefs(MI);
MBB.erase(MI);
return true;
}
@@ -1797,7 +1797,7 @@ bool HCE::replaceInstrExpr(const ExtDesc &ED, const ExtenderInit &ExtI,
// Add the stored value for stores.
if (MI.mayStore())
MIB.add(getStoredValueOp(MI));
- MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIB.cloneMemRefs(MI);
MBB.erase(MI);
return true;
}
diff --git a/llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp b/llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
index 557e6384be6..8e2f5093038 100644
--- a/llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
@@ -731,9 +731,7 @@ void HexagonEarlyIfConversion::predicateInstr(MachineBasicBlock *ToB,
MIB.add(MO);
// Set memory references.
- MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
- MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
- MIB.setMemRefs(MMOBegin, MMOEnd);
+ MIB.cloneMemRefs(*MI);
MI->eraseFromParent();
return;
diff --git a/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp b/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp
index 7e774674e0c..1a762c0c9de 100644
--- a/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp
@@ -891,14 +891,7 @@ void HexagonExpandCondsets::predicateAt(const MachineOperand &DefOp,
MB.add(MO);
Ox++;
}
-
- MachineFunction &MF = *B.getParent();
- MachineInstr::mmo_iterator I = MI.memoperands_begin();
- unsigned NR = std::distance(I, MI.memoperands_end());
- MachineInstr::mmo_iterator MemRefs = MF.allocateMemRefsArray(NR);
- for (unsigned i = 0; i < NR; ++i)
- MemRefs[i] = *I++;
- MB.setMemRefs(MemRefs, MemRefs+NR);
+ MB.cloneMemRefs(MI);
MachineInstr *NewI = MB;
NewI->clearKillInfo();
diff --git a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
index 97b02e2b34c..0f527d3083e 100644
--- a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
@@ -1579,10 +1579,10 @@ bool HexagonFrameLowering::expandStoreInt(MachineBasicBlock &B,
// S2_storeri_io FI, 0, TmpR
BuildMI(B, It, DL, HII.get(Hexagon::S2_storeri_io))
- .addFrameIndex(FI)
- .addImm(0)
- .addReg(TmpR, RegState::Kill)
- .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ .addFrameIndex(FI)
+ .addImm(0)
+ .addReg(TmpR, RegState::Kill)
+ .cloneMemRefs(*MI);
NewRegs.push_back(TmpR);
B.erase(It);
@@ -1604,9 +1604,9 @@ bool HexagonFrameLowering::expandLoadInt(MachineBasicBlock &B,
// TmpR = L2_loadri_io FI, 0
unsigned TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
BuildMI(B, It, DL, HII.get(Hexagon::L2_loadri_io), TmpR)
- .addFrameIndex(FI)
- .addImm(0)
- .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ .addFrameIndex(FI)
+ .addImm(0)
+ .cloneMemRefs(*MI);
// DstR = C2_tfrrp TmpR if DstR is a predicate register
// DstR = A2_tfrrcr TmpR if DstR is a modifier register
@@ -1731,10 +1731,10 @@ bool HexagonFrameLowering::expandStoreVec2(MachineBasicBlock &B,
StoreOpc = NeedAlign <= HasAlign ? Hexagon::V6_vS32b_ai
: Hexagon::V6_vS32Ub_ai;
BuildMI(B, It, DL, HII.get(StoreOpc))
- .addFrameIndex(FI)
- .addImm(0)
- .addReg(SrcLo, getKillRegState(IsKill))
- .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ .addFrameIndex(FI)
+ .addImm(0)
+ .addReg(SrcLo, getKillRegState(IsKill))
+ .cloneMemRefs(*MI);
}
// Store high part.
@@ -1742,10 +1742,10 @@ bool HexagonFrameLowering::expandStoreVec2(MachineBasicBlock &B,
StoreOpc = NeedAlign <= MinAlign(HasAlign, Size) ? Hexagon::V6_vS32b_ai
: Hexagon::V6_vS32Ub_ai;
BuildMI(B, It, DL, HII.get(StoreOpc))
- .addFrameIndex(FI)
- .addImm(Size)
- .addReg(SrcHi, getKillRegState(IsKill))
- .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ .addFrameIndex(FI)
+ .addImm(Size)
+ .addReg(SrcHi, getKillRegState(IsKill))
+ .cloneMemRefs(*MI);
}
B.erase(It);
@@ -1777,17 +1777,17 @@ bool HexagonFrameLowering::expandLoadVec2(MachineBasicBlock &B,
LoadOpc = NeedAlign <= HasAlign ? Hexagon::V6_vL32b_ai
: Hexagon::V6_vL32Ub_ai;
BuildMI(B, It, DL, HII.get(LoadOpc), DstLo)
- .addFrameIndex(FI)
- .addImm(0)
- .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ .addFrameIndex(FI)
+ .addImm(0)
+ .cloneMemRefs(*MI);
// Load high part.
LoadOpc = NeedAlign <= MinAlign(HasAlign, Size) ? Hexagon::V6_vL32b_ai
: Hexagon::V6_vL32Ub_ai;
BuildMI(B, It, DL, HII.get(LoadOpc), DstHi)
- .addFrameIndex(FI)
- .addImm(Size)
- .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ .addFrameIndex(FI)
+ .addImm(Size)
+ .cloneMemRefs(*MI);
B.erase(It);
return true;
@@ -1813,10 +1813,10 @@ bool HexagonFrameLowering::expandStoreVec(MachineBasicBlock &B,
unsigned StoreOpc = NeedAlign <= HasAlign ? Hexagon::V6_vS32b_ai
: Hexagon::V6_vS32Ub_ai;
BuildMI(B, It, DL, HII.get(StoreOpc))
- .addFrameIndex(FI)
- .addImm(0)
- .addReg(SrcR, getKillRegState(IsKill))
- .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ .addFrameIndex(FI)
+ .addImm(0)
+ .addReg(SrcR, getKillRegState(IsKill))
+ .cloneMemRefs(*MI);
B.erase(It);
return true;
@@ -1841,9 +1841,9 @@ bool HexagonFrameLowering::expandLoadVec(MachineBasicBlock &B,
unsigned LoadOpc = NeedAlign <= HasAlign ? Hexagon::V6_vL32b_ai
: Hexagon::V6_vL32Ub_ai;
BuildMI(B, It, DL, HII.get(LoadOpc), DstR)
- .addFrameIndex(FI)
- .addImm(0)
- .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ .addFrameIndex(FI)
+ .addImm(0)
+ .cloneMemRefs(*MI);
B.erase(It);
return true;
diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
index 6019c7c5d02..03ec645a82c 100644
--- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -1086,19 +1086,18 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
unsigned NewOpc = Aligned ? Hexagon::V6_vS32b_ai : Hexagon::V6_vS32Ub_ai;
unsigned Offset = HRI.getSpillSize(Hexagon::HvxVRRegClass);
- MachineInstr *MI1New =
- BuildMI(MBB, MI, DL, get(NewOpc))
- .add(MI.getOperand(0))
- .addImm(MI.getOperand(1).getImm())
- .addReg(SrcSubLo)
- .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MachineInstr *MI1New = BuildMI(MBB, MI, DL, get(NewOpc))
+ .add(MI.getOperand(0))
+ .addImm(MI.getOperand(1).getImm())
+ .addReg(SrcSubLo)
+ .cloneMemRefs(MI);
MI1New->getOperand(0).setIsKill(false);
BuildMI(MBB, MI, DL, get(NewOpc))
.add(MI.getOperand(0))
// The Vectors are indexed in multiples of vector size.
.addImm(MI.getOperand(1).getImm() + Offset)
.addReg(SrcSubHi)
- .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ .cloneMemRefs(MI);
MBB.erase(MI);
return true;
}
@@ -1111,15 +1110,15 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
MachineInstr *MI1New = BuildMI(MBB, MI, DL, get(NewOpc),
HRI.getSubReg(DstReg, Hexagon::vsub_lo))
- .add(MI.getOperand(1))
- .addImm(MI.getOperand(2).getImm())
- .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ .add(MI.getOperand(1))
+ .addImm(MI.getOperand(2).getImm())
+ .cloneMemRefs(MI);
MI1New->getOperand(1).setIsKill(false);
BuildMI(MBB, MI, DL, get(NewOpc), HRI.getSubReg(DstReg, Hexagon::vsub_hi))
.add(MI.getOperand(1))
// The Vectors are indexed in multiples of vector size.
.addImm(MI.getOperand(2).getImm() + Offset)
- .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ .cloneMemRefs(MI);
MBB.erase(MI);
return true;
}
diff --git a/llvm/lib/Target/Lanai/LanaiMemAluCombiner.cpp b/llvm/lib/Target/Lanai/LanaiMemAluCombiner.cpp
index 35e2542dfb1..54500b0e52e 100644
--- a/llvm/lib/Target/Lanai/LanaiMemAluCombiner.cpp
+++ b/llvm/lib/Target/Lanai/LanaiMemAluCombiner.cpp
@@ -277,8 +277,7 @@ void LanaiMemAluCombiner::insertMergedInstruction(MachineBasicBlock *BB,
InstrBuilder.addImm(LPAC::makePostOp(AluOpcode));
// Transfer memory operands.
- InstrBuilder->setMemRefs(MemInstr->memoperands_begin(),
- MemInstr->memoperands_end());
+ InstrBuilder.setMemRefs(MemInstr->memoperands());
}
// Function determines if ALU operation (in alu_iter) can be combined with
diff --git a/llvm/lib/Target/Mips/MipsInstrInfo.cpp b/llvm/lib/Target/Mips/MipsInstrInfo.cpp
index 55d2e547dbf..bfb4c775205 100644
--- a/llvm/lib/Target/Mips/MipsInstrInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsInstrInfo.cpp
@@ -663,8 +663,7 @@ MipsInstrInfo::genInstrWithNewOpc(unsigned NewOpc,
}
MIB.copyImplicitOps(*I);
-
- MIB.setMemRefs(I->memoperands_begin(), I->memoperands_end());
+ MIB.cloneMemRefs(*I);
return MIB;
}
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index fc43b5d555a..c2798349dec 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -9964,10 +9964,6 @@ PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
const BasicBlock *BB = MBB->getBasicBlock();
MachineFunction::iterator I = ++MBB->getIterator();
- // Memory Reference
- MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
- MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
-
unsigned DstReg = MI.getOperand(0).getReg();
const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
@@ -10030,10 +10026,10 @@ PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
if (Subtarget.isPPC64() && Subtarget.isSVR4ABI()) {
setUsesTOCBasePtr(*MBB->getParent());
MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
- .addReg(PPC::X2)
- .addImm(TOCOffset)
- .addReg(BufReg);
- MIB.setMemRefs(MMOBegin, MMOEnd);
+ .addReg(PPC::X2)
+ .addImm(TOCOffset)
+ .addReg(BufReg)
+ .cloneMemRefs(MI);
}
// Naked functions never have a base pointer, and so we use r1. For all
@@ -10048,8 +10044,8 @@ PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
.addReg(BaseReg)
.addImm(BPOffset)
- .addReg(BufReg);
- MIB.setMemRefs(MMOBegin, MMOEnd);
+ .addReg(BufReg)
+ .cloneMemRefs(MI);
// Setup
MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
@@ -10082,8 +10078,7 @@ PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
.addImm(LabelOffset)
.addReg(BufReg);
}
-
- MIB.setMemRefs(MMOBegin, MMOEnd);
+ MIB.cloneMemRefs(MI);
BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);
mainMBB->addSuccessor(sinkMBB);
@@ -10107,10 +10102,6 @@ PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
MachineFunction *MF = MBB->getParent();
MachineRegisterInfo &MRI = MF->getRegInfo();
- // Memory Reference
- MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
- MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
-
MVT PVT = getPointerTy(MF->getDataLayout());
assert((PVT == MVT::i64 || PVT == MVT::i32) &&
"Invalid Pointer Size!");
@@ -10148,7 +10139,7 @@ PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
.addImm(0)
.addReg(BufReg);
}
- MIB.setMemRefs(MMOBegin, MMOEnd);
+ MIB.cloneMemRefs(MI);
// Reload IP
if (PVT == MVT::i64) {
@@ -10160,7 +10151,7 @@ PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
.addImm(LabelOffset)
.addReg(BufReg);
}
- MIB.setMemRefs(MMOBegin, MMOEnd);
+ MIB.cloneMemRefs(MI);
// Reload SP
if (PVT == MVT::i64) {
@@ -10172,7 +10163,7 @@ PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
.addImm(SPOffset)
.addReg(BufReg);
}
- MIB.setMemRefs(MMOBegin, MMOEnd);
+ MIB.cloneMemRefs(MI);
// Reload BP
if (PVT == MVT::i64) {
@@ -10184,16 +10175,15 @@ PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
.addImm(BPOffset)
.addReg(BufReg);
}
- MIB.setMemRefs(MMOBegin, MMOEnd);
+ MIB.cloneMemRefs(MI);
// Reload TOC
if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
setUsesTOCBasePtr(*MBB->getParent());
MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
- .addImm(TOCOffset)
- .addReg(BufReg);
-
- MIB.setMemRefs(MMOBegin, MMOEnd);
+ .addImm(TOCOffset)
+ .addReg(BufReg)
+ .cloneMemRefs(MI);
}
// Jump
diff --git a/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp b/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp
index 9edd1fc3640..668a77ac014 100644
--- a/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp
@@ -294,11 +294,10 @@ bool SystemZElimCompare::convertToLoadAndTest(
return false;
// Rebuild to get the CC operand in the right place.
- MachineInstr *BuiltMI =
- BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII->get(Opcode));
+ auto MIB = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII->get(Opcode));
for (const auto &MO : MI.operands())
- BuiltMI->addOperand(MO);
- BuiltMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ MIB.add(MO);
+ MIB.setMemRefs(MI.memoperands());
MI.eraseFromParent();
return true;
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index e7f1ea038d2..7ab4024d43c 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -6852,7 +6852,7 @@ MachineBasicBlock *SystemZTargetLowering::emitMemMemWrapper(
.addImm(ThisLength)
.add(SrcBase)
.addImm(SrcDisp)
- ->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+ .setMemRefs(MI.memoperands());
DestDisp += ThisLength;
SrcDisp += ThisLength;
Length -= ThisLength;
diff --git a/llvm/lib/Target/X86/X86FixupBWInsts.cpp b/llvm/lib/Target/X86/X86FixupBWInsts.cpp
index d9bf60c2c9f..ed297e67820 100644
--- a/llvm/lib/Target/X86/X86FixupBWInsts.cpp
+++ b/llvm/lib/Target/X86/X86FixupBWInsts.cpp
@@ -288,7 +288,7 @@ MachineInstr *FixupBWInstPass::tryReplaceLoad(unsigned New32BitOpcode,
for (unsigned i = 1; i < NumArgs; ++i)
MIB.add(MI->getOperand(i));
- MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ MIB.setMemRefs(MI->memoperands());
return MIB;
}
diff --git a/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp b/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp
index c17c51a7aea..0a4914d338e 100644
--- a/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp
+++ b/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp
@@ -1048,7 +1048,7 @@ void X86FlagsCopyLoweringPass::rewriteSetCC(MachineBasicBlock &TestMBB,
MIB.addReg(CondReg);
- MIB->setMemRefs(SetCCI.memoperands_begin(), SetCCI.memoperands_end());
+ MIB.setMemRefs(SetCCI.memoperands());
SetCCI.eraseFromParent();
return;
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 2042ee4c920..204238a6350 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -26795,8 +26795,8 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
// Memory Reference
assert(MI.hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
- MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
- MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
+ SmallVector<MachineMemOperand *, 1> MMOs(MI.memoperands_begin(),
+ MI.memoperands_end());
// Machine Information
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
@@ -26894,7 +26894,7 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
.add(Index)
.addDisp(Disp, UseFPOffset ? 4 : 0)
.add(Segment)
- .setMemRefs(MMOBegin, MMOEnd);
+ .setMemRefs(MMOs);
// Check if there is enough room left to pull this argument.
BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
@@ -26919,7 +26919,7 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
.add(Index)
.addDisp(Disp, 16)
.add(Segment)
- .setMemRefs(MMOBegin, MMOEnd);
+ .setMemRefs(MMOs);
// Zero-extend the offset
unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
@@ -26947,7 +26947,7 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
.addDisp(Disp, UseFPOffset ? 4 : 0)
.add(Segment)
.addReg(NextOffsetReg)
- .setMemRefs(MMOBegin, MMOEnd);
+ .setMemRefs(MMOs);
// Jump to endMBB
BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
@@ -26966,7 +26966,7 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
.add(Index)
.addDisp(Disp, 8)
.add(Segment)
- .setMemRefs(MMOBegin, MMOEnd);
+ .setMemRefs(MMOs);
// If we need to align it, do so. Otherwise, just copy the address
// to OverflowDestReg.
@@ -27003,7 +27003,7 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
.addDisp(Disp, 8)
.add(Segment)
.addReg(NextAddrReg)
- .setMemRefs(MMOBegin, MMOEnd);
+ .setMemRefs(MMOs);
// If we branched, emit the PHI to the front of endMBB.
if (offsetMBB) {
@@ -27977,8 +27977,8 @@ void X86TargetLowering::emitSetJmpShadowStackFix(MachineInstr &MI,
MachineInstrBuilder MIB;
// Memory Reference.
- MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
- MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
+ SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
+ MI.memoperands_end());
// Initialize a register with zero.
MVT PVT = getPointerTy(MF->getDataLayout());
@@ -28007,7 +28007,7 @@ void X86TargetLowering::emitSetJmpShadowStackFix(MachineInstr &MI,
MIB.add(MI.getOperand(MemOpndSlot + i));
}
MIB.addReg(SSPCopyReg);
- MIB.setMemRefs(MMOBegin, MMOEnd);
+ MIB.setMemRefs(MMOs);
}
MachineBasicBlock *
@@ -28023,8 +28023,8 @@ X86TargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
MachineFunction::iterator I = ++MBB->getIterator();
// Memory Reference
- MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
- MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
+ SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
+ MI.memoperands_end());
unsigned DstReg;
unsigned MemOpndSlot = 0;
@@ -28118,7 +28118,7 @@ X86TargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
MIB.addReg(LabelReg);
else
MIB.addMBB(restoreMBB);
- MIB.setMemRefs(MMOBegin, MMOEnd);
+ MIB.setMemRefs(MMOs);
if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
emitSetJmpShadowStackFix(MI, thisMBB);
@@ -28179,8 +28179,8 @@ X86TargetLowering::emitLongJmpShadowStackFix(MachineInstr &MI,
MachineRegisterInfo &MRI = MF->getRegInfo();
// Memory Reference
- MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
- MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
+ SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
+ MI.memoperands_end());
MVT PVT = getPointerTy(MF->getDataLayout());
const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
@@ -28267,7 +28267,7 @@ X86TargetLowering::emitLongJmpShadowStackFix(MachineInstr &MI,
else
MIB.add(MI.getOperand(i));
}
- MIB.setMemRefs(MMOBegin, MMOEnd);
+ MIB.setMemRefs(MMOs);
// Subtract the current SSP from the previous SSP.
unsigned SspSubReg = MRI.createVirtualRegister(PtrRC);
@@ -28351,8 +28351,8 @@ X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
MachineRegisterInfo &MRI = MF->getRegInfo();
// Memory Reference
- MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
- MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
+ SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
+ MI.memoperands_end());
MVT PVT = getPointerTy(MF->getDataLayout());
assert((PVT == MVT::i64 || PVT == MVT::i32) &&
@@ -28385,7 +28385,7 @@ X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), FP);
for (unsigned i = 0; i < X86::AddrNumOperands; ++i)
MIB.add(MI.getOperand(i));
- MIB.setMemRefs(MMOBegin, MMOEnd);
+ MIB.setMemRefs(MMOs);
// Reload IP
MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
@@ -28395,7 +28395,7 @@ X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
else
MIB.add(MI.getOperand(i));
}
- MIB.setMemRefs(MMOBegin, MMOEnd);
+ MIB.setMemRefs(MMOs);
// Reload SP
MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), SP);
@@ -28405,7 +28405,7 @@ X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
else
MIB.add(MI.getOperand(i));
}
- MIB.setMemRefs(MMOBegin, MMOEnd);
+ MIB.setMemRefs(MMOs);
// Jump
BuildMI(*thisMBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 4073f455a81..513011a1cf6 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -3308,24 +3308,21 @@ void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
.addReg(SrcReg, getKillRegState(isKill));
}
-void X86InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
- bool isKill,
- SmallVectorImpl<MachineOperand> &Addr,
- const TargetRegisterClass *RC,
- MachineInstr::mmo_iterator MMOBegin,
- MachineInstr::mmo_iterator MMOEnd,
- SmallVectorImpl<MachineInstr*> &NewMIs) const {
+void X86InstrInfo::storeRegToAddr(
+ MachineFunction &MF, unsigned SrcReg, bool isKill,
+ SmallVectorImpl<MachineOperand> &Addr, const TargetRegisterClass *RC,
+ ArrayRef<MachineMemOperand *> MMOs,
+ SmallVectorImpl<MachineInstr *> &NewMIs) const {
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
- bool isAligned = MMOBegin != MMOEnd &&
- (*MMOBegin)->getAlignment() >= Alignment;
+ bool isAligned = !MMOs.empty() && MMOs.front()->getAlignment() >= Alignment;
unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget);
DebugLoc DL;
MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc));
for (unsigned i = 0, e = Addr.size(); i != e; ++i)
MIB.add(Addr[i]);
MIB.addReg(SrcReg, getKillRegState(isKill));
- (*MIB).setMemRefs(MMOBegin, MMOEnd);
+ MIB.setMemRefs(MMOs);
NewMIs.push_back(MIB);
}
@@ -3345,22 +3342,20 @@ void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
addFrameReference(BuildMI(MBB, MI, DL, get(Opc), DestReg), FrameIdx);
}
-void X86InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
- SmallVectorImpl<MachineOperand> &Addr,
- const TargetRegisterClass *RC,
- MachineInstr::mmo_iterator MMOBegin,
- MachineInstr::mmo_iterator MMOEnd,
- SmallVectorImpl<MachineInstr*> &NewMIs) const {
+void X86InstrInfo::loadRegFromAddr(
+ MachineFunction &MF, unsigned DestReg,
+ SmallVectorImpl<MachineOperand> &Addr, const TargetRegisterClass *RC,
+ ArrayRef<MachineMemOperand *> MMOs,
+ SmallVectorImpl<MachineInstr *> &NewMIs) const {
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
- bool isAligned = MMOBegin != MMOEnd &&
- (*MMOBegin)->getAlignment() >= Alignment;
+ bool isAligned = !MMOs.empty() && MMOs.front()->getAlignment() >= Alignment;
unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget);
DebugLoc DL;
MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg);
for (unsigned i = 0, e = Addr.size(); i != e; ++i)
MIB.add(Addr[i]);
- (*MIB).setMemRefs(MMOBegin, MMOEnd);
+ MIB.setMemRefs(MMOs);
NewMIs.push_back(MIB);
}
@@ -5450,9 +5445,8 @@ bool X86InstrInfo::unfoldMemoryOperand(
// Emit the load instruction.
if (UnfoldLoad) {
- std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator> MMOs =
- MF.extractLoadMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- loadRegFromAddr(MF, Reg, AddrOps, RC, MMOs.first, MMOs.second, NewMIs);
+ auto MMOs = extractLoadMMOs(MI.memoperands(), MF);
+ loadRegFromAddr(MF, Reg, AddrOps, RC, MMOs, NewMIs);
if (UnfoldStore) {
// Address operands cannot be marked isKill.
for (unsigned i = 1; i != 1 + X86::AddrNumOperands; ++i) {
@@ -5517,9 +5511,8 @@ bool X86InstrInfo::unfoldMemoryOperand(
// Emit the store instruction.
if (UnfoldStore) {
const TargetRegisterClass *DstRC = getRegClass(MCID, 0, &RI, MF);
- std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator> MMOs =
- MF.extractStoreMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- storeRegToAddr(MF, Reg, true, AddrOps, DstRC, MMOs.first, MMOs.second, NewMIs);
+ auto MMOs = extractStoreMMOs(MI.memoperands(), MF);
+ storeRegToAddr(MF, Reg, true, AddrOps, DstRC, MMOs, NewMIs);
}
return true;
diff --git a/llvm/lib/Target/X86/X86InstrInfo.h b/llvm/lib/Target/X86/X86InstrInfo.h
index b1ceb767cce..4cad655ad52 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/llvm/lib/Target/X86/X86InstrInfo.h
@@ -359,8 +359,7 @@ public:
void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
SmallVectorImpl<MachineOperand> &Addr,
const TargetRegisterClass *RC,
- MachineInstr::mmo_iterator MMOBegin,
- MachineInstr::mmo_iterator MMOEnd,
+ ArrayRef<MachineMemOperand *> MMOs,
SmallVectorImpl<MachineInstr *> &NewMIs) const;
void loadRegFromStackSlot(MachineBasicBlock &MBB,
@@ -371,8 +370,7 @@ public:
void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
SmallVectorImpl<MachineOperand> &Addr,
const TargetRegisterClass *RC,
- MachineInstr::mmo_iterator MMOBegin,
- MachineInstr::mmo_iterator MMOEnd,
+ ArrayRef<MachineMemOperand *> MMOs,
SmallVectorImpl<MachineInstr *> &NewMIs) const;
bool expandPostRAPseudo(MachineInstr &MI) const override;
OpenPOWER on IntegriCloud