author     Jakob Stoklund Olesen <stoklund@2pi.dk>   2010-07-09 17:29:08 +0000
committer  Jakob Stoklund Olesen <stoklund@2pi.dk>   2010-07-09 17:29:08 +0000
commit     bd953d1805f92576e555ffdc276f18a47caa5f1b (patch)
tree       50662ad92152173744808b70fa448c4e707be6c1 /llvm/lib
parent     9d5ae034047b9872b6583fe2f3989f09485ac512 (diff)
Change TII::foldMemoryOperand API to require the machine instruction to be
inserted in a MBB, and return an already inserted MI.

This target API change is necessary to allow foldMemoryOperand to call
storeToStackSlot and loadFromStackSlot when folding a COPY to a stack slot
reference in a target independent way. The foldMemoryOperandImpl hook is
going to change in the same way, but I'll wait until COPY folding is
actually implemented. Most targets only fold copies and won't need to
specialize this hook at all.

llvm-svn: 107991
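A minimal sketch of the call-site pattern this change implies, condensed from the callers updated in the diff below (TII, LIS, MI, Ops, and FrameIndex stand in for the analogous names at each call site; this is an illustration, not code from the commit):

    // Old API: the hook returned an unlinked instruction, and each caller
    // had to splice it into the block and erase the old one itself:
    //   MachineInstr *FoldMI = TII->foldMemoryOperand(MF, MI, Ops, FrameIndex);
    //   MBB.insert(MBB.erase(MI), FoldMI);
    //
    // New API: MI must already be inserted in a basic block, and the
    // returned instruction is already linked in, so the caller only
    // erases the old instruction:
    MachineInstr *FoldMI = TII->foldMemoryOperand(MI, Ops, FrameIndex);
    if (FoldMI) {
      LIS->ReplaceMachineInstrInMaps(MI, FoldMI);
      MI->eraseFromParent(); // FoldMI remains; foldMemoryOperand inserted it
    }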
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/CodeGen/InlineSpiller.cpp         |  5
-rw-r--r--  llvm/lib/CodeGen/LiveIntervalAnalysis.cpp  |  8
-rw-r--r--  llvm/lib/CodeGen/PreAllocSplitting.cpp     | 19
-rw-r--r--  llvm/lib/CodeGen/TargetInstrInfoImpl.cpp   | 15
-rw-r--r--  llvm/lib/CodeGen/VirtRegRewriter.cpp       | 16
5 files changed, 32 insertions, 31 deletions
diff --git a/llvm/lib/CodeGen/InlineSpiller.cpp b/llvm/lib/CodeGen/InlineSpiller.cpp
index 076d2308ce3..12adcaa3a22 100644
--- a/llvm/lib/CodeGen/InlineSpiller.cpp
+++ b/llvm/lib/CodeGen/InlineSpiller.cpp
@@ -282,13 +282,12 @@ bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
FoldOps.push_back(Idx);
}
- MachineInstr *FoldMI = tii_.foldMemoryOperand(mf_, MI, FoldOps, stackSlot_);
+ MachineInstr *FoldMI = tii_.foldMemoryOperand(MI, FoldOps, stackSlot_);
if (!FoldMI)
return false;
- MachineBasicBlock &MBB = *MI->getParent();
lis_.ReplaceMachineInstrInMaps(MI, FoldMI);
vrm_.addSpillSlotUse(stackSlot_, FoldMI);
- MBB.insert(MBB.erase(MI), FoldMI);
+ MI->eraseFromParent();
DEBUG(dbgs() << "\tfolded: " << *FoldMI);
return true;
}
diff --git a/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp b/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
index 74ca497624f..742dc64a6be 100644
--- a/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
+++ b/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
@@ -946,22 +946,22 @@ bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
if (DefMI && (MRInfo & VirtRegMap::isMod))
return false;
- MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
- : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
+ MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
+ : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
if (fmi) {
// Remember this instruction uses the spill slot.
if (isSS) vrm.addSpillSlotUse(Slot, fmi);
// Attempt to fold the memory reference into the instruction. If
// we can do this, we don't need to insert spill code.
- MachineBasicBlock &MBB = *MI->getParent();
if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
vrm.transferSpillPts(MI, fmi);
vrm.transferRestorePts(MI, fmi);
vrm.transferEmergencySpills(MI, fmi);
ReplaceMachineInstrInMaps(MI, fmi);
- MI = MBB.insert(MBB.erase(MI), fmi);
+ MI->eraseFromParent();
+ MI = fmi;
++numFolds;
return true;
}
diff --git a/llvm/lib/CodeGen/PreAllocSplitting.cpp b/llvm/lib/CodeGen/PreAllocSplitting.cpp
index fbe99188cc1..fb2f9093555 100644
--- a/llvm/lib/CodeGen/PreAllocSplitting.cpp
+++ b/llvm/lib/CodeGen/PreAllocSplitting.cpp
@@ -863,12 +863,11 @@ MachineInstr* PreAllocSplitting::FoldSpill(unsigned vreg,
SS = MFI->CreateSpillStackObject(RC->getSize(), RC->getAlignment());
}
- MachineInstr* FMI = TII->foldMemoryOperand(*MBB->getParent(),
- FoldPt, Ops, SS);
+ MachineInstr* FMI = TII->foldMemoryOperand(FoldPt, Ops, SS);
if (FMI) {
LIs->ReplaceMachineInstrInMaps(FoldPt, FMI);
- FMI = MBB->insert(MBB->erase(FoldPt), FMI);
+ FoldPt->eraseFromParent();
++NumFolds;
IntervalSSMap[vreg] = SS;
@@ -944,12 +943,11 @@ MachineInstr* PreAllocSplitting::FoldRestore(unsigned vreg,
if (!TII->canFoldMemoryOperand(FoldPt, Ops))
return 0;
- MachineInstr* FMI = TII->foldMemoryOperand(*MBB->getParent(),
- FoldPt, Ops, SS);
+ MachineInstr* FMI = TII->foldMemoryOperand(FoldPt, Ops, SS);
if (FMI) {
LIs->ReplaceMachineInstrInMaps(FoldPt, FMI);
- FMI = MBB->insert(MBB->erase(FoldPt), FMI);
+ FoldPt->eraseFromParent();
++NumRestoreFolds;
}
@@ -1255,9 +1253,7 @@ bool PreAllocSplitting::removeDeadSpills(SmallPtrSet<LiveInterval*, 8>& split) {
Ops.push_back(OpIdx);
if (!TII->canFoldMemoryOperand(use, Ops)) continue;
- MachineInstr* NewMI =
- TII->foldMemoryOperand(*use->getParent()->getParent(),
- use, Ops, FrameIndex);
+ MachineInstr* NewMI = TII->foldMemoryOperand(use, Ops, FrameIndex);
if (!NewMI) continue;
@@ -1267,10 +1263,9 @@ bool PreAllocSplitting::removeDeadSpills(SmallPtrSet<LiveInterval*, 8>& split) {
(*LI)->removeValNo(CurrVN);
DefMI->eraseFromParent();
- MachineBasicBlock* MBB = use->getParent();
- NewMI = MBB->insert(MBB->erase(use), NewMI);
+ use->eraseFromParent();
VNUseCount[CurrVN].erase(use);
-
+
// Remove deleted instructions. Note that we need to remove them from
// the VNInfo->use map as well, just to be safe.
for (SmallPtrSet<MachineInstr*, 4>::iterator II =
diff --git a/llvm/lib/CodeGen/TargetInstrInfoImpl.cpp b/llvm/lib/CodeGen/TargetInstrInfoImpl.cpp
index ff8d0ede827..cc6431dca9a 100644
--- a/llvm/lib/CodeGen/TargetInstrInfoImpl.cpp
+++ b/llvm/lib/CodeGen/TargetInstrInfoImpl.cpp
@@ -197,8 +197,7 @@ TargetInstrInfoImpl::GetFunctionSizeInBytes(const MachineFunction &MF) const {
/// removing the old instruction and adding the new one in the instruction
/// stream.
MachineInstr*
-TargetInstrInfo::foldMemoryOperand(MachineFunction &MF,
- MachineInstr* MI,
+TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
const SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const {
unsigned Flags = 0;
@@ -208,10 +207,15 @@ TargetInstrInfo::foldMemoryOperand(MachineFunction &MF,
else
Flags |= MachineMemOperand::MOLoad;
+ MachineBasicBlock &MBB = *MI->getParent();
+ MachineFunction &MF = *MBB.getParent();
+
// Ask the target to do the actual folding.
MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, FrameIndex);
if (!NewMI) return 0;
+ NewMI = MBB.insert(MI, NewMI);
+
assert((!(Flags & MachineMemOperand::MOStore) ||
NewMI->getDesc().mayStore()) &&
"Folded a def to a non-store!");
@@ -234,8 +238,7 @@ TargetInstrInfo::foldMemoryOperand(MachineFunction &MF,
/// of any load and store from / to any address, not just from a specific
/// stack slot.
MachineInstr*
-TargetInstrInfo::foldMemoryOperand(MachineFunction &MF,
- MachineInstr* MI,
+TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
const SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
assert(LoadMI->getDesc().canFoldAsLoad() && "LoadMI isn't foldable!");
@@ -243,11 +246,15 @@ TargetInstrInfo::foldMemoryOperand(MachineFunction &MF,
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
assert(MI->getOperand(Ops[i]).isUse() && "Folding load into def!");
#endif
+ MachineBasicBlock &MBB = *MI->getParent();
+ MachineFunction &MF = *MBB.getParent();
// Ask the target to do the actual folding.
MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, LoadMI);
if (!NewMI) return 0;
+ NewMI = MBB.insert(MI, NewMI);
+
// Copy the memoperands from the load to the folded instruction.
NewMI->setMemRefs(LoadMI->memoperands_begin(),
LoadMI->memoperands_end());
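Note that helpers such as unfoldMemoryOperand and commuteInstruction still return unlinked instructions, which is why the VirtRegRewriter.cpp hunks below gain explicit MBB->insert calls before folding. A hedged sketch of that pattern, condensed from the hunks below (not code from the commit):

    // commuteInstruction returns a new, unlinked instruction, so it must be
    // inserted first to satisfy the new foldMemoryOperand precondition:
    MachineInstr *CommutedMI = TII->commuteInstruction(DefMI, true);
    if (!CommutedMI)
      return false;
    MBB->insert(MII, CommutedMI);
    MachineInstr *FoldedMI = TII->foldMemoryOperand(CommutedMI, Ops, SS);
    CommutedMI->eraseFromParent(); // folded or not, the commuted copy is dead
    if (!FoldedMI)
      return false;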
diff --git a/llvm/lib/CodeGen/VirtRegRewriter.cpp b/llvm/lib/CodeGen/VirtRegRewriter.cpp
index d8f02495d40..7d48eac300d 100644
--- a/llvm/lib/CodeGen/VirtRegRewriter.cpp
+++ b/llvm/lib/CodeGen/VirtRegRewriter.cpp
@@ -1409,25 +1409,25 @@ OptimizeByUnfold(MachineBasicBlock::iterator &MII,
if (TII->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) {
assert(NewMIs.size() == 1);
MachineInstr *NewMI = NewMIs.back();
+ MBB->insert(MII, NewMI);
NewMIs.clear();
int Idx = NewMI->findRegisterUseOperandIdx(VirtReg, false);
assert(Idx != -1);
SmallVector<unsigned, 1> Ops;
Ops.push_back(Idx);
- MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, NewMI, Ops, SS);
+ MachineInstr *FoldedMI = TII->foldMemoryOperand(NewMI, Ops, SS);
+ NewMI->eraseFromParent();
if (FoldedMI) {
VRM->addSpillSlotUse(SS, FoldedMI);
if (!VRM->hasPhys(UnfoldVR))
VRM->assignVirt2Phys(UnfoldVR, UnfoldPR);
VRM->virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
- MII = MBB->insert(MII, FoldedMI);
+ MII = FoldedMI;
InvalidateKills(MI, TRI, RegKills, KillOps);
VRM->RemoveMachineInstrFromMaps(&MI);
MBB->erase(&MI);
- MF.DeleteMachineInstr(NewMI);
return true;
}
- MF.DeleteMachineInstr(NewMI);
}
}
@@ -1479,7 +1479,6 @@ CommuteToFoldReload(MachineBasicBlock::iterator &MII,
if (MII == MBB->begin() || !MII->killsRegister(SrcReg))
return false;
- MachineFunction &MF = *MBB->getParent();
MachineInstr &MI = *MII;
MachineBasicBlock::iterator DefMII = prior(MII);
MachineInstr *DefMI = DefMII;
@@ -1510,11 +1509,12 @@ CommuteToFoldReload(MachineBasicBlock::iterator &MII,
MachineInstr *CommutedMI = TII->commuteInstruction(DefMI, true);
if (!CommutedMI)
return false;
+ MBB->insert(MII, CommutedMI);
SmallVector<unsigned, 1> Ops;
Ops.push_back(NewDstIdx);
- MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, CommutedMI, Ops, SS);
+ MachineInstr *FoldedMI = TII->foldMemoryOperand(CommutedMI, Ops, SS);
// Not needed since foldMemoryOperand returns new MI.
- MF.DeleteMachineInstr(CommutedMI);
+ CommutedMI->eraseFromParent();
if (!FoldedMI)
return false;
@@ -1527,7 +1527,7 @@ CommuteToFoldReload(MachineBasicBlock::iterator &MII,
MachineInstr *StoreMI = MII;
VRM->addSpillSlotUse(SS, StoreMI);
VRM->virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
- MII = MBB->insert(MII, FoldedMI); // Update MII to backtrack.
+ MII = FoldedMI; // Update MII to backtrack.
// Delete all 3 old instructions.
InvalidateKills(*ReloadMI, TRI, RegKills, KillOps);