author    Chris Lattner <sabre@nondot.org>  2005-09-19 06:56:21 +0000
committer Chris Lattner <sabre@nondot.org>  2005-09-19 06:56:21 +0000
commit    2f838f2192efa710de7024662200b36aa90392b0 (patch)
tree      dae02a5a02a90ffa31f13d3e6d069917f14a1aa8 /llvm/lib
parent    de3c87a2ab88e5cedac5a27901158cd6172d6c09 (diff)
Teach the local spiller to turn stack slot loads into register-register copies
when possible, avoiding the load (and avoiding the copy if the value is
already in the right register).

This patch came about when I noticed code like the following being generated:

  store R17 -> [SS1]
  ...blah...
  R4 = load [SS1]

This was causing an LSU reject on the G5. This problem was due to the register
allocator folding spill code into a reg-reg copy (producing the load), which
prevented the spiller from being able to rewrite the load into a copy, despite
the fact that the value was already available in a register. In the case
above, we now rip out the R4 load and replace it with an R4 = R17 copy.

This speeds up several programs on X86 (which spills a lot :) ), e.g. smg2k
from 22.39->20.60s, povray from 12.93->12.66s, 168.wupwise from
68.54->53.83s (!), 197.parser from 7.33->6.62s (!), etc. This may have a
larger impact in some cases on the G5 (by avoiding LSU rejects), though it
probably won't trigger as often (less spilling in general).

Targets that implement folding of loads/stores into copies should implement
the isLoadFromStackSlot hook to get this.

llvm-svn: 23388
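For illustration, here is a minimal sketch of what such a hook can look like.
This is not part of the patch: the target name Foo, the class
FooRegisterInfo, and the load opcode Foo::LDWri (operand 0 = destination
register, operand 1 = frame index) are assumptions made up for the example.

  unsigned FooRegisterInfo::isLoadFromStackSlot(MachineInstr *MI,
                                                int &FrameIndex) const {
    // Recognize only the plain "load register from stack slot" form,
    // with no side effects and no extra addressing operands.
    if (MI->getOpcode() == Foo::LDWri &&
        MI->getOperand(1).isFrameIndex()) {
      // Report which slot is read and return the register being defined.
      FrameIndex = MI->getOperand(1).getFrameIndex();
      return MI->getOperand(0).getReg();
    }
    return 0;  // Zero means "not a simple stack slot load".
  }

Once a target provides this, the spiller change below can match a folded
reload against a value it knows is still live in a register and emit a copy
(or nothing at all) instead of re-reading the stack slot.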
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/CodeGen/VirtRegMap.cpp | 78
1 file changed, 52 insertions(+), 26 deletions(-)
diff --git a/llvm/lib/CodeGen/VirtRegMap.cpp b/llvm/lib/CodeGen/VirtRegMap.cpp
index b68f36b6646..c958143d666 100644
--- a/llvm/lib/CodeGen/VirtRegMap.cpp
+++ b/llvm/lib/CodeGen/VirtRegMap.cpp
@@ -466,36 +466,61 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, const VirtRegMap &VRM) {
<< I->second.second);
unsigned VirtReg = I->second.first;
VirtRegMap::ModRef MR = I->second.second;
- if (VRM.hasStackSlot(VirtReg)) {
- int SS = VRM.getStackSlot(VirtReg);
- DEBUG(std::cerr << " - StackSlot: " << SS << "\n");
-
- // If this reference is not a use, any previous store is now dead.
- // Otherwise, the store to this stack slot is not dead anymore.
- std::map<int, MachineInstr*>::iterator MDSI = MaybeDeadStores.find(SS);
- if (MDSI != MaybeDeadStores.end()) {
- if (MR & VirtRegMap::isRef) // Previous store is not dead.
- MaybeDeadStores.erase(MDSI);
- else {
- // If we get here, the store is dead, nuke it now.
- assert(MR == VirtRegMap::isMod && "Can't be modref!");
- MBB.erase(MDSI->second);
- MaybeDeadStores.erase(MDSI);
- ++NumDSE;
+ if (!VRM.hasStackSlot(VirtReg)) {
+ DEBUG(std::cerr << ": No stack slot!\n");
+ continue;
+ }
+ int SS = VRM.getStackSlot(VirtReg);
+ DEBUG(std::cerr << " - StackSlot: " << SS << "\n");
+
+ // If this folded instruction is just a use, check to see if it's a
+ // straight load from the virt reg slot.
+ if ((MR & VirtRegMap::isRef) && !(MR & VirtRegMap::isMod)) {
+ int FrameIdx;
+ if (unsigned DestReg = MRI->isLoadFromStackSlot(&MI, FrameIdx)) {
+ // If this spill slot is available, insert a copy for it!
+ std::map<int, unsigned>::iterator It = SpillSlotsAvailable.find(SS);
+ if (FrameIdx == SS && It != SpillSlotsAvailable.end()) {
+ DEBUG(std::cerr << "Promoted Load To Copy: " << MI);
+ MachineFunction &MF = *MBB.getParent();
+ if (DestReg != It->second) {
+ MRI->copyRegToReg(MBB, &MI, DestReg, It->second,
+ MF.getSSARegMap()->getRegClass(VirtReg));
+ // Revisit the copy if the destination is a vreg.
+ if (MRegisterInfo::isVirtualRegister(DestReg)) {
+ NextMII = &MI;
+ --NextMII; // backtrack to the copy.
+ }
+ }
+ MBB.erase(&MI);
+ goto ProcessNextInst;
}
}
+ }
- // If the spill slot value is available, and this is a new definition of
- // the value, the value is not available anymore.
- if (MR & VirtRegMap::isMod) {
- std::map<int, unsigned>::iterator It = SpillSlotsAvailable.find(SS);
- if (It != SpillSlotsAvailable.end()) {
- PhysRegsAvailable.erase(It->second);
- SpillSlotsAvailable.erase(It);
- }
+ // If this reference is not a use, any previous store is now dead.
+ // Otherwise, the store to this stack slot is not dead anymore.
+ std::map<int, MachineInstr*>::iterator MDSI = MaybeDeadStores.find(SS);
+ if (MDSI != MaybeDeadStores.end()) {
+ if (MR & VirtRegMap::isRef) // Previous store is not dead.
+ MaybeDeadStores.erase(MDSI);
+ else {
+ // If we get here, the store is dead, nuke it now.
+ assert(MR == VirtRegMap::isMod && "Can't be modref!");
+ MBB.erase(MDSI->second);
+ MaybeDeadStores.erase(MDSI);
+ ++NumDSE;
+ }
+ }
+
+ // If the spill slot value is available, and this is a new definition of
+ // the value, the value is not available anymore.
+ if (MR & VirtRegMap::isMod) {
+ std::map<int, unsigned>::iterator It = SpillSlotsAvailable.find(SS);
+ if (It != SpillSlotsAvailable.end()) {
+ PhysRegsAvailable.erase(It->second);
+ SpillSlotsAvailable.erase(It);
}
- } else {
- DEBUG(std::cerr << ": No stack slot!\n");
}
}
@@ -575,6 +600,7 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, const VirtRegMap &VRM) {
}
}
}
+ ProcessNextInst:
MII = NextMII;
}
}