path: root/llvm/lib/Target/X86
author    Evan Cheng <evan.cheng@apple.com>  2007-12-01 02:07:52 +0000
committer Evan Cheng <evan.cheng@apple.com>  2007-12-01 02:07:52 +0000
commit    69fda0a7167048ae647675536fd7b2e4bb02f5a6 (patch)
tree      d1c03a1a47b4bf6b6c5199e5b85394c8a95a8320 /llvm/lib/Target/X86
parent    e62b441b516089f04648120d5ed9b4581d4fafe4 (diff)
Allow some reloads to be folded in multi-use cases. Specifically testl r, r -> cmpl [mem], 0.
llvm-svn: 44479
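
Why the rewrite is flag-safe: testl r, r sets ZF/SF from r & r, and cmpl $0, r sets them from r - 0; both values are just r, and on x86 both instructions leave CF and OF clear, so every consumer of EFLAGS sees identical results. The standalone C++ check below spells that reasoning out; it is illustrative only and not part of the patch:

    #include <cstdint>
    #include <cstdio>

    // Standalone sanity check (not LLVM code): TEST r,r computes r & r,
    // CMP r,0 computes r - 0. Both equal r, so the ZF/SF outcomes agree
    // for every value, which is what makes the TESTrr -> CMPri rewrite
    // (and the subsequent reload fold) behavior-preserving.
    int main() {
      for (int32_t r : {INT32_MIN, -7, 0, 42, INT32_MAX}) {
        printf("r=%11d  ZF: test=%d cmp=%d  SF: test=%d cmp=%d\n", (int)r,
               (r & r) == 0, (r - 0) == 0, (r & r) < 0, (r - 0) < 0);
      }
      return 0;
    }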
Diffstat (limited to 'llvm/lib/Target/X86')
-rw-r--r--  llvm/lib/Target/X86/X86RegisterInfo.cpp | 75
-rw-r--r--  llvm/lib/Target/X86/X86RegisterInfo.h   | 13
2 files changed, 87 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Target/X86/X86RegisterInfo.cpp b/llvm/lib/Target/X86/X86RegisterInfo.cpp
index 25b6375ce32..29f401ab7bd 100644
--- a/llvm/lib/Target/X86/X86RegisterInfo.cpp
+++ b/llvm/lib/Target/X86/X86RegisterInfo.cpp
@@ -1149,6 +1149,31 @@ MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI, unsigned OpNu
   return foldMemoryOperand(MI, OpNum, MOs);
 }
 
+MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI,
+                                                 SmallVectorImpl<unsigned> &UseOps,
+                                                 int FrameIndex) const {
+  // Check switch flag
+  if (NoFusing) return NULL;
+
+  if (UseOps.size() == 1)
+    return foldMemoryOperand(MI, UseOps[0], FrameIndex);
+  else if (UseOps.size() != 2 || UseOps[0] != 0 || UseOps[1] != 1)
+    return NULL;
+
+  unsigned NewOpc = 0;
+  switch (MI->getOpcode()) {
+  default: return NULL;
+  case X86::TEST8rr:  NewOpc = X86::CMP8ri; break;
+  case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
+  case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
+  case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
+  }
+  // Change to CMPXXri r, 0 first.
+  MI->setInstrDescriptor(TII.get(NewOpc));
+  MI->getOperand(1).ChangeToImmediate(0);
+  return foldMemoryOperand(MI, 0, FrameIndex);
+}
+
 MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI, unsigned OpNum,
                                                  MachineInstr *LoadMI) const {
   // Check switch flag
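
The multi-use path above is deliberately narrow: it either degenerates to the ordinary single-operand fold or requires the request to name exactly operands 0 and 1, i.e. both register uses of a TESTrr. A minimal standalone model of that guard (plain C++ with a hypothetical helper name, not LLVM code):

    #include <cstdio>
    #include <vector>

    // Models the UseOps guard (hypothetical standalone helper, not LLVM
    // code): one index falls through to the ordinary fold; two indices
    // must be exactly {0, 1} or the multi-use fold is refused.
    static bool multiUseFoldable(const std::vector<unsigned> &UseOps) {
      if (UseOps.size() == 1)
        return true;                       // single-operand fold path
      return UseOps.size() == 2 && UseOps[0] == 0 && UseOps[1] == 1;
    }

    int main() {
      printf("{0}   -> %d\n", multiUseFoldable({0}));     // 1
      printf("{0,1} -> %d\n", multiUseFoldable({0, 1}));  // 1: TESTrr case
      printf("{0,2} -> %d\n", multiUseFoldable({0, 2}));  // 0: refused
      printf("{1,0} -> %d\n", multiUseFoldable({1, 0}));  // 0: order matters
      return 0;
    }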
@@ -1160,6 +1185,31 @@ MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI, unsigned OpNu
   return foldMemoryOperand(MI, OpNum, MOs);
 }
 
+MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI,
+                                                 SmallVectorImpl<unsigned> &UseOps,
+                                                 MachineInstr *LoadMI) const {
+  // Check switch flag
+  if (NoFusing) return NULL;
+
+  if (UseOps.size() == 1)
+    return foldMemoryOperand(MI, UseOps[0], LoadMI);
+  else if (UseOps.size() != 2 || UseOps[0] != 0 || UseOps[1] != 1)
+    return NULL;
+  unsigned NewOpc = 0;
+  switch (MI->getOpcode()) {
+  default: return NULL;
+  case X86::TEST8rr:  NewOpc = X86::CMP8ri; break;
+  case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
+  case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
+  case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
+  }
+  // Change to CMPXXri r, 0 first.
+  MI->setInstrDescriptor(TII.get(NewOpc));
+  MI->getOperand(1).ChangeToImmediate(0);
+  return foldMemoryOperand(MI, 0, LoadMI);
+}
+
+
 unsigned X86RegisterInfo::getOpcodeAfterMemoryFold(unsigned Opc,
                                                    unsigned OpNum) const {
   // Check switch flag
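
This LoadMI flavor folds an arbitrary load instruction rather than a known spill slot; the TEST -> CMP rewrite is identical, and only the final single-operand call differs. A hypothetical caller sketch follows: MI, LoadMI, MBB, MII, and the register-info pointer RI are assumed to exist in surrounding spiller code (and SmallVector from llvm/ADT/SmallVector.h), so this is a fragment under stated assumptions, not a verbatim excerpt from the tree:

    // Hypothetical caller (assumed context: spiller code that proved both
    // register uses in MI read the value produced by LoadMI).
    SmallVector<unsigned, 2> UseOps;
    UseOps.push_back(0);
    UseOps.push_back(1);                   // both TESTrr operands
    if (MachineInstr *FoldedMI = RI->foldMemoryOperand(MI, UseOps, LoadMI)) {
      MBB.insert(MII, FoldedMI);           // CMPxx [mem], 0 replaces the pair
      MBB.erase(MI);                       // the original TEST goes away
      // LoadMI becomes dead if this was its last use and can be removed.
    }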
@@ -1270,7 +1320,30 @@ bool X86RegisterInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
     MachineOperand &MO = ImpOps[i];
     MIB.addReg(MO.getReg(), MO.isDef(), true, MO.isKill(), MO.isDead());
   }
-  NewMIs.push_back(MIB);
+  // Change CMP32ri r, 0 back to TEST32rr r, r, etc.
+  unsigned NewOpc = 0;
+  switch (DataMI->getOpcode()) {
+  default: break;
+  case X86::CMP64ri32:
+  case X86::CMP32ri:
+  case X86::CMP16ri:
+  case X86::CMP8ri: {
+    MachineOperand &MO0 = DataMI->getOperand(0);
+    MachineOperand &MO1 = DataMI->getOperand(1);
+    if (MO1.getImm() == 0) {
+      switch (DataMI->getOpcode()) {
+      default: break;
+      case X86::CMP64ri32: NewOpc = X86::TEST64rr; break;
+      case X86::CMP32ri:   NewOpc = X86::TEST32rr; break;
+      case X86::CMP16ri:   NewOpc = X86::TEST16rr; break;
+      case X86::CMP8ri:    NewOpc = X86::TEST8rr; break;
+      }
+      DataMI->setInstrDescriptor(TII.get(NewOpc));
+      MO1.ChangeToRegister(MO0.getReg(), false);
+    }
+  }
+  }
+  NewMIs.push_back(DataMI);
 
   // Emit the store instruction.
   if (UnfoldStore) {
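
Note the symmetry with the fold path: folding maps TESTxxrr to CMPxxri with immediate 0, and unfolding must invert that mapping exactly, or the unfolded data instruction would keep testing an immediate instead of the freshly reloaded register. A standalone consistency check of the two opcode tables (plain C++, not LLVM code):

    #include <cassert>
    #include <cstdio>

    // Standalone model (not LLVM code) of the two opcode tables in this
    // patch: foldOpc mirrors the TESTrr -> CMPri switch in the new
    // foldMemoryOperand overloads, unfoldOpc mirrors the switch in
    // unfoldMemoryOperand, and the two must be exact inverses.
    enum Opc { TEST8rr, TEST16rr, TEST32rr, TEST64rr,
               CMP8ri, CMP16ri, CMP32ri, CMP64ri32, INVALID };

    static Opc foldOpc(Opc o) {
      switch (o) {
      case TEST8rr:  return CMP8ri;
      case TEST16rr: return CMP16ri;
      case TEST32rr: return CMP32ri;
      case TEST64rr: return CMP64ri32;
      default:       return INVALID;
      }
    }

    static Opc unfoldOpc(Opc o) {
      switch (o) {
      case CMP8ri:    return TEST8rr;
      case CMP16ri:   return TEST16rr;
      case CMP32ri:   return TEST32rr;
      case CMP64ri32: return TEST64rr;
      default:        return INVALID;
      }
    }

    int main() {
      for (Opc o : {TEST8rr, TEST16rr, TEST32rr, TEST64rr})
        assert(unfoldOpc(foldOpc(o)) == o);   // round-trip is the identity
      printf("fold/unfold opcode tables are consistent\n");
      return 0;
    }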
diff --git a/llvm/lib/Target/X86/X86RegisterInfo.h b/llvm/lib/Target/X86/X86RegisterInfo.h
index 18e8b907c3a..53f08440a3f 100644
--- a/llvm/lib/Target/X86/X86RegisterInfo.h
+++ b/llvm/lib/Target/X86/X86RegisterInfo.h
@@ -141,6 +141,12 @@ public:
                                   unsigned OpNum,
                                   int FrameIndex) const;
 
+  /// foldMemoryOperand - Same as the previous version except it tries to fold
+  /// an instruction with multiple uses of the same register.
+  MachineInstr* foldMemoryOperand(MachineInstr* MI,
+                                  SmallVectorImpl<unsigned> &UseOps,
+                                  int FrameIndex) const;
+
   /// foldMemoryOperand - Same as the previous version except it allows folding
   /// of any load and store from / to any address, not just from a specific
   /// stack slot.
@@ -148,6 +154,13 @@ public:
                                   unsigned OpNum,
                                   MachineInstr* LoadMI) const;
 
+  /// foldMemoryOperand - Same as the previous version except it allows folding
+  /// of any load and store from / to any address, not just from a specific
+  /// stack slot.
+  MachineInstr* foldMemoryOperand(MachineInstr* MI,
+                                  SmallVectorImpl<unsigned> &UseOps,
+                                  MachineInstr* LoadMI) const;
+
   /// getOpcodeAfterMemoryFold - Returns the opcode of the would be new
   /// instruction after load / store is folded into an instruction of the
   /// specified opcode. It returns zero if the specified unfolding is not