author    Evan Cheng <evan.cheng@apple.com>    2007-12-02 08:30:39 +0000
committer Evan Cheng <evan.cheng@apple.com>    2007-12-02 08:30:39 +0000
commit    f45a1d623cce45fb716555214a92ef3a759c9592 (patch)
tree      04a2dba71ecb5aa16b79ee39d26ec97d49caf65d /llvm/lib/Target/X86/X86RegisterInfo.cpp
parent    310369fb8440064d623ae8f63354c815de47dcc6 (diff)
download  bcm5719-llvm-f45a1d623cce45fb716555214a92ef3a759c9592.tar.gz
          bcm5719-llvm-f45a1d623cce45fb716555214a92ef3a759c9592.zip
Remove redundant foldMemoryOperand variants and other code clean up.
llvm-svn: 44517
Diffstat (limited to 'llvm/lib/Target/X86/X86RegisterInfo.cpp')
-rw-r--r--  llvm/lib/Target/X86/X86RegisterInfo.cpp | 87
1 file changed, 36 insertions(+), 51 deletions(-)
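The commit message and diffstat above summarize the change: the single-operand foldMemoryOperand overloads are deleted and their work moves into the overloads that take a SmallVectorImpl<unsigned> of operand indices, with the TESTrr -> CMPri r, 0 rewrite now handled inline for the two-operand case. The standalone C++ sketch below mirrors that consolidated control flow for the frame-index path; MockInstr, Opcode, and foldFrameIndexOperand are hypothetical stand-ins for the real MachineInstr/LLVM types, used only so the example compiles on its own.

#include <cstdio>
#include <vector>

// Hypothetical stand-ins for LLVM's opcodes and MachineInstr (illustration only).
enum Opcode { TEST32rr, CMP32ri, OTHER };

struct MockInstr {
  Opcode Opc;
  int Imm = -1;   // models operand 1 after ChangeToImmediate(0)
};

// Sketch of the consolidated frame-index fold: callers always pass the list of
// operand indices; the body of the removed one-operand overload now lives here.
bool foldFrameIndexOperand(MockInstr &MI, const std::vector<unsigned> &Ops) {
  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
    // Two-operand case: rewrite TESTrr reg, reg into CMPri reg, 0 first,
    // then fold the remaining register operand (index 0) into memory.
    if (MI.Opc != TEST32rr)
      return false;            // the real code switches over all TESTxxrr opcodes
    MI.Opc = CMP32ri;
    MI.Imm = 0;
  } else if (Ops.size() != 1) {
    return false;              // anything else is not foldable here
  }
  // The real code now builds a frame-index MachineOperand and dispatches to
  // the table-driven foldMemoryOperand(MI, Ops[0], MOs).
  return true;
}

int main() {
  MockInstr Test{TEST32rr};
  std::printf("folded: %d  imm: %d\n",
              (int)foldFrameIndexOperand(Test, {0, 1}), Test.Imm);
  return 0;
}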
diff --git a/llvm/lib/Target/X86/X86RegisterInfo.cpp b/llvm/lib/Target/X86/X86RegisterInfo.cpp
index 29f401ab7bd..122dd9ed758 100644
--- a/llvm/lib/Target/X86/X86RegisterInfo.cpp
+++ b/llvm/lib/Target/X86/X86RegisterInfo.cpp
@@ -1140,73 +1140,58 @@ X86RegisterInfo::foldMemoryOperand(MachineInstr *MI, unsigned i,
}
-MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI, unsigned OpNum,
- int FrameIndex) const {
- // Check switch flag
- if (NoFusing) return NULL;
- SmallVector<MachineOperand,4> MOs;
- MOs.push_back(MachineOperand::CreateFrameIndex(FrameIndex));
- return foldMemoryOperand(MI, OpNum, MOs);
-}
-
MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI,
- SmallVectorImpl<unsigned> &UseOps,
+ SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const {
// Check switch flag
if (NoFusing) return NULL;
- if (UseOps.size() == 1)
- return foldMemoryOperand(MI, UseOps[0], FrameIndex);
- else if (UseOps.size() != 2 || UseOps[0] != 0 && UseOps[1] != 1)
+ if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
+ unsigned NewOpc = 0;
+ switch (MI->getOpcode()) {
+ default: return NULL;
+ case X86::TEST8rr: NewOpc = X86::CMP8ri; break;
+ case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
+ case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
+ case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
+ }
+ // Change to CMPXXri r, 0 first.
+ MI->setInstrDescriptor(TII.get(NewOpc));
+ MI->getOperand(1).ChangeToImmediate(0);
+ } else if (Ops.size() != 1)
return NULL;
- unsigned NewOpc = 0;
- switch (MI->getOpcode()) {
- default: return NULL;
- case X86::TEST8rr: NewOpc = X86::CMP8ri; break;
- case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
- case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
- case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
- }
- // Change to CMPXXri r, 0 first.
- MI->setInstrDescriptor(TII.get(NewOpc));
- MI->getOperand(1).ChangeToImmediate(0);
- return foldMemoryOperand(MI, 0, FrameIndex);
-}
-
-MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI, unsigned OpNum,
- MachineInstr *LoadMI) const {
- // Check switch flag
- if (NoFusing) return NULL;
SmallVector<MachineOperand,4> MOs;
- unsigned NumOps = TII.getNumOperands(LoadMI->getOpcode());
- for (unsigned i = NumOps - 4; i != NumOps; ++i)
- MOs.push_back(LoadMI->getOperand(i));
- return foldMemoryOperand(MI, OpNum, MOs);
+ MOs.push_back(MachineOperand::CreateFrameIndex(FrameIndex));
+ return foldMemoryOperand(MI, Ops[0], MOs);
}
MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI,
- SmallVectorImpl<unsigned> &UseOps,
+ SmallVectorImpl<unsigned> &Ops,
MachineInstr *LoadMI) const {
// Check switch flag
if (NoFusing) return NULL;
- if (UseOps.size() == 1)
- return foldMemoryOperand(MI, UseOps[0], LoadMI);
- else if (UseOps.size() != 2 || UseOps[0] != 0 && UseOps[1] != 1)
+ if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
+ unsigned NewOpc = 0;
+ switch (MI->getOpcode()) {
+ default: return NULL;
+ case X86::TEST8rr: NewOpc = X86::CMP8ri; break;
+ case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
+ case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
+ case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
+ }
+ // Change to CMPXXri r, 0 first.
+ MI->setInstrDescriptor(TII.get(NewOpc));
+ MI->getOperand(1).ChangeToImmediate(0);
+ } else if (Ops.size() != 1)
return NULL;
- unsigned NewOpc = 0;
- switch (MI->getOpcode()) {
- default: return NULL;
- case X86::TEST8rr: NewOpc = X86::CMP8ri; break;
- case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
- case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
- case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
- }
- // Change to CMPXXri r, 0 first.
- MI->setInstrDescriptor(TII.get(NewOpc));
- MI->getOperand(1).ChangeToImmediate(0);
- return foldMemoryOperand(MI, 0, LoadMI);
+
+ SmallVector<MachineOperand,4> MOs;
+ unsigned NumOps = TII.getNumOperands(LoadMI->getOpcode());
+ for (unsigned i = NumOps - 4; i != NumOps; ++i)
+ MOs.push_back(LoadMI->getOperand(i));
+ return foldMemoryOperand(MI, Ops[0], MOs);
}
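For the LoadMI variant in the second half of the diff, the folded memory reference is built not from a frame index but by copying the load's trailing address operands (the NumOps - 4 loop grabs the four x86 address components of that era: base, scale, index, displacement). A minimal standalone sketch of that copy step, again using hypothetical mock types in place of MachineOperand/MachineInstr:

#include <cstdio>
#include <vector>

struct MockOperand { int Value; };     // hypothetical stand-in for MachineOperand

struct MockLoad {
  std::vector<MockOperand> Operands;   // defs/uses followed by the address
};

// Copy the last four operands of the load -- the memory address it reads --
// so the caller can splice them into the instruction being folded.
// (The real code takes NumOps from the instruction descriptor via TII.)
std::vector<MockOperand> collectAddressOperands(const MockLoad &LoadMI) {
  std::vector<MockOperand> MOs;
  unsigned NumOps = (unsigned)LoadMI.Operands.size();
  for (unsigned i = NumOps - 4; i != NumOps; ++i)
    MOs.push_back(LoadMI.Operands[i]);
  return MOs;
}

int main() {
  // One result operand followed by a four-part address (base, scale, index, disp).
  MockLoad Load{{{7}, {1}, {1}, {0}, {16}}};
  for (const MockOperand &MO : collectAddressOperands(Load))
    std::printf("%d ", MO.Value);
  std::printf("\n");
  return 0;
}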