author    Evan Cheng <evan.cheng@apple.com>    2007-12-01 02:07:52 +0000
committer Evan Cheng <evan.cheng@apple.com>    2007-12-01 02:07:52 +0000
commit    69fda0a7167048ae647675536fd7b2e4bb02f5a6 (patch)
tree      d1c03a1a47b4bf6b6c5199e5b85394c8a95a8320 /llvm/lib/Target
parent    e62b441b516089f04648120d5ed9b4581d4fafe4 (diff)
Allow some reloads to be folded in multi-use cases. Specifically, testl r, r -> cmpl [mem], 0.
llvm-svn: 44479
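
Illustrative sketch (not part of the commit): how a caller such as the spiller's reload-folding logic could use the new multi-use overload. The names RegInfo, MI, and FrameIndex are assumed from the surrounding context; MI is assumed to be a TEST32rr whose operands 0 and 1 both read the register being reloaded, and RegInfo is assumed to be the target's register info object (X86RegisterInfo here).

    // Pass both operand indices that use the reloaded register.
    SmallVector<unsigned, 2> UseOps;
    UseOps.push_back(0);
    UseOps.push_back(1);
    // Per the X86 override added below, TEST32rr is first rewritten to
    // CMP32ri reg, 0, and the reload from FrameIndex is then folded into
    // operand 0, producing cmpl [mem], 0 (CMP32mi). A non-null result is the
    // new folded instruction; the caller replaces MI with it and drops the
    // separate reload instruction.
    if (MachineInstr *FoldedMI =
            RegInfo->foldMemoryOperand(MI, UseOps, FrameIndex)) {
      // ... insert FoldedMI in place of MI and the reload ...
    }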
Diffstat (limited to 'llvm/lib/Target')
-rw-r--r--  llvm/lib/Target/ARM/ARMRegisterInfo.h      | 12
-rw-r--r--  llvm/lib/Target/Alpha/AlphaRegisterInfo.h  | 12
-rw-r--r--  llvm/lib/Target/Mips/MipsRegisterInfo.cpp  |  6
-rw-r--r--  llvm/lib/Target/Mips/MipsRegisterInfo.h    | 16
-rw-r--r--  llvm/lib/Target/PowerPC/PPCRegisterInfo.h  | 12
-rw-r--r--  llvm/lib/Target/Sparc/SparcRegisterInfo.h  | 12
-rw-r--r--  llvm/lib/Target/X86/X86RegisterInfo.cpp    | 75
-rw-r--r--  llvm/lib/Target/X86/X86RegisterInfo.h      | 13
8 files changed, 150 insertions, 8 deletions
diff --git a/llvm/lib/Target/ARM/ARMRegisterInfo.h b/llvm/lib/Target/ARM/ARMRegisterInfo.h
index b1110db8f0f..97be04f421a 100644
--- a/llvm/lib/Target/ARM/ARMRegisterInfo.h
+++ b/llvm/lib/Target/ARM/ARMRegisterInfo.h
@@ -77,11 +77,23 @@ public:
MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
int FrameIndex) const;
+ MachineInstr* foldMemoryOperand(MachineInstr* MI,
+ SmallVectorImpl<unsigned> &UseOps,
+ int FrameIndex) const {
+ return 0;
+ }
+
MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
MachineInstr* LoadMI) const {
return 0;
}
+ MachineInstr* foldMemoryOperand(MachineInstr* MI,
+ SmallVectorImpl<unsigned> &UseOps,
+ MachineInstr* LoadMI) const {
+ return 0;
+ }
+
const unsigned *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
const TargetRegisterClass* const*
diff --git a/llvm/lib/Target/Alpha/AlphaRegisterInfo.h b/llvm/lib/Target/Alpha/AlphaRegisterInfo.h
index 6275cb4057e..97d3280a090 100644
--- a/llvm/lib/Target/Alpha/AlphaRegisterInfo.h
+++ b/llvm/lib/Target/Alpha/AlphaRegisterInfo.h
@@ -51,11 +51,23 @@ struct AlphaRegisterInfo : public AlphaGenRegisterInfo {
MachineInstr* foldMemoryOperand(MachineInstr *MI, unsigned OpNum,
int FrameIndex) const;
+ MachineInstr* foldMemoryOperand(MachineInstr* MI,
+ SmallVectorImpl<unsigned> &UseOps,
+ int FrameIndex) const {
+ return 0;
+ }
+
MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
MachineInstr* LoadMI) const {
return 0;
}
+ MachineInstr* foldMemoryOperand(MachineInstr* MI,
+ SmallVectorImpl<unsigned> &UseOps,
+ MachineInstr* LoadMI) const {
+ return 0;
+ }
+
void copyRegToReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
unsigned DestReg, unsigned SrcReg,
const TargetRegisterClass *DestRC,
diff --git a/llvm/lib/Target/Mips/MipsRegisterInfo.cpp b/llvm/lib/Target/Mips/MipsRegisterInfo.cpp
index 9dc85638843..94cf59bb8bc 100644
--- a/llvm/lib/Target/Mips/MipsRegisterInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsRegisterInfo.cpp
@@ -203,12 +203,6 @@ foldMemoryOperand(MachineInstr* MI, unsigned OpNum, int FI) const
return NewMI;
}
-MachineInstr *MipsRegisterInfo::
-foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
- MachineInstr* LoadMI) const {
- return NULL;
-}
-
//===----------------------------------------------------------------------===//
//
// Callee Saved Registers methods
diff --git a/llvm/lib/Target/Mips/MipsRegisterInfo.h b/llvm/lib/Target/Mips/MipsRegisterInfo.h
index 636ffb9f431..123f6e87dc8 100644
--- a/llvm/lib/Target/Mips/MipsRegisterInfo.h
+++ b/llvm/lib/Target/Mips/MipsRegisterInfo.h
@@ -58,8 +58,22 @@ struct MipsRegisterInfo : public MipsGenRegisterInfo {
MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
int FrameIndex) const;
+ MachineInstr* foldMemoryOperand(MachineInstr* MI,
+ SmallVectorImpl<unsigned> &UseOps,
+ int FrameIndex) const {
+ return 0;
+ }
+
MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
- MachineInstr* LoadMI) const;
+ MachineInstr* LoadMI) const {
+ return 0;
+ }
+
+ MachineInstr* foldMemoryOperand(MachineInstr* MI,
+ SmallVectorImpl<unsigned> &UseOps,
+ MachineInstr* LoadMI) const {
+ return 0;
+ }
void copyRegToReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
unsigned DestReg, unsigned SrcReg,
diff --git a/llvm/lib/Target/PowerPC/PPCRegisterInfo.h b/llvm/lib/Target/PowerPC/PPCRegisterInfo.h
index 776d11cd073..3fce8924d17 100644
--- a/llvm/lib/Target/PowerPC/PPCRegisterInfo.h
+++ b/llvm/lib/Target/PowerPC/PPCRegisterInfo.h
@@ -68,11 +68,23 @@ public:
virtual MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
int FrameIndex) const;
+ virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
+ SmallVectorImpl<unsigned> &UseOps,
+ int FrameIndex) const {
+ return 0;
+ }
+
virtual MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
MachineInstr* LoadMI) const {
return 0;
}
+ virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
+ SmallVectorImpl<unsigned> &UseOps,
+ MachineInstr* LoadMI) const {
+ return 0;
+ }
+
const unsigned *getCalleeSavedRegs(const MachineFunction* MF = 0) const;
const TargetRegisterClass* const*
diff --git a/llvm/lib/Target/Sparc/SparcRegisterInfo.h b/llvm/lib/Target/Sparc/SparcRegisterInfo.h
index dec01e02769..cecbc8a87d9 100644
--- a/llvm/lib/Target/Sparc/SparcRegisterInfo.h
+++ b/llvm/lib/Target/Sparc/SparcRegisterInfo.h
@@ -63,11 +63,23 @@ struct SparcRegisterInfo : public SparcGenRegisterInfo {
int FrameIndex) const;
virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
+ SmallVectorImpl<unsigned> &UseOps,
+ int FrameIndex) const {
+ return 0;
+ }
+
+ virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
unsigned OpNum,
MachineInstr* LoadMI) const {
return 0;
}
+ virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
+ SmallVectorImpl<unsigned> &UseOps,
+ MachineInstr* LoadMI) const {
+ return 0;
+ }
+
const unsigned *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
const TargetRegisterClass* const* getCalleeSavedRegClasses(
diff --git a/llvm/lib/Target/X86/X86RegisterInfo.cpp b/llvm/lib/Target/X86/X86RegisterInfo.cpp
index 25b6375ce32..29f401ab7bd 100644
--- a/llvm/lib/Target/X86/X86RegisterInfo.cpp
+++ b/llvm/lib/Target/X86/X86RegisterInfo.cpp
@@ -1149,6 +1149,31 @@ MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI, unsigned OpNu
return foldMemoryOperand(MI, OpNum, MOs);
}
+MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI,
+ SmallVectorImpl<unsigned> &UseOps,
+ int FrameIndex) const {
+ // Check switch flag
+ if (NoFusing) return NULL;
+
+ if (UseOps.size() == 1)
+ return foldMemoryOperand(MI, UseOps[0], FrameIndex);
+ else if (UseOps.size() != 2 || UseOps[0] != 0 && UseOps[1] != 1)
+ return NULL;
+
+ unsigned NewOpc = 0;
+ switch (MI->getOpcode()) {
+ default: return NULL;
+ case X86::TEST8rr: NewOpc = X86::CMP8ri; break;
+ case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
+ case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
+ case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
+ }
+ // Change to CMPXXri r, 0 first.
+ MI->setInstrDescriptor(TII.get(NewOpc));
+ MI->getOperand(1).ChangeToImmediate(0);
+ return foldMemoryOperand(MI, 0, FrameIndex);
+}
+
MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI, unsigned OpNum,
MachineInstr *LoadMI) const {
// Check switch flag
@@ -1160,6 +1185,31 @@ MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI, unsigned OpNu
return foldMemoryOperand(MI, OpNum, MOs);
}
+MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI,
+ SmallVectorImpl<unsigned> &UseOps,
+ MachineInstr *LoadMI) const {
+ // Check switch flag
+ if (NoFusing) return NULL;
+
+ if (UseOps.size() == 1)
+ return foldMemoryOperand(MI, UseOps[0], LoadMI);
+ else if (UseOps.size() != 2 || UseOps[0] != 0 && UseOps[1] != 1)
+ return NULL;
+ unsigned NewOpc = 0;
+ switch (MI->getOpcode()) {
+ default: return NULL;
+ case X86::TEST8rr: NewOpc = X86::CMP8ri; break;
+ case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
+ case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
+ case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
+ }
+ // Change to CMPXXri r, 0 first.
+ MI->setInstrDescriptor(TII.get(NewOpc));
+ MI->getOperand(1).ChangeToImmediate(0);
+ return foldMemoryOperand(MI, 0, LoadMI);
+}
+
+
unsigned X86RegisterInfo::getOpcodeAfterMemoryFold(unsigned Opc,
unsigned OpNum) const {
// Check switch flag
@@ -1270,7 +1320,30 @@ bool X86RegisterInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
MachineOperand &MO = ImpOps[i];
MIB.addReg(MO.getReg(), MO.isDef(), true, MO.isKill(), MO.isDead());
}
- NewMIs.push_back(MIB);
+ // Change CMP32ri r, 0 back to TEST32rr r, r, etc.
+ unsigned NewOpc = 0;
+ switch (DataMI->getOpcode()) {
+ default: break;
+ case X86::CMP64ri32:
+ case X86::CMP32ri:
+ case X86::CMP16ri:
+ case X86::CMP8ri: {
+ MachineOperand &MO0 = DataMI->getOperand(0);
+ MachineOperand &MO1 = DataMI->getOperand(1);
+ if (MO1.getImm() == 0) {
+ switch (DataMI->getOpcode()) {
+ default: break;
+ case X86::CMP64ri32: NewOpc = X86::TEST64rr; break;
+ case X86::CMP32ri: NewOpc = X86::TEST32rr; break;
+ case X86::CMP16ri: NewOpc = X86::TEST16rr; break;
+ case X86::CMP8ri: NewOpc = X86::TEST8rr; break;
+ }
+ DataMI->setInstrDescriptor(TII.get(NewOpc));
+ MO1.ChangeToRegister(MO0.getReg(), false);
+ }
+ }
+ }
+ NewMIs.push_back(DataMI);
// Emit the store instruction.
if (UnfoldStore) {
diff --git a/llvm/lib/Target/X86/X86RegisterInfo.h b/llvm/lib/Target/X86/X86RegisterInfo.h
index 18e8b907c3a..53f08440a3f 100644
--- a/llvm/lib/Target/X86/X86RegisterInfo.h
+++ b/llvm/lib/Target/X86/X86RegisterInfo.h
@@ -141,6 +141,12 @@ public:
unsigned OpNum,
int FrameIndex) const;
+ /// foldMemoryOperand - Same as previous except it tries to fold instruction
+ /// with multiple uses of the same register.
+ MachineInstr* foldMemoryOperand(MachineInstr* MI,
+ SmallVectorImpl<unsigned> &UseOps,
+ int FrameIndex) const;
+
/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
@@ -148,6 +154,13 @@ public:
unsigned OpNum,
MachineInstr* LoadMI) const;
+ /// foldMemoryOperand - Same as the previous version except it allows folding
+ /// of any load and store from / to any address, not just from a specific
+ /// stack slot.
+ MachineInstr* foldMemoryOperand(MachineInstr* MI,
+ SmallVectorImpl<unsigned> &UseOps,
+ MachineInstr* LoadMI) const;
+
/// getOpcodeAfterMemoryFold - Returns the opcode of the would be new
/// instruction after load / store is folded into an instruction of the
/// specified opcode. It returns zero if the specified unfolding is not