author     Tom Stellard <thomas.stellard@amd.com>    2015-01-07 20:27:25 +0000
committer  Tom Stellard <thomas.stellard@amd.com>    2015-01-07 20:27:25 +0000
commit     4842c052168157142c0d99dfbe9f35a84469b790 (patch)
tree       00031c49fb24b50db947e99bbebd4294b654087b /llvm/lib
parent     290ece7d4c363cee09f64919ae43d00f48fcb16e (diff)
R600/SI: Add a V_MOV_B64 pseudo instruction
This is used to simplify the SIFoldOperands pass and make it easier to fold
immediates.

llvm-svn: 225373
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Target/R600/SIFoldOperands.cpp |  1
-rw-r--r--  llvm/lib/Target/R600/SIInstrInfo.cpp    | 31
-rw-r--r--  llvm/lib/Target/R600/SIInstructions.td  |  6
3 files changed, 38 insertions, 0 deletions
diff --git a/llvm/lib/Target/R600/SIFoldOperands.cpp b/llvm/lib/Target/R600/SIFoldOperands.cpp
index 23d4a4dbe17..ddb285213d2 100644
--- a/llvm/lib/Target/R600/SIFoldOperands.cpp
+++ b/llvm/lib/Target/R600/SIFoldOperands.cpp
@@ -86,6 +86,7 @@ static bool isSafeToFold(unsigned Opcode) {
   switch(Opcode) {
   case AMDGPU::V_MOV_B32_e32:
   case AMDGPU::V_MOV_B32_e64:
+  case AMDGPU::V_MOV_B64_PSEUDO:
   case AMDGPU::S_MOV_B32:
   case AMDGPU::S_MOV_B64:
   case AMDGPU::COPY:
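For context, a rough sketch (an assumption, not code from this commit) of how a folding pass typically consults a whitelist like isSafeToFold() before trying to propagate a mov's source operand into the users of its destination register:

// Hypothetical fragment, assuming the usual structure of such a pass: walk the
// machine function, skip opcodes the whitelist rejects, then consider folding
// the mov's source operand (operand 1) into users of its destination register.
for (MachineBasicBlock &MBB : MF) {
  for (MachineInstr &MI : MBB) {
    if (!isSafeToFold(MI.getOpcode()))
      continue;
    MachineOperand &OpToFold = MI.getOperand(1);
    // ... attempt to rewrite uses of MI.getOperand(0).getReg() with OpToFold ...
  }
}

With V_MOV_B64_PSEUDO whitelisted, a 64-bit immediate materialization is a single def whose operand the pass can consider, rather than being spread across two 32-bit moves.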
diff --git a/llvm/lib/Target/R600/SIInstrInfo.cpp b/llvm/lib/Target/R600/SIInstrInfo.cpp
index 08dd425ecbe..aa9e1a31782 100644
--- a/llvm/lib/Target/R600/SIInstrInfo.cpp
+++ b/llvm/lib/Target/R600/SIInstrInfo.cpp
@@ -424,6 +424,8 @@ unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const {
     return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
   } else if (DstRC->getSize() == 8 && RI.isSGPRClass(DstRC)) {
     return AMDGPU::S_MOV_B64;
+  } else if (DstRC->getSize() == 8 && !RI.isSGPRClass(DstRC)) {
+    return AMDGPU::V_MOV_B64_PSEUDO;
   }
   return AMDGPU::COPY;
 }
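As a usage note, here is a minimal sketch (assumed, not part of this diff) of how a caller holding a const SIInstrInfo *TII might use getMovOpcode() to emit a copy; with the new branch, a 64-bit non-SGPR destination now yields V_MOV_B64_PSEUDO, which the post-RA expansion in the next hunk splits back into two 32-bit moves:

// Hypothetical caller: pick the move opcode that matches the destination
// register class and build the copy instruction with it.
unsigned Opc = TII->getMovOpcode(DstRC);
BuildMI(MBB, InsertPt, DL, TII->get(Opc), DstReg)
    .addReg(SrcReg);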
@@ -672,6 +674,35 @@ bool SIInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
     // This is just a placeholder for register allocation.
     MI->eraseFromParent();
     break;
+
+  case AMDGPU::V_MOV_B64_PSEUDO: {
+    unsigned Dst = MI->getOperand(0).getReg();
+    unsigned DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
+    unsigned DstHi = RI.getSubReg(Dst, AMDGPU::sub1);
+
+    const MachineOperand &SrcOp = MI->getOperand(1);
+    // FIXME: Will this work for 64-bit floating point immediates?
+    assert(!SrcOp.isFPImm());
+    if (SrcOp.isImm()) {
+      APInt Imm(64, SrcOp.getImm());
+      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
+        .addImm(Imm.getLoBits(32).getZExtValue())
+        .addReg(Dst, RegState::Implicit);
+      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
+        .addImm(Imm.getHiBits(32).getZExtValue())
+        .addReg(Dst, RegState::Implicit);
+    } else {
+      assert(SrcOp.isReg());
+      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
+        .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0))
+        .addReg(Dst, RegState::Implicit);
+      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
+        .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1))
+        .addReg(Dst, RegState::Implicit);
+    }
+    MI->eraseFromParent();
+    break;
+  }
   }
   return true;
 }
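To make the lo/hi handling above concrete, here is a small standalone C++ illustration of the split that the expansion performs with APInt::getLoBits(32) and APInt::getHiBits(32); the double is only an example value, echoing the FIXME about 64-bit floating point immediates:

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  // Bit pattern of the double 1.0 (0x3FF0000000000000); the FIXME above asks
  // whether such values reach the expansion as plain 64-bit immediates.
  double D = 1.0;
  uint64_t Imm;
  std::memcpy(&Imm, &D, sizeof(Imm));

  // The same split the pseudo expansion performs: the low 32 bits go to the
  // sub0 half of the destination, the high 32 bits to sub1.
  uint32_t Lo = static_cast<uint32_t>(Imm & 0xFFFFFFFFu);
  uint32_t Hi = static_cast<uint32_t>(Imm >> 32);

  std::printf("V_MOV_B32_e32 dst.sub0, 0x%08X\n", (unsigned)Lo);
  std::printf("V_MOV_B32_e32 dst.sub1, 0x%08X\n", (unsigned)Hi);
  return 0;
}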
diff --git a/llvm/lib/Target/R600/SIInstructions.td b/llvm/lib/Target/R600/SIInstructions.td
index c130a7625ce..4a4c94c6cc4 100644
--- a/llvm/lib/Target/R600/SIInstructions.td
+++ b/llvm/lib/Target/R600/SIInstructions.td
@@ -1742,6 +1742,12 @@ defm V_TRIG_PREOP_F64 : VOP3Inst <
 //===----------------------------------------------------------------------===//
 
 let isCodeGenOnly = 1, isPseudo = 1 in {
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
+// 64-bit vector move instruction. This is mainly used by the SIFoldOperands
+// pass to enable folding of inline immediates.
+def V_MOV_B64_PSEUDO : InstSI <(outs VReg_64:$dst), (ins VSrc_64:$src0), "", []>;
+} // end let hasSideEffects = 0, mayLoad = 0, mayStore = 0
+
 let hasSideEffects = 1 in {
 def SGPR_USE : InstSI <(outs),(ins), "", []>;
 }
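Since the pseudo's source is VSrc_64, it can carry either a register or an immediate. Below is a rough sketch (hypothetical, not taken from this commit) of materializing a 64-bit constant with it before register allocation; after allocation, the expandPostRAPseudo() hunk above lowers it to two V_MOV_B32_e32 instructions:

// Hypothetical use, assuming MRI, MBB, InsertPt, DL and a const SIInstrInfo
// *TII are in scope: create one mov-like def that SIFoldOperands can fold from.
unsigned DstReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
BuildMI(MBB, InsertPt, DL, TII->get(AMDGPU::V_MOV_B64_PSEUDO), DstReg)
    .addImm(0x0000000100000002LL);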