summaryrefslogtreecommitdiffstats
path: root/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
diff options
context:
space:
mode:
authorNicolai Haehnle <nhaehnle@gmail.com>2017-11-28 08:41:50 +0000
committerNicolai Haehnle <nhaehnle@gmail.com>2017-11-28 08:41:50 +0000
commit39980dac0b1724cea2a64a2ee76ce35df6dbf0b6 (patch)
tree4198b5d61f222332bbee9588cbd95b22e3ff77d7 /llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
parenta46522f369ca9a84127293ed07ca955c1b155782 (diff)
downloadbcm5719-llvm-39980dac0b1724cea2a64a2ee76ce35df6dbf0b6.tar.gz
bcm5719-llvm-39980dac0b1724cea2a64a2ee76ce35df6dbf0b6.zip
AMDGPU: Consistently check for immediates in SIInstrInfo::FoldImmediate
Summary: The PeepholeOptimizer pass calls this function solely based on checking DefMI->isMoveImmediate(), which only checks the MoveImm bit of the instruction description. So it's up to FoldImmediate itself to properly check that DefMI *actually* moves from an immediate. I don't have a separate test case for this, but the next patch introduces a test case which happens to crash without this change. This error is caught by the assertion in MachineOperand::getImm(). Change-Id: I88e7cdbcf54d75e1a296822e6fe5f9a5f095bbf8 Reviewers: arsenm, rampitec Subscribers: kzhuravl, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits Differential Revision: https://reviews.llvm.org/D40342 llvm-svn: 319155
Diffstat (limited to 'llvm/lib/Target/AMDGPU/SIInstrInfo.cpp')
-rw-r--r-- llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 45
1 files changed, 22 insertions, 23 deletions
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index e24d4faf934..8df1c58848e 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -1918,28 +1918,29 @@ bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
if (!MRI->hasOneNonDBGUse(Reg))
return false;
+ switch (DefMI.getOpcode()) {
+ default:
+ return false;
+ case AMDGPU::S_MOV_B64:
+ // TODO: We could fold 64-bit immediates, but this gets complicated
+ // when there are sub-registers.
+ return false;
+
+ case AMDGPU::V_MOV_B32_e32:
+ case AMDGPU::S_MOV_B32:
+ break;
+ }
+
+ const MachineOperand *ImmOp = getNamedOperand(DefMI, AMDGPU::OpName::src0);
+ assert(ImmOp);
+ // FIXME: We could handle FrameIndex values here.
+ if (!ImmOp->isImm())
+ return false;
+
unsigned Opc = UseMI.getOpcode();
if (Opc == AMDGPU::COPY) {
bool isVGPRCopy = RI.isVGPR(*MRI, UseMI.getOperand(0).getReg());
- switch (DefMI.getOpcode()) {
- default:
- return false;
- case AMDGPU::S_MOV_B64:
- // TODO: We could fold 64-bit immediates, but this gets complicated
- // when there are sub-registers.
- return false;
-
- case AMDGPU::V_MOV_B32_e32:
- case AMDGPU::S_MOV_B32:
- break;
- }
unsigned NewOpc = isVGPRCopy ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
- const MachineOperand *ImmOp = getNamedOperand(DefMI, AMDGPU::OpName::src0);
- assert(ImmOp);
- // FIXME: We could handle FrameIndex values here.
- if (!ImmOp->isImm()) {
- return false;
- }
UseMI.setDesc(get(NewOpc));
UseMI.getOperand(1).ChangeToImmediate(ImmOp->getImm());
UseMI.addImplicitDefUseOperands(*UseMI.getParent()->getParent());
@@ -1953,15 +1954,13 @@ bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
if (hasAnyModifiersSet(UseMI))
return false;
- const MachineOperand &ImmOp = DefMI.getOperand(1);
-
// If this is a free constant, there's no reason to do this.
// TODO: We could fold this here instead of letting SIFoldOperands do it
// later.
MachineOperand *Src0 = getNamedOperand(UseMI, AMDGPU::OpName::src0);
// Any src operand can be used for the legality check.
- if (isInlineConstant(UseMI, *Src0, ImmOp))
+ if (isInlineConstant(UseMI, *Src0, *ImmOp))
return false;
bool IsF32 = Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64;
@@ -1979,7 +1978,7 @@ bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
// We need to swap operands 0 and 1 since madmk constant is at operand 1.
- const int64_t Imm = DefMI.getOperand(1).getImm();
+ const int64_t Imm = ImmOp->getImm();
// FIXME: This would be a lot easier if we could return a new instruction
// instead of having to modify in place.
@@ -2024,7 +2023,7 @@ bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))
return false;
- const int64_t Imm = DefMI.getOperand(1).getImm();
+ const int64_t Imm = ImmOp->getImm();
// FIXME: This would be a lot easier if we could return a new instruction
// instead of having to modify in place.
OpenPOWER on IntegriCloud