author    Matt Arsenault <Matthew.Arsenault@amd.com>  2019-10-07 19:07:19 +0000
committer Matt Arsenault <Matthew.Arsenault@amd.com>  2019-10-07 19:07:19 +0000
commit    0b2ea91d6d162c3d5af824729ff3f925d163f8ac (patch)
tree      077f02c42224099220bf3ac0fdd5bc2a1a76523f /llvm/lib/Target
parent    578fa2819fa64b60dfec66a3b970634c71a39841 (diff)
AMDGPU/GlobalISel: Use S_MOV_B64 for inline constants
This hides some defects in SIFoldOperands when the immediates are split.

llvm-svn: 373943
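For context, AMDGPU's inline constants are the small signed integers -16..64 plus a handful of floating-point values (0.5, 1.0, 2.0, 4.0 and their negations, with 1/(2*pi) added on newer subtargets), and these can be encoded directly in a single S_MOV_B64. The standalone sketch below approximates that check; the helper name isInlineConstant64 and the exact value set are illustrative assumptions, not the SIInstrInfo::isInlineConstant implementation used by the patch:

// Standalone sketch (not LLVM code): approximates which 64-bit immediates
// AMDGPU can encode as inline constants, so a single S_MOV_B64 suffices.
// The real check is SIInstrInfo::isInlineConstant, which also depends on
// subtarget features (e.g. the 1/(2*pi) constant); treat this as illustrative.
#include <cstdint>

static bool isInlineConstant64(int64_t Imm) {
  // Small signed integers -16..64 are inline constants.
  if (Imm >= -16 && Imm <= 64)
    return true;
  // A few double-precision bit patterns are also inline constants.
  switch (static_cast<uint64_t>(Imm)) {
  case 0x3FE0000000000000ULL: // 0.5
  case 0xBFE0000000000000ULL: // -0.5
  case 0x3FF0000000000000ULL: // 1.0
  case 0xBFF0000000000000ULL: // -1.0
  case 0x4000000000000000ULL: // 2.0
  case 0xC000000000000000ULL: // -2.0
  case 0x4010000000000000ULL: // 4.0
  case 0xC010000000000000ULL: // -4.0
    return true;
  default:
    return false;
  }
}

When a 64-bit G_CONSTANT passes such a check, the selector change in the diff below emits one S_MOV_B64 instead of two 32-bit moves joined by a REG_SEQUENCE, so SIFoldOperands never sees the immediate split into halves.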
Diffstat (limited to 'llvm/lib/Target')
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp  47
1 file changed, 27 insertions(+), 20 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index 28ebbd9101c..aa165d4ce21 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -1472,31 +1472,38 @@ bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
- DebugLoc DL = I.getDebugLoc();
- const TargetRegisterClass *RC = IsSgpr ? &AMDGPU::SReg_32_XM0RegClass :
- &AMDGPU::VGPR_32RegClass;
- Register LoReg = MRI->createVirtualRegister(RC);
- Register HiReg = MRI->createVirtualRegister(RC);
- const APInt &Imm = APInt(Size, I.getOperand(1).getImm());
-
- BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
- .addImm(Imm.trunc(32).getZExtValue());
-
- BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
- .addImm(Imm.ashr(32).getZExtValue());
-
- const MachineInstr *RS =
- BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
- .addReg(LoReg)
- .addImm(AMDGPU::sub0)
- .addReg(HiReg)
- .addImm(AMDGPU::sub1);
+ const DebugLoc &DL = I.getDebugLoc();
+
+ APInt Imm(Size, I.getOperand(1).getImm());
+
+ MachineInstr *ResInst;
+ if (IsSgpr && TII.isInlineConstant(Imm)) {
+ ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
+ .addImm(I.getOperand(1).getImm());
+ } else {
+ const TargetRegisterClass *RC = IsSgpr ?
+ &AMDGPU::SReg_32_XM0RegClass : &AMDGPU::VGPR_32RegClass;
+ Register LoReg = MRI->createVirtualRegister(RC);
+ Register HiReg = MRI->createVirtualRegister(RC);
+
+ BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
+ .addImm(Imm.trunc(32).getZExtValue());
+
+ BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
+ .addImm(Imm.ashr(32).getZExtValue());
+
+ ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
+ .addReg(LoReg)
+ .addImm(AMDGPU::sub0)
+ .addReg(HiReg)
+ .addImm(AMDGPU::sub1);
+ }
// We can't call constrainSelectedInstRegOperands here, because it doesn't
// work for target independent opcodes
I.eraseFromParent();
const TargetRegisterClass *DstRC =
- TRI.getConstrainedRegClassForOperand(RS->getOperand(0), *MRI);
+ TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI);
if (!DstRC)
return true;
return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);