author     Matt Arsenault <Matthew.Arsenault@amd.com>   2019-09-12 23:46:46 +0000
committer  Matt Arsenault <Matthew.Arsenault@amd.com>   2019-09-12 23:46:46 +0000
commit     8382ce5f1b099e4cf8b1e15fe9efb6963740b6cc (patch)
tree       e1fd3f80d2f8fc1ace55debac6a7b14b9cb6894d /llvm/lib
parent     4a8916cf1a45a9ee5a95cc011f60ce10a02ff196 (diff)
AMDGPU: Inline constant when materializing FI with add on gfx9
This was relying on the SGPR usable for the carry-out clobber to also be used for the input. There is no carry out on gfx9, so with no carry-out clobber to worry about, the literal can simply be used directly with a VOP2 add.

llvm-svn: 371791
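For context on the encoding constraint involved: the VOP3 form V_ADD_U32_e64 can only fold inline constants (for integer operands, roughly the range -16..64), while the VOP2 form V_ADD_U32_e32 can carry one full 32-bit literal but has no clamp operand. Below is a minimal standalone sketch of that folding decision; canEncodeIntegerInline and canFoldOffsetIntoAdd are made-up names for illustration, not LLVM's actual helpers.

#include <cstdint>
#include <iostream>

// Hypothetical helper, not LLVM's isInlinableLiteral32: for integer operands,
// AMDGPU inline constants cover roughly [-16, 64]. Anything outside that range
// is a 32-bit literal, which the VOP3 encoding of the add cannot carry but the
// VOP2 encoding can.
static bool canEncodeIntegerInline(int32_t Imm) {
  return Imm >= -16 && Imm <= 64;
}

// The decision the patched eliminateFrameIndex makes, in spirit: fold the
// scaled frame-index offset directly into the add if the VOP2 form is used
// (any 32-bit literal is fine) or the offset happens to be an inline constant.
static bool canFoldOffsetIntoAdd(bool IsVOP2Add, int32_t Offset) {
  return IsVOP2Add || canEncodeIntegerInline(Offset);
}

int main() {
  // Example: a stack offset of 0x400 is not an inline constant, so the old
  // VOP3 path had to materialize it in a register first; the VOP2 path can
  // encode it directly as a literal.
  std::cout << canFoldOffsetIntoAdd(/*IsVOP2Add=*/false, 0x400) << '\n'; // 0
  std::cout << canFoldOffsetIntoAdd(/*IsVOP2Add=*/true, 0x400) << '\n';  // 1
  return 0;
}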
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInstrInfo.cpp    | 2 +-
-rw-r--r--  llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp | 7 +++++--
2 files changed, 6 insertions(+), 3 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index fe57435204a..3386f80beb8 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -6110,7 +6110,7 @@ MachineInstrBuilder SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB,
                                                Register DestReg,
                                                RegScavenger &RS) const {
   if (ST.hasAddNoCarry())
-    return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e64), DestReg);
+    return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e32), DestReg);
 
   Register UnusedCarry = RS.scavengeRegister(RI.getBoolRC(), I, 0, false);
   // TODO: Users need to deal with this.
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
index 235de006a1d..7967d9c9fb9 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -1285,12 +1285,15 @@ void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
           .addImm(ST.getWavefrontSizeLog2())
           .addReg(DiffReg, RegState::Kill);
 
+        const bool IsVOP2 = MIB->getOpcode() == AMDGPU::V_ADD_U32_e32;
+
         // TODO: Fold if use instruction is another add of a constant.
-        if (AMDGPU::isInlinableLiteral32(Offset, ST.hasInv2PiInlineImm())) {
+        if (IsVOP2 || AMDGPU::isInlinableLiteral32(Offset, ST.hasInv2PiInlineImm())) {
           // FIXME: This can fail
           MIB.addImm(Offset);
           MIB.addReg(ScaledReg, RegState::Kill);
-          MIB.addImm(0); // clamp bit
+          if (!IsVOP2)
+            MIB.addImm(0); // clamp bit
         } else {
           Register ConstOffsetReg =
             RS->scavengeRegister(&AMDGPU::SReg_32_XM0RegClass, MIB, 0, false);
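A caller-side illustration of why the trailing clamp immediate is now conditional (again a standalone sketch with made-up names, not the SIRegisterInfo code): the VOP2 add takes only the immediate and the scaled register, whereas the _e64 form also expects a clamp modifier operand.

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// Rough standalone model of the operand lists built above; the operand strings
// are invented for illustration. V_ADD_U32_e32 (VOP2) takes the immediate and
// the scaled register; the _e64 (VOP3) form additionally carries a clamp
// modifier, hence the conditional trailing 0 in the patch.
static std::vector<std::string> addOperands(bool IsVOP2, int32_t Offset,
                                            const std::string &ScaledReg) {
  std::vector<std::string> Ops = {std::to_string(Offset), ScaledReg};
  if (!IsVOP2)
    Ops.push_back("0 /*clamp*/");
  return Ops;
}

int main() {
  for (const std::string &Op : addOperands(/*IsVOP2=*/true, 0x400, "%scaled"))
    std::cout << Op << ' ';
  std::cout << '\n';
  return 0;
}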