path: root/llvm/lib/Target/AMDGPU
author    Matt Arsenault <Matthew.Arsenault@amd.com>  2019-05-03 14:40:10 +0000
committer Matt Arsenault <Matthew.Arsenault@amd.com>  2019-05-03 14:40:10 +0000
commit    0446fbe45ecf417cc05cddc198a3503ea8d2bc81 (patch)
tree      67d58564e3c6f02039d30c6fe82a776d90ffb0a3 /llvm/lib/Target/AMDGPU
parent    bc876df3a5f0f25b7c43a90e4c35f553ca5362df (diff)
download  bcm5719-llvm-0446fbe45ecf417cc05cddc198a3503ea8d2bc81.tar.gz
          bcm5719-llvm-0446fbe45ecf417cc05cddc198a3503ea8d2bc81.zip
AMDGPU: Replace shrunk instruction with dummy implicit_def
This was broken if the original operand was killed: the kill flag would appear on both instructions and fail the verifier. Keep the kill flag, but remove the operands from the old instruction. This has the added benefit of genuinely reducing the use count for future folds.

Ideally the pass would be structured more like what PeepholeOptimizer does, which would avoid needing this hack to keep instruction iterators from being invalidated.

llvm-svn: 359891
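The pattern the patch settles on can be sketched as a small standalone helper. This is only an illustration of the hunks below, not the verbatim SIFoldOperands code: the helper name is hypothetical, TargetOpcode::IMPLICIT_DEF stands in for AMDGPU::IMPLICIT_DEF, and the surrounding updateOperand() bookkeeping (building the shrunk Inst32, handling commuted folds) is omitted.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"

using namespace llvm;

// Hypothetical helper sketching the pattern from the diff below: instead of
// erasing the old (pre-shrink) instruction -- which would invalidate
// iterators held by the caller -- redirect its def to a throwaway register,
// strip the remaining operands so they stop showing up as uses, and rewrite
// the leftover shell into an IMPLICIT_DEF.
static void neutralizeOldInst(MachineInstr &MI, MachineRegisterInfo &MRI,
                              const TargetInstrInfo &TII) {
  MachineOperand &Dst0 = MI.getOperand(0);
  const TargetRegisterClass *Dst0RC = MRI.getRegClass(Dst0.getReg());

  // Point the def at a fresh virtual register so the real result is no
  // longer defined by this instruction.
  Dst0.setReg(MRI.createVirtualRegister(Dst0RC));

  // Drop every operand except the def; removing them also removes their
  // entries from the register use lists, which helps future folds.
  for (unsigned I = MI.getNumOperands() - 1; I > 0; --I)
    MI.RemoveOperand(I);

  // Turn what is left into a harmless dummy instruction.
  MI.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
}

Because the leftover IMPLICIT_DEF defines only a fresh, otherwise-unused virtual register, it is presumably cleaned up later as dead code; the point of keeping it in the block is purely that erasing the instruction here would break iterators held by callers.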
Diffstat (limited to 'llvm/lib/Target/AMDGPU')
-rw-r--r--  llvm/lib/Target/AMDGPU/SIFoldOperands.cpp  12
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index 8db2ee21603..dcfc39e6755 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -233,8 +233,6 @@ static bool updateOperand(FoldCandidate &Fold,
const TargetRegisterClass *Dst0RC = MRI.getRegClass(Dst0.getReg());
unsigned NewReg0 = MRI.createVirtualRegister(Dst0RC);
- const TargetRegisterClass *Dst1RC = MRI.getRegClass(Dst1.getReg());
- unsigned NewReg1 = MRI.createVirtualRegister(Dst1RC);
MachineInstr *Inst32 = TII.buildShrunkInst(*MI, Op32);
@@ -244,9 +242,15 @@ static bool updateOperand(FoldCandidate &Fold,
}
// Keep the old instruction around to avoid breaking iterators, but
- // replace the outputs with dummy registers.
+ // replace it with a dummy instruction to remove uses.
+ //
+ // FIXME: We should not invert how this pass looks at operands to avoid
+ // this. Should track set of foldable movs instead of looking for uses
+ // when looking at a use.
Dst0.setReg(NewReg0);
- Dst1.setReg(NewReg1);
+ for (unsigned I = MI->getNumOperands() - 1; I > 0; --I)
+ MI->RemoveOperand(I);
+ MI->setDesc(TII.get(AMDGPU::IMPLICIT_DEF));
if (Fold.isCommuted())
TII.commuteInstruction(*Inst32, false);