author    Michael Liao <michael.hliao@gmail.com>  2019-05-28 16:29:39 +0000
committer Michael Liao <michael.hliao@gmail.com>  2019-05-28 16:29:39 +0000
commit    7166843f1e10efbdd3a24fccb15ad33bfb6f0f70 (patch)
tree      202b815d8e76d5898a3d5c8c9ca0baf58518159b
parent    800db530d9fa1ed03a4facbb9e058413f4eca42c (diff)
[AMDGPU] Fix the mis-handling of `vreg_1` copied from scalar register.
Summary:
- Don't treat the use of a scalar register as `vreg_1` as a VGPR use.
  Otherwise, that promotes the scalar register into a vector one, which
  breaks the assumption that the scalar register holds the lane mask.
- The issue is triggered in a complicated case where the uses of that
  (lane mask) scalar register are legalized before its definition, e.g.
  due to a mismatch between the block placement and the topological
  order, or due to a loop. In that case, the legalization of the PHI
  introduces the use of that scalar register as `vreg_1`.

Reviewers: rampitec, nhaehnle, arsenm, alex-t

Subscribers: kzhuravl, jvesely, wdng, dstuttard, tpr, t-tye, hiraditya, llvm-commits, yaxunl

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D62492

llvm-svn: 361847
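To make the heuristic concrete: the pass counts how many uses of a PHI result genuinely require a VGPR, and after this patch a `vreg_1` use no longer counts toward that total. The following is a minimal standalone C++ sketch of just that counting logic; the RegClass enum and countVGPRUses function are invented stand-ins for illustration, not LLVM's actual MachineRegisterInfo API:

    // Standalone model of the adjusted heuristic. RegClass and
    // countVGPRUses are illustrative stand-ins, not LLVM's types.
    #include <cstdio>
    #include <vector>

    enum class RegClass { SGPR, VGPR, VReg_1 };

    // Count the PHI uses that truly need a VGPR. A `vreg_1` use is
    // skipped: lane masks live in scalar registers, so counting it as
    // a VGPR use would wrongly promote the scalar source to a vector.
    static int countVGPRUses(const std::vector<RegClass> &Uses) {
      int HasVGPRUses = 0;
      for (RegClass RC : Uses)
        if (RC != RegClass::SGPR && RC != RegClass::VReg_1)
          ++HasVGPRUses;
      return HasVGPRUses;
    }

    int main() {
      // One true VGPR use plus one vreg_1 use: only the former is
      // counted, so a `hasVGPRUses > 1` promotion test stays false.
      std::vector<RegClass> Uses = {RegClass::VGPR, RegClass::VReg_1};
      std::printf("VGPR uses: %d\n", countVGPRUses(Uses)); // prints 1
      return 0;
    }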
-rw-r--r--  llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp   |  6
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fix-sgpr-copies.mir | 44
2 files changed, 49 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
index d20910baed3..fb151b4ffdc 100644
--- a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
@@ -588,7 +588,9 @@ bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
         }
         if (UseMI->isPHI()) {
-          if (!TRI->isSGPRReg(MRI, Use.getReg()))
+          const TargetRegisterClass *UseRC = MRI.getRegClass(Use.getReg());
+          if (!TRI->isSGPRReg(MRI, Use.getReg()) &&
+              UseRC != &AMDGPU::VReg_1RegClass)
             hasVGPRUses++;
           continue;
         }
@@ -633,8 +635,10 @@
       if ((!TRI->isVGPR(MRI, PHIRes) && RC0 != &AMDGPU::VReg_1RegClass) &&
           (hasVGPRInput || hasVGPRUses > 1)) {
+        LLVM_DEBUG(dbgs() << "Fixing PHI: " << MI);
         TII->moveToVALU(MI);
       } else {
+        LLVM_DEBUG(dbgs() << "Legalizing PHI: " << MI);
         TII->legalizeOperands(MI, MDT);
       }
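The two LLVM_DEBUG additions print only in assertion-enabled builds. Assuming the pass's DEBUG_TYPE is si-fix-sgpr-copies (it is not visible in this hunk), the messages can be surfaced when running the new test with something like:

    $ llc -march=amdgcn -run-pass=si-fix-sgpr-copies \
          -debug-only=si-fix-sgpr-copies \
          llvm/test/CodeGen/AMDGPU/fix-sgpr-copies.mir -o /dev/null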
diff --git a/llvm/test/CodeGen/AMDGPU/fix-sgpr-copies.mir b/llvm/test/CodeGen/AMDGPU/fix-sgpr-copies.mir
index 3d6e05cb2c9..306e62a4309 100644
--- a/llvm/test/CodeGen/AMDGPU/fix-sgpr-copies.mir
+++ b/llvm/test/CodeGen/AMDGPU/fix-sgpr-copies.mir
@@ -16,3 +16,47 @@ body: |
     %6:sreg_32 = S_ADD_I32 %2:sreg_32, %5:sreg_32, implicit-def $scc
     %7:sreg_32 = S_ADDC_U32 %3:sreg_32, %1:sreg_32, implicit-def $scc, implicit $scc
 ...
+
+# Test to ensure i1 phi copies from scalar registers through another phi won't
+# be promoted into vector ones.
+# GCN-LABEL: name: fix-sgpr-i1-phi-copies
+# GCN: .8:
+# GCN-NOT: vreg_64 = PHI
+---
+name: fix-sgpr-i1-phi-copies
+tracksRegLiveness: true
+body: |
+ bb.9:
+ S_BRANCH %bb.0
+
+ bb.4:
+ S_CBRANCH_SCC1 %bb.6, implicit undef $scc
+
+ bb.5:
+ %3:vreg_1 = IMPLICIT_DEF
+
+ bb.6:
+ %4:vreg_1 = PHI %2:sreg_64, %bb.4, %3:vreg_1, %bb.5
+
+ bb.7:
+ %5:vreg_1 = PHI %2:sreg_64, %bb.3, %4:vreg_1, %bb.6
+ S_BRANCH %bb.8
+
+ bb.0:
+ S_CBRANCH_SCC1 %bb.2, implicit undef $scc
+
+ bb.1:
+ %0:sreg_64 = S_MOV_B64 0
+ S_BRANCH %bb.3
+
+ bb.2:
+ %1:sreg_64 = S_MOV_B64 -1
+ S_BRANCH %bb.3
+
+ bb.3:
+ %2:sreg_64 = PHI %0:sreg_64, %bb.1, %1:sreg_64, %bb.2
+ S_CBRANCH_SCC1 %bb.7, implicit undef $scc
+ S_BRANCH %bb.4
+
+ bb.8:
+...
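The new case can also be run in isolation through lit; the invocation below is a sketch that assumes <build> is the local CMake build directory:

    $ <build>/bin/llvm-lit -v llvm/test/CodeGen/AMDGPU/fix-sgpr-copies.mir

This executes the file's RUN lines and reports pass/fail per test.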