Diffstat (limited to 'llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp')
 llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp | 34 +++++++++++++++++-----------------
 1 file changed, 17 insertions(+), 17 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
index 34b1f758f7b..e9b381ce89b 100644
--- a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
@@ -14,46 +14,46 @@
/// Register Class <vsrc> is the union of <vgpr> and <sgpr>
///
/// BB0:
-/// %vreg0 <sgpr> = SCALAR_INST
-/// %vreg1 <vsrc> = COPY %vreg0 <sgpr>
+/// %0 <sgpr> = SCALAR_INST
+/// %1 <vsrc> = COPY %0 <sgpr>
/// ...
/// BRANCH %cond BB1, BB2
/// BB1:
-/// %vreg2 <vgpr> = VECTOR_INST
-/// %vreg3 <vsrc> = COPY %vreg2 <vgpr>
+/// %2 <vgpr> = VECTOR_INST
+/// %3 <vsrc> = COPY %2 <vgpr>
/// BB2:
-/// %vreg4 <vsrc> = PHI %vreg1 <vsrc>, <BB#0>, %vreg3 <vsrc>, <BB#1>
-/// %vreg5 <vgpr> = VECTOR_INST %vreg4 <vsrc>
+/// %4 <vsrc> = PHI %1 <vsrc>, <BB#0>, %3 <vsrc>, <BB#1>
+/// %5 <vgpr> = VECTOR_INST %4 <vsrc>
///
///
/// The coalescer will begin at BB0 and eliminate its copy, then the resulting
/// code will look like this:
///
/// BB0:
-/// %vreg0 <sgpr> = SCALAR_INST
+/// %0 <sgpr> = SCALAR_INST
/// ...
/// BRANCH %cond BB1, BB2
/// BB1:
-/// %vreg2 <vgpr> = VECTOR_INST
-/// %vreg3 <vsrc> = COPY %vreg2 <vgpr>
+/// %2 <vgpr> = VECTOR_INST
+/// %3 <vsrc> = COPY %2 <vgpr>
/// BB2:
-/// %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <vsrc>, <BB#1>
-/// %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
+/// %4 <sgpr> = PHI %0 <sgpr>, <BB#0>, %3 <vsrc>, <BB#1>
+/// %5 <vgpr> = VECTOR_INST %4 <sgpr>
///
/// Now that the result of the PHI instruction is an SGPR, the register
-/// allocator is now forced to constrain the register class of %vreg3 to
+/// allocator is now forced to constrain the register class of %3 to
/// <sgpr> so we end up with final code like this:
///
/// BB0:
-/// %vreg0 <sgpr> = SCALAR_INST
+/// %0 <sgpr> = SCALAR_INST
/// ...
/// BRANCH %cond BB1, BB2
/// BB1:
-/// %vreg2 <vgpr> = VECTOR_INST
-/// %vreg3 <sgpr> = COPY %vreg2 <vgpr>
+/// %2 <vgpr> = VECTOR_INST
+/// %3 <sgpr> = COPY %2 <vgpr>
/// BB2:
-/// %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <sgpr>, <BB#1>
-/// %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
+/// %4 <sgpr> = PHI %0 <sgpr>, <BB#0>, %3 <sgpr>, <BB#1>
+/// %5 <vgpr> = VECTOR_INST %4 <sgpr>
///
/// Now this code contains an illegal copy from a VGPR to an SGPR.
///
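
One way to avoid the illegal copy is to keep the PHI result in BB2 in <vgpr>, so the coalescer never has to materialize a VGPR-to-SGPR copy. A minimal sketch of that end state, in the same pseudo-MIR notation as the comment above (not part of the hunk; register numbers are illustrative only):

    BB2:
      %4 <vgpr> = PHI %1 <vsrc>, <BB#0>, %3 <vsrc>, <BB#1>
      %5 <vgpr> = VECTOR_INST %4 <vgpr>

With %4 constrained to <vgpr>, the coalescer can no longer fold the <sgpr> definition from BB0 directly into the PHI result, so the copies that survive are SGPR-to-VGPR copies, which are legal on this target.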