path: root/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
author     Francis Visoiu Mistrih <francisvm@yahoo.com>  2017-11-30 12:12:19 +0000
committer  Francis Visoiu Mistrih <francisvm@yahoo.com>  2017-11-30 12:12:19 +0000
commit     93ef145862e140ed880fd2eca404dc2641a12093 (patch)
tree       6e29f9874fc995008ef8e53a6375b06de67d33e6 /llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
parent     bfb8fa5a160a943d9cf8d7254cf4375f2c135f2d (diff)
[CodeGen] Print "%vreg0" as "%0" in both MIR and debug output
As part of the unification of the debug format and the MIR format, avoid printing "vreg" for virtual registers (which is one of the current MIR possibilities).

Basically:

* find . \( -name "*.mir" -o -name "*.cpp" -o -name "*.h" -o -name "*.ll" \) -type f -print0 | xargs -0 sed -i '' -E "s/%vreg([0-9]+)/%\1/g"
* grep -nr '%vreg' . and fix if needed
* find . \( -name "*.mir" -o -name "*.cpp" -o -name "*.h" -o -name "*.ll" \) -type f -print0 | xargs -0 sed -i '' -E "s/ vreg([0-9]+)/ %\1/g"
* grep -nr 'vreg[0-9]\+' . and fix if needed

Differential Revision: https://reviews.llvm.org/D40420

llvm-svn: 319427
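For illustration only (not part of this commit), the following is a minimal standalone C++ sketch of the rewrite the sed expressions above perform: every "%vregN" token becomes "%N". The sample input line is hypothetical.

// Standalone sketch: apply the "%vregN" -> "%N" rewrite to one line of text.
#include <iostream>
#include <regex>
#include <string>

int main() {
  // Hypothetical line in the old notation.
  std::string Line = "%vreg4 <sgpr> = PHI %vreg0 <sgpr>, %vreg3 <sgpr>";

  // Mirrors the first sed expression: s/%vreg([0-9]+)/%\1/g
  std::string Renamed =
      std::regex_replace(Line, std::regex("%vreg([0-9]+)"), "%$1");

  std::cout << Renamed << "\n"; // prints: %4 <sgpr> = PHI %0 <sgpr>, %3 <sgpr>
  return 0;
}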
Diffstat (limited to 'llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp')
-rw-r--r--  llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp  34
1 file changed, 17 insertions, 17 deletions
diff --git a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
index 34b1f758f7b..e9b381ce89b 100644
--- a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
@@ -14,46 +14,46 @@
/// Register Class <vsrc> is the union of <vgpr> and <sgpr>
///
/// BB0:
-/// %vreg0 <sgpr> = SCALAR_INST
-/// %vreg1 <vsrc> = COPY %vreg0 <sgpr>
+/// %0 <sgpr> = SCALAR_INST
+/// %1 <vsrc> = COPY %0 <sgpr>
/// ...
/// BRANCH %cond BB1, BB2
/// BB1:
-/// %vreg2 <vgpr> = VECTOR_INST
-/// %vreg3 <vsrc> = COPY %vreg2 <vgpr>
+/// %2 <vgpr> = VECTOR_INST
+/// %3 <vsrc> = COPY %2 <vgpr>
/// BB2:
-/// %vreg4 <vsrc> = PHI %vreg1 <vsrc>, <BB#0>, %vreg3 <vrsc>, <BB#1>
-/// %vreg5 <vgpr> = VECTOR_INST %vreg4 <vsrc>
+/// %4 <vsrc> = PHI %1 <vsrc>, <BB#0>, %3 <vrsc>, <BB#1>
+/// %5 <vgpr> = VECTOR_INST %4 <vsrc>
///
///
/// The coalescer will begin at BB0 and eliminate its copy, then the resulting
/// code will look like this:
///
/// BB0:
-/// %vreg0 <sgpr> = SCALAR_INST
+/// %0 <sgpr> = SCALAR_INST
/// ...
/// BRANCH %cond BB1, BB2
/// BB1:
-/// %vreg2 <vgpr> = VECTOR_INST
-/// %vreg3 <vsrc> = COPY %vreg2 <vgpr>
+/// %2 <vgpr> = VECTOR_INST
+/// %3 <vsrc> = COPY %2 <vgpr>
/// BB2:
-/// %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <vsrc>, <BB#1>
-/// %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
+/// %4 <sgpr> = PHI %0 <sgpr>, <BB#0>, %3 <vsrc>, <BB#1>
+/// %5 <vgpr> = VECTOR_INST %4 <sgpr>
///
/// Now that the result of the PHI instruction is an SGPR, the register
-/// allocator is now forced to constrain the register class of %vreg3 to
+/// allocator is now forced to constrain the register class of %3 to
/// <sgpr> so we end up with final code like this:
///
/// BB0:
-/// %vreg0 <sgpr> = SCALAR_INST
+/// %0 <sgpr> = SCALAR_INST
/// ...
/// BRANCH %cond BB1, BB2
/// BB1:
-/// %vreg2 <vgpr> = VECTOR_INST
-/// %vreg3 <sgpr> = COPY %vreg2 <vgpr>
+/// %2 <vgpr> = VECTOR_INST
+/// %3 <sgpr> = COPY %2 <vgpr>
/// BB2:
-/// %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <sgpr>, <BB#1>
-/// %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
+/// %4 <sgpr> = PHI %0 <sgpr>, <BB#0>, %3 <sgpr>, <BB#1>
+/// %5 <vgpr> = VECTOR_INST %4 <sgpr>
///
/// Now this code contains an illegal copy from a VGPR to an SGPR.
///
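The comment above describes what SIFixSGPRCopies ultimately has to repair: after coalescing, a COPY whose destination belongs to a scalar (SGPR) register class takes its value from a vector (VGPR) class. Below is a rough, hypothetical sketch (not the pass's actual implementation) of how such a copy can be recognized from the register classes of its virtual-register operands; it assumes the AMDGPU SIRegisterInfo helpers isSGPRClass() and hasVGPRs() plus the usual MachineRegisterInfo queries.

// Hypothetical sketch only: identify a COPY whose destination register class
// is scalar (SGPR) but whose source register class contains VGPRs, i.e. the
// illegal VGPR-to-SGPR copy discussed in the comment above.
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
using namespace llvm;

static bool isIllegalVGPRToSGPRCopy(const MachineInstr &MI,
                                    const MachineRegisterInfo &MRI,
                                    const SIRegisterInfo &TRI) {
  if (!MI.isCopy())
    return false;
  unsigned DstReg = MI.getOperand(0).getReg();
  unsigned SrcReg = MI.getOperand(1).getReg();
  // Only virtual registers carry a register class in MachineRegisterInfo.
  if (!TargetRegisterInfo::isVirtualRegister(DstReg) ||
      !TargetRegisterInfo::isVirtualRegister(SrcReg))
    return false;
  const TargetRegisterClass *DstRC = MRI.getRegClass(DstReg);
  const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
  // Scalar destination fed from a vector source: this copy has to be rewritten
  // (or the scalar use moved to the vector ALU) before register allocation.
  return TRI.isSGPRClass(DstRC) && TRI.hasVGPRs(SrcRC);
}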