-rw-r--r--  llvm/include/llvm/CodeGen/MachineOperand.h | 4
-rw-r--r--  llvm/include/llvm/CodeGen/TargetInstrInfo.h | 16
-rw-r--r--  llvm/include/llvm/CodeGen/TargetRegisterInfo.h | 4
-rw-r--r--  llvm/lib/CodeGen/DetectDeadLanes.cpp | 12
-rw-r--r--  llvm/lib/CodeGen/LiveIntervalAnalysis.cpp | 20
-rw-r--r--  llvm/lib/CodeGen/MachineVerifier.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/PeepholeOptimizer.cpp | 20
-rw-r--r--  llvm/lib/CodeGen/RegAllocGreedy.cpp | 30
-rw-r--r--  llvm/lib/CodeGen/RegisterCoalescer.cpp | 54
-rw-r--r--  llvm/lib/CodeGen/RenameIndependentSubregs.cpp | 24
-rw-r--r--  llvm/lib/CodeGen/SplitKit.cpp | 4
-rw-r--r--  llvm/lib/CodeGen/TargetRegisterInfo.cpp | 4
-rw-r--r--  llvm/lib/Target/AArch64/AArch64InstrInfo.cpp | 22
-rw-r--r--  llvm/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp | 12
-rw-r--r--  llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp | 34
-rw-r--r--  llvm/lib/Target/AMDGPU/SIFoldOperands.cpp | 8
-rw-r--r--  llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 10
-rw-r--r--  llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp | 10
-rw-r--r--  llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp | 2
-rw-r--r--  llvm/lib/Target/ARM/ARMBaseInstrInfo.h | 16
-rw-r--r--  llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp | 8
-rw-r--r--  llvm/lib/Target/Hexagon/BitTracker.cpp | 16
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp | 22
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonBlockRanges.cpp | 2
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp | 2
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp | 34
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp | 70
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonGenInsert.cpp | 8
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp | 6
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonPeephole.cpp | 40
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp | 6
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonSubtarget.cpp | 8
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXPeephole.cpp | 6
-rw-r--r--  llvm/lib/Target/PowerPC/PPCBranchCoalescing.cpp | 38
-rw-r--r--  llvm/lib/Target/PowerPC/PPCInstrInfo.cpp | 2
-rw-r--r--  llvm/lib/Target/PowerPC/PPCMIPeephole.cpp | 10
-rw-r--r--  llvm/lib/Target/PowerPC/PPCVSXFMAMutate.cpp | 40
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll | 30
-rw-r--r--  llvm/test/CodeGen/AArch64/GlobalISel/verify-regbankselected.mir | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/GlobalISel/verify-selected.mir | 6
-rw-r--r--  llvm/test/CodeGen/AArch64/aarch64-stp-cluster.ll | 56
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-fast-isel-rem.ll | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-ldp-cluster.ll | 52
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll | 6
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll | 10
-rw-r--r--  llvm/test/CodeGen/AArch64/tailcall_misched_graph.ll | 10
-rw-r--r--  llvm/test/CodeGen/AMDGPU/lds-output-queue.ll | 18
-rw-r--r--  llvm/test/CodeGen/AMDGPU/liveness.mir | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/spill-empty-live-interval.mir | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/subreg-intervals.mir | 4
-rw-r--r--  llvm/test/CodeGen/ARM/2011-11-14-EarlyClobber.ll | 4
-rw-r--r--  llvm/test/CodeGen/ARM/Windows/dbzchk.ll | 2
-rw-r--r--  llvm/test/CodeGen/ARM/crash-greedy.ll | 2
-rw-r--r--  llvm/test/CodeGen/ARM/misched-copy-arm.ll | 6
-rw-r--r--  llvm/test/CodeGen/ARM/misched-int-basic-thumb2.mir | 24
-rw-r--r--  llvm/test/CodeGen/ARM/misched-int-basic.mir | 14
-rw-r--r--  llvm/test/CodeGen/ARM/single-issue-r52.mir | 14
-rw-r--r--  llvm/test/CodeGen/ARM/subreg-remat.ll | 8
-rw-r--r--  llvm/test/CodeGen/AVR/select-must-add-unconditional-jump.ll | 8
-rw-r--r--  llvm/test/CodeGen/Hexagon/circ_ldd_bug.ll | 8
-rw-r--r--  llvm/test/CodeGen/Hexagon/expand-condsets-rm-reg.mir | 4
-rw-r--r--  llvm/test/CodeGen/Hexagon/post-inc-aa-metadata.ll | 2
-rw-r--r--  llvm/test/CodeGen/Lanai/lanai-misched-trivial-disjoint.ll | 2
-rw-r--r--  llvm/test/CodeGen/MIR/AArch64/spill-fold.mir | 2
-rw-r--r--  llvm/test/CodeGen/PowerPC/quadint-return.ll | 4
-rw-r--r--  llvm/test/CodeGen/WebAssembly/dbgvalue.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/2011-09-14-valcoalesce.ll | 12
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/cmovcmov.ll | 10
-rw-r--r--  llvm/test/CodeGen/X86/coalescer-dce.ll | 28
-rw-r--r--  llvm/test/CodeGen/X86/crash.ll | 6
-rw-r--r--  llvm/test/CodeGen/X86/handle-move.ll | 18
-rw-r--r--  llvm/test/CodeGen/X86/invalid-liveness.mir | 6
-rw-r--r--  llvm/test/CodeGen/X86/liveness-local-regalloc.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/misched-copy.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/norex-subreg.ll | 6
-rw-r--r--  llvm/test/CodeGen/X86/phys_subreg_coalesce-3.ll | 8
-rw-r--r--  llvm/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg-debugonly.mir | 6
79 files changed, 523 insertions, 523 deletions
diff --git a/llvm/include/llvm/CodeGen/MachineOperand.h b/llvm/include/llvm/CodeGen/MachineOperand.h
index f477745867e..64889eb3a2d 100644
--- a/llvm/include/llvm/CodeGen/MachineOperand.h
+++ b/llvm/include/llvm/CodeGen/MachineOperand.h
@@ -116,9 +116,9 @@ private:
/// the same register. In that case, the instruction may depend on those
/// operands reading the same don't-care value. For example:
///
- /// %vreg1<def> = XOR %vreg2<undef>, %vreg2<undef>
+ /// %1<def> = XOR %2<undef>, %2<undef>
///
- /// Any register can be used for %vreg2, and its value doesn't matter, but
+ /// Any register can be used for %2, and its value doesn't matter, but
/// the two operands must be the same register.
///
bool IsUndef : 1;
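
A concrete instance of this constraint is the zero idiom: whatever don't-care value the register holds, XOR-ing it with itself yields 0, but only when both operands read the same register. A scalar check in plain C++ (the function name is illustrative):

    #include <cassert>
    #include <cstdint>

    // Models %1 = XOR %2<undef>, %2<undef>: the don't-care input cancels
    // out, so any register works for %2, as long as both operands use it.
    uint32_t xorUndefPair(uint32_t DontCare) { return DontCare ^ DontCare; }

    int main() {
      for (uint32_t V : {0u, 42u, 0xFFFFFFFFu})
        assert(xorUndefPair(V) == 0); // result independent of the allocation
    }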
diff --git a/llvm/include/llvm/CodeGen/TargetInstrInfo.h b/llvm/include/llvm/CodeGen/TargetInstrInfo.h
index 6770e503e61..c4a3865e7f0 100644
--- a/llvm/include/llvm/CodeGen/TargetInstrInfo.h
+++ b/llvm/include/llvm/CodeGen/TargetInstrInfo.h
@@ -422,10 +422,10 @@ public:
/// and \p DefIdx.
/// \p [out] InputRegs of the equivalent REG_SEQUENCE. Each element of
/// the list is modeled as <Reg:SubReg, SubIdx>.
- /// E.g., REG_SEQUENCE vreg1:sub1, sub0, vreg2, sub1 would produce
+ /// E.g., REG_SEQUENCE %1:sub1, sub0, %2, sub1 would produce
/// two elements:
- /// - vreg1:sub1, sub0
- /// - vreg2<:0>, sub1
+ /// - %1:sub1, sub0
+ /// - %2<:0>, sub1
///
/// \returns true if it is possible to build such an input sequence
/// with the pair \p MI, \p DefIdx. False otherwise.
@@ -442,8 +442,8 @@ public:
/// Build the equivalent inputs of a EXTRACT_SUBREG for the given \p MI
/// and \p DefIdx.
/// \p [out] InputReg of the equivalent EXTRACT_SUBREG.
- /// E.g., EXTRACT_SUBREG vreg1:sub1, sub0, sub1 would produce:
- /// - vreg1:sub1, sub0
+ /// E.g., EXTRACT_SUBREG %1:sub1, sub0, sub1 would produce:
+ /// - %1:sub1, sub0
///
/// \returns true if it is possible to build such an input sequence
/// with the pair \p MI, \p DefIdx. False otherwise.
@@ -460,9 +460,9 @@ public:
/// and \p DefIdx.
/// \p [out] BaseReg and \p [out] InsertedReg contain
/// the equivalent inputs of INSERT_SUBREG.
- /// E.g., INSERT_SUBREG vreg0:sub0, vreg1:sub1, sub3 would produce:
- /// - BaseReg: vreg0:sub0
- /// - InsertedReg: vreg1:sub1, sub3
+ /// E.g., INSERT_SUBREG %0:sub0, %1:sub1, sub3 would produce:
+ /// - BaseReg: %0:sub0
+ /// - InsertedReg: %1:sub1, sub3
///
/// \returns true if it is possible to build such an input sequence
/// with the pair \p MI, \p DefIdx. False otherwise.
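
A minimal standalone model of the decomposed form, with hypothetical names and subregister indices (the in-tree hooks fill a list of RegSubRegPairAndIdx entries), showing the lane lookup a client such as the peephole optimizer performs:

    #include <cassert>
    #include <vector>

    // Stand-in for the <Reg:SubReg, SubIdx> triple described above.
    struct InputReg {
      unsigned Reg;    // input register number, e.g. 1 for %1
      unsigned SubReg; // subregister read from Reg, 0 for the whole register
      unsigned SubIdx; // lane of the REG_SEQUENCE result this input defines
    };

    // Which input feeds lane SubIdx of the result? For
    //   %3 = REG_SEQUENCE %1:sub1, sub0, %2, sub1
    // looking up sub1 yields %2, so a later %4 = COPY %3:sub1 can be
    // rewritten as %4 = COPY %2.
    const InputReg *inputForLane(const std::vector<InputReg> &Inputs,
                                 unsigned SubIdx) {
      for (const InputReg &In : Inputs)
        if (In.SubIdx == SubIdx)
          return &In;
      return nullptr; // no input defines this lane
    }

    int main() {
      enum { sub0 = 1, sub1 = 2 }; // hypothetical subregister indices
      std::vector<InputReg> Inputs = {{1, sub1, sub0}, {2, 0, sub1}};
      assert(inputForLane(Inputs, sub1)->Reg == 2);
    }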
diff --git a/llvm/include/llvm/CodeGen/TargetRegisterInfo.h b/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
index 308216b7998..92d38d51fee 100644
--- a/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
+++ b/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
@@ -1138,8 +1138,8 @@ struct VirtReg2IndexFunctor {
///
/// The format is:
/// %noreg - NoRegister
-/// %vreg5 - a virtual register.
-/// %vreg5:sub_8bit - a virtual register with sub-register index (with TRI).
+/// %5 - a virtual register.
+/// %5:sub_8bit - a virtual register with sub-register index (with TRI).
/// %eax - a physical register
/// %physreg17 - a physical register when no TRI instance given.
///
diff --git a/llvm/lib/CodeGen/DetectDeadLanes.cpp b/llvm/lib/CodeGen/DetectDeadLanes.cpp
index ef4e2aaaf48..b82876e1c85 100644
--- a/llvm/lib/CodeGen/DetectDeadLanes.cpp
+++ b/llvm/lib/CodeGen/DetectDeadLanes.cpp
@@ -17,12 +17,12 @@
/// when subregisters are involved.
///
/// Example:
-/// %vreg0 = some definition
-/// %vreg1 = IMPLICIT_DEF
-/// %vreg2 = REG_SEQUENCE %vreg0, sub0, %vreg1, sub1
-/// %vreg3 = EXTRACT_SUBREG %vreg2, sub1
-/// = use %vreg3
-/// The %vreg0 definition is dead and %vreg3 contains an undefined value.
+/// %0 = some definition
+/// %1 = IMPLICIT_DEF
+/// %2 = REG_SEQUENCE %0, sub0, %1, sub1
+/// %3 = EXTRACT_SUBREG %2, sub1
+/// = use %3
+/// The %0 definition is dead and %3 contains an undefined value.
//
//===----------------------------------------------------------------------===//
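
The detection amounts to two lane-mask dataflow problems, one backward for used lanes and one forward for defined lanes. A self-contained sketch of the example above with simplified two-lane masks (not the pass's actual worklist machinery):

    #include <cassert>
    #include <cstdint>

    using LaneMask = uint8_t; // simplified: bit 0 = sub0, bit 1 = sub1
    constexpr LaneMask Sub0 = 1, Sub1 = 2;

    int main() {
      // Backward: the EXTRACT_SUBREG reads only lane sub1 of %2, and the
      // REG_SEQUENCE places %0 in lane sub0, so no lane of %0 is used.
      LaneMask UsedLanes2 = Sub1;
      LaneMask UsedLanes0 = UsedLanes2 & Sub0; // 0: the %0 definition is dead

      // Forward: %1 is an IMPLICIT_DEF, so lane sub1 of %2 carries no
      // defined value, and %3 (extracted from sub1) is undefined.
      LaneMask DefinedLanes2 = Sub0;
      bool Undef3 = (DefinedLanes2 & Sub1) == 0; // true

      assert(UsedLanes0 == 0 && Undef3);
    }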
diff --git a/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp b/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
index c55519387d1..fb7fbe7f1c2 100644
--- a/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
+++ b/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
@@ -698,11 +698,11 @@ void LiveIntervals::addKillFlags(const VirtRegMap *VRM) {
// Check if any of the regunits are live beyond the end of RI. That could
// happen when a physreg is defined as a copy of a virtreg:
//
- // %eax = COPY %vreg5
- // FOO %vreg5 <--- MI, cancel kill because %eax is live.
+ // %eax = COPY %5
+ // FOO %5 <--- MI, cancel kill because %eax is live.
// BAR %eax<kill>
//
- // There should be no kill flag on FOO when %vreg5 is rewritten as %eax.
+ // There should be no kill flag on FOO when %5 is rewritten as %eax.
for (auto &RUP : RU) {
const LiveRange &RURange = *RUP.first;
LiveRange::const_iterator &I = RUP.second;
@@ -719,13 +719,13 @@ void LiveIntervals::addKillFlags(const VirtRegMap *VRM) {
// When reading a partially undefined value we must not add a kill flag.
// The regalloc might have used the undef lane for something else.
// Example:
- // %vreg1 = ... ; R32: %vreg1
- // %vreg2:high16 = ... ; R64: %vreg2
- // = read %vreg2<kill> ; R64: %vreg2
- // = read %vreg1 ; R32: %vreg1
- // The <kill> flag is correct for %vreg2, but the register allocator may
- // assign R0L to %vreg1, and R0 to %vreg2 because the low 32bits of R0
- // are actually never written by %vreg2. After assignment the <kill>
+ // %1 = ... ; R32: %1
+ // %2:high16 = ... ; R64: %2
+ // = read %2<kill> ; R64: %2
+ // = read %1 ; R32: %1
+ // The <kill> flag is correct for %2, but the register allocator may
+ // assign R0L to %1, and R0 to %2 because the low 32 bits of R0
+ // are actually never written by %2. After assignment the <kill>
// flag at the read instruction is invalid.
LaneBitmask DefinedLanesMask;
if (!SRs.empty()) {
diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index 4f6eb428c8e..83a9e1a58c0 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -1961,7 +1961,7 @@ void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
if (MOI->isDef()) {
if (Sub != 0) {
hasSubRegDef = true;
- // An operand vreg0:sub0<def> reads vreg0:sub1..n. Invert the lane
+ // An operand %0:sub0<def> reads %0:sub1..n. Invert the lane
// mask for subregister defs. Read-undef defs will be handled by
// readsReg below.
SLM = ~SLM;
diff --git a/llvm/lib/CodeGen/PeepholeOptimizer.cpp b/llvm/lib/CodeGen/PeepholeOptimizer.cpp
index dfad7615bca..e4c2aa46478 100644
--- a/llvm/lib/CodeGen/PeepholeOptimizer.cpp
+++ b/llvm/lib/CodeGen/PeepholeOptimizer.cpp
@@ -1453,10 +1453,10 @@ bool PeepholeOptimizer::foldImmediate(
// only the first copy is considered.
//
// e.g.
-// %vreg1 = COPY %vreg0
-// %vreg2 = COPY %vreg0:sub1
+// %1 = COPY %0
+// %2 = COPY %0:sub1
//
-// Should replace %vreg2 uses with %vreg1:sub1
+// Should replace %2 uses with %1:sub1
bool PeepholeOptimizer::foldRedundantCopy(
MachineInstr *MI, SmallSet<unsigned, 4> &CopySrcRegs,
DenseMap<unsigned, MachineInstr *> &CopyMIs) {
@@ -1621,16 +1621,16 @@ bool PeepholeOptimizer::findTargetRecurrence(
/// from the phi. For example, if there is a recurrence of
///
/// LoopHeader:
-/// %vreg1 = phi(%vreg0, %vreg100)
+/// %1 = phi(%0, %100)
/// LoopLatch:
-/// %vreg0<def, tied1> = ADD %vreg2<def, tied0>, %vreg1
+/// %0<def, tied1> = ADD %2<def, tied0>, %1
///
-/// , the fact that vreg0 and vreg2 are in the same tied operands set makes
+/// , the fact that %0 and %2 are in the same tied operands set makes
/// the coalescing of copy instruction generated from the phi in
-/// LoopHeader(i.e. %vreg1 = COPY %vreg0) impossible, because %vreg1 and
-/// %vreg2 have overlapping live range. This introduces additional move
-/// instruction to the final assembly. However, if we commute %vreg2 and
-/// %vreg1 of ADD instruction, the redundant move instruction can be
+/// LoopHeader (i.e. %1 = COPY %0) impossible, because %1 and
+/// %2 have overlapping live ranges. This introduces an additional move
+/// instruction into the final assembly. However, if we commute %2 and
+/// %1 of the ADD instruction, the redundant move instruction can be
/// avoided.
bool PeepholeOptimizer::optimizeRecurrence(MachineInstr &PHI) {
SmallSet<unsigned, 2> TargetRegs;
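
foldRedundantCopy above reduces to remembering the first COPY out of each source; a stripped-down model (hypothetical names, simplified to exact repeats of the same source and subindex, where the real pass also checks that the source is not redefined in between):

    #include <cassert>
    #include <map>
    #include <utility>

    struct CopyInstr {
      unsigned Dst, Src, SrcSub;
    };

    // First COPY destination seen for each (Src, SrcSub) key.
    std::map<std::pair<unsigned, unsigned>, unsigned> FirstCopyDst;

    // Returns 0 for the first copy of a source; otherwise the register
    // whose value should replace all uses of MI.Dst.
    unsigned foldRedundantCopy(const CopyInstr &MI) {
      auto Key = std::make_pair(MI.Src, MI.SrcSub);
      auto [It, Inserted] = FirstCopyDst.try_emplace(Key, MI.Dst);
      return Inserted ? 0 : It->second;
    }

    int main() {
      assert(foldRedundantCopy({1, 0, 0}) == 0); // %1 = COPY %0: kept
      assert(foldRedundantCopy({2, 0, 0}) == 1); // %2 = COPY %0: reuse %1
    }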
diff --git a/llvm/lib/CodeGen/RegAllocGreedy.cpp b/llvm/lib/CodeGen/RegAllocGreedy.cpp
index 39676fed3d0..c3d94d8a5eb 100644
--- a/llvm/lib/CodeGen/RegAllocGreedy.cpp
+++ b/llvm/lib/CodeGen/RegAllocGreedy.cpp
@@ -1396,30 +1396,30 @@ BlockFrequency RAGreedy::calcSpillCost() {
/// Such sequences are created in 2 scenarios:
///
/// Scenario #1:
-/// vreg0 is evicted from physreg0 by vreg1.
-/// Evictee vreg0 is intended for region splitting with split candidate
-/// physreg0 (the reg vreg0 was evicted from).
+/// %0 is evicted from physreg0 by %1.
+/// Evictee %0 is intended for region splitting with split candidate
+/// physreg0 (the reg %0 was evicted from).
/// Region splitting creates a local interval because of interference with the
-/// evictor vreg1 (normally region spliitting creates 2 interval, the "by reg"
+/// evictor %1 (normally region splitting creates 2 intervals, the "by reg"
/// and "by stack" intervals, and a local interval created when interference
/// occurs).
-/// One of the split intervals ends up evicting vreg2 from physreg1.
-/// Evictee vreg2 is intended for region splitting with split candidate
+/// One of the split intervals ends up evicting %2 from physreg1.
+/// Evictee %2 is intended for region splitting with split candidate
/// physreg1.
-/// One of the split intervals ends up evicting vreg3 from physreg2, etc.
+/// One of the split intervals ends up evicting %3 from physreg2, etc.
///
/// Scenario #2:
-/// vreg0 is evicted from physreg0 by vreg1.
-/// vreg2 is evicted from physreg2 by vreg3 etc.
-/// Evictee vreg0 is intended for region splitting with split candidate
+/// %0 is evicted from physreg0 by %1.
+/// %2 is evicted from physreg2 by %3 etc.
+/// Evictee %0 is intended for region splitting with split candidate
/// physreg1.
/// Region splitting creates a local interval because of interference with the
-/// evictor vreg1.
-/// One of the split intervals ends up evicting back original evictor vreg1
-/// from physreg0 (the reg vreg0 was evicted from).
-/// Another evictee vreg2 is intended for region splitting with split candidate
+/// evictor %1.
+/// One of the split intervals ends up evicting back original evictor %1
+/// from physreg0 (the reg %0 was evicted from).
+/// Another evictee %2 is intended for region splitting with split candidate
/// physreg1.
-/// One of the split intervals ends up evicting vreg3 from physreg2, etc.
+/// One of the split intervals ends up evicting %3 from physreg2, etc.
///
/// \param Evictee The register considered to be split.
/// \param Cand The split candidate that determines the physical register
diff --git a/llvm/lib/CodeGen/RegisterCoalescer.cpp b/llvm/lib/CodeGen/RegisterCoalescer.cpp
index 81f9a343dc1..128a07cef10 100644
--- a/llvm/lib/CodeGen/RegisterCoalescer.cpp
+++ b/llvm/lib/CodeGen/RegisterCoalescer.cpp
@@ -228,9 +228,9 @@ namespace {
/// flag.
/// This can happen when undef uses were previously concealed by a copy
/// which we coalesced. Example:
- /// %vreg0:sub0<def,read-undef> = ...
- /// %vreg1 = COPY %vreg0 <-- Coalescing COPY reveals undef
- /// = use %vreg1:sub1 <-- hidden undef use
+ /// %0:sub0<def,read-undef> = ...
+ /// %1 = COPY %0 <-- Coalescing COPY reveals undef
+ /// = use %1:sub1 <-- hidden undef use
void addUndefFlag(const LiveInterval &Int, SlotIndex UseIdx,
MachineOperand &MO, unsigned SubRegIdx);
@@ -1143,10 +1143,10 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
NewMI.setDebugLoc(DL);
// In a situation like the following:
- // %vreg0:subreg = instr ; DefMI, subreg = DstIdx
- // %vreg1 = copy %vreg0:subreg ; CopyMI, SrcIdx = 0
- // instead of widening %vreg1 to the register class of %vreg0 simply do:
- // %vreg1 = instr
+ // %0:subreg = instr ; DefMI, subreg = DstIdx
+ // %1 = copy %0:subreg ; CopyMI, SrcIdx = 0
+ // instead of widening %1 to the register class of %0 simply do:
+ // %1 = instr
const TargetRegisterClass *NewRC = CP.getNewRC();
if (DstIdx != 0) {
MachineOperand &DefMO = NewMI.getOperand(0);
@@ -1226,12 +1226,12 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
// This could happen if the rematerialization instruction is rematerializing
// more than is actually used in the register.
// An example would be:
- // vreg1 = LOAD CONSTANTS 5, 8 ; Loading both 5 and 8 in different subregs
+ // %1 = LOAD CONSTANTS 5, 8 ; Loading both 5 and 8 in different subregs
// ; Copying only part of the register here, but the rest is undef.
- // vreg2:sub_16bit<def, read-undef> = COPY vreg1:sub_16bit
+ // %2:sub_16bit<def, read-undef> = COPY %1:sub_16bit
// ==>
// ; Materialize all the constants but only using one
- // vreg2 = LOAD_CONSTANTS 5, 8
+ // %2 = LOAD_CONSTANTS 5, 8
//
// at this point for the part that wasn't defined before we could have
// subranges missing the definition.
@@ -1254,11 +1254,11 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
// Make sure that the subrange for resultant undef is removed
// For example:
- // vreg1:sub1<def,read-undef> = LOAD CONSTANT 1
- // vreg2<def> = COPY vreg1
+ // %1:sub1<def,read-undef> = LOAD CONSTANT 1
+ // %2<def> = COPY %1
// ==>
- // vreg2:sub1<def, read-undef> = LOAD CONSTANT 1
- // ; Correct but need to remove the subrange for vreg2:sub0
+ // %2:sub1<def, read-undef> = LOAD CONSTANT 1
+ // ; Correct but need to remove the subrange for %2:sub0
// ; as it is now undef
if (NewIdx != 0 && DstInt.hasSubRanges()) {
// The affected subregister segments can be removed.
@@ -1292,15 +1292,15 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
// Otherwise, variables that live through may miss some
// interferences, thus creating invalid allocation.
// E.g., i386 code:
- // vreg1 = somedef ; vreg1 GR8
- // vreg2 = remat ; vreg2 GR32
- // CL = COPY vreg2.sub_8bit
- // = somedef vreg1 ; vreg1 GR8
+ // %1 = somedef ; %1 GR8
+ // %2 = remat ; %2 GR32
+ // CL = COPY %2.sub_8bit
+ // = somedef %1 ; %1 GR8
// =>
- // vreg1 = somedef ; vreg1 GR8
+ // %1 = somedef ; %1 GR8
// ECX<def, dead> = remat ; CL<imp-def>
- // = somedef vreg1 ; vreg1 GR8
- // vreg1 will see the inteferences with CL but not with CH since
+ // = somedef %1 ; %1 GR8
+ // %1 will see the interferences with CL but not with CH since
// no live-ranges would have been created for ECX.
// Fix that!
SlotIndex NewMIIdx = LIS->getInstructionIndex(NewMI);
@@ -1353,9 +1353,9 @@ bool RegisterCoalescer::eliminateUndefCopy(MachineInstr *CopyMI) {
// ProcessImplicitDefs may leave some copies of <undef> values; it only removes
// local variables. When we have a copy like:
//
- // %vreg1 = COPY %vreg2<undef>
+ // %1 = COPY %2<undef>
//
- // We delete the copy and remove the corresponding value number from %vreg1.
+ // We delete the copy and remove the corresponding value number from %1.
// Any uses of that value number are marked as <undef>.
// Note that we do not query CoalescerPair here but redo isMoveInstr as the
@@ -1820,18 +1820,18 @@ bool RegisterCoalescer::joinReservedPhysReg(CoalescerPair &CP) {
MachineInstr *CopyMI;
if (CP.isFlipped()) {
// Physreg is copied into vreg
- // %vregY = COPY %x
+ // %y = COPY %x
// ... //< no other def of %x here
- // use %vregY
+ // use %y
// =>
// ...
// use %x
CopyMI = MRI->getVRegDef(SrcReg);
} else {
// VReg is copied into physreg:
- // %vregX = def
+ // %x = def
// ... //< no other def or use of %y here
- // %y = COPY %vregX
+ // %y = COPY %x
// =>
// %y = def
// ...
diff --git a/llvm/lib/CodeGen/RenameIndependentSubregs.cpp b/llvm/lib/CodeGen/RenameIndependentSubregs.cpp
index 72b7960f327..b423d674364 100644
--- a/llvm/lib/CodeGen/RenameIndependentSubregs.cpp
+++ b/llvm/lib/CodeGen/RenameIndependentSubregs.cpp
@@ -10,20 +10,20 @@
/// Rename independent subregisters looks for virtual registers with
/// independently used subregisters and renames them to new virtual registers.
/// Example: In the following:
-/// %vreg0:sub0<read-undef> = ...
-/// %vreg0:sub1 = ...
-/// use %vreg0:sub0
-/// %vreg0:sub0 = ...
-/// use %vreg0:sub0
-/// use %vreg0:sub1
+/// %0:sub0<read-undef> = ...
+/// %0:sub1 = ...
+/// use %0:sub0
+/// %0:sub0 = ...
+/// use %0:sub0
+/// use %0:sub1
/// sub0 and sub1 are never used together, and we have two independent sub0
/// definitions. This pass will rename to:
-/// %vreg0:sub0<read-undef> = ...
-/// %vreg1:sub1<read-undef> = ...
-/// use %vreg1:sub1
-/// %vreg2:sub1<read-undef> = ...
-/// use %vreg2:sub1
-/// use %vreg0:sub0
+/// %0:sub0<read-undef> = ...
+/// %1:sub1<read-undef> = ...
+/// use %1:sub1
+/// %2:sub1<read-undef> = ...
+/// use %2:sub1
+/// use %0:sub0
//
//===----------------------------------------------------------------------===//
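
The renaming decision is an equivalence-class computation: every def and use that must stay in one register is unioned, and each surviving class becomes its own virtual register (the in-tree pass does this with IntEqClasses over subrange segments). A toy union-find over the six operations in the comment above:

    #include <cstdio>
    #include <numeric>
    #include <vector>

    // Tiny disjoint-set union; each def/use of %0 above is a node.
    struct DSU {
      std::vector<int> P;
      explicit DSU(int N) : P(N) { std::iota(P.begin(), P.end(), 0); }
      int find(int X) { return P[X] == X ? X : P[X] = find(P[X]); }
      void join(int A, int B) { P[find(A)] = find(B); }
    };

    int main() {
      // Nodes in program order:
      // 0: def %0:sub0   1: def %0:sub1   2: use %0:sub0
      // 3: def %0:sub0   4: use %0:sub0   5: use %0:sub1
      DSU D(6);
      D.join(2, 0); // a use belongs with its reaching def
      D.join(4, 3);
      D.join(5, 1);
      int Classes = 0;
      for (int I = 0; I < 6; ++I)
        Classes += (D.find(I) == I);
      std::printf("%d independent registers\n", Classes); // prints 3
    }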
diff --git a/llvm/lib/CodeGen/SplitKit.cpp b/llvm/lib/CodeGen/SplitKit.cpp
index 59c5798ab49..49f31333acf 100644
--- a/llvm/lib/CodeGen/SplitKit.cpp
+++ b/llvm/lib/CodeGen/SplitKit.cpp
@@ -1375,9 +1375,9 @@ void SplitEditor::rewriteAssigned(bool ExtendRanges) {
continue;
// The problem here can be that the new register may have been created
// for a partially defined original register. For example:
- // %vreg827:subreg_hireg<def,read-undef> = ...
+ // %0:subreg_hireg<def,read-undef> = ...
// ...
- // %vreg828<def> = COPY %vreg827
+ // %1<def> = COPY %0
if (S.empty())
continue;
SubLRC.reset(&VRM.getMachineFunction(), LIS.getSlotIndexes(), &MDT,
diff --git a/llvm/lib/CodeGen/TargetRegisterInfo.cpp b/llvm/lib/CodeGen/TargetRegisterInfo.cpp
index cc5c1485608..721761eef61 100644
--- a/llvm/lib/CodeGen/TargetRegisterInfo.cpp
+++ b/llvm/lib/CodeGen/TargetRegisterInfo.cpp
@@ -93,7 +93,7 @@ Printable printReg(unsigned Reg, const TargetRegisterInfo *TRI,
else if (TargetRegisterInfo::isStackSlot(Reg))
OS << "SS#" << TargetRegisterInfo::stackSlot2Index(Reg);
else if (TargetRegisterInfo::isVirtualRegister(Reg))
- OS << "%vreg" << TargetRegisterInfo::virtReg2Index(Reg);
+ OS << '%' << TargetRegisterInfo::virtReg2Index(Reg);
else if (TRI && Reg < TRI->getNumRegs()) {
OS << '%';
printLowerCase(TRI->getName(Reg), OS);
@@ -134,7 +134,7 @@ Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI) {
Printable printVRegOrUnit(unsigned Unit, const TargetRegisterInfo *TRI) {
return Printable([Unit, TRI](raw_ostream &OS) {
if (TRI && TRI->isVirtualRegister(Unit)) {
- OS << "%vreg" << TargetRegisterInfo::virtReg2Index(Unit);
+ OS << '%' << TargetRegisterInfo::virtReg2Index(Unit);
} else {
OS << printRegUnit(Unit, TRI);
}
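
For reference, virtual registers are encoded with the sign bit set and the index in the low bits, which is what virtReg2Index strips off. A compressed standalone sketch of the new output (the helpers mirror, but are not, the LLVM declarations):

    #include <cassert>
    #include <cstdio>
    #include <string>

    bool isVirtualRegister(unsigned Reg) { return static_cast<int>(Reg) < 0; }
    unsigned virtReg2Index(unsigned Reg) { return Reg & ~(1u << 31); }
    unsigned index2VirtReg(unsigned Index) { return Index | (1u << 31); }

    std::string formatReg(unsigned Reg) {
      if (isVirtualRegister(Reg))
        return "%" + std::to_string(virtReg2Index(Reg)); // was "%vreg" + index
      return "%physreg" + std::to_string(Reg); // simplified, no TRI lookup
    }

    int main() {
      unsigned V5 = index2VirtReg(5);
      assert(formatReg(V5) == "%5"); // previously printed as "%vreg5"
      std::puts(formatReg(V5).c_str());
    }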
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index bf5f0f624af..bc3c0a4a60e 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -2801,14 +2801,14 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
LiveIntervals *LIS) const {
// This is a bit of a hack. Consider this instruction:
//
- // %vreg0<def> = COPY %sp; GPR64all:%vreg0
+ // %0<def> = COPY %sp; GPR64all:%0
//
// We explicitly chose GPR64all for the virtual register so such a copy might
// be eliminated by RegisterCoalescer. However, that may not be possible, and
- // %vreg0 may even spill. We can't spill %sp, and since it is in the GPR64all
+ // %0 may even spill. We can't spill %sp, and since it is in the GPR64all
// register class, TargetInstrInfo::foldMemoryOperand() is going to try.
//
- // To prevent that, we are going to constrain the %vreg0 register class here.
+ // To prevent that, we are going to constrain the %0 register class here.
//
// <rdar://problem/11522048>
//
@@ -2830,7 +2830,7 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
// Handle the case where a copy is being spilled or filled but the source
// and destination register class don't match. For example:
//
- // %vreg0<def> = COPY %xzr; GPR64common:%vreg0
+ // %0<def> = COPY %xzr; GPR64common:%0
//
// In this case we can still safely fold away the COPY and generate the
// following spill code:
@@ -2840,16 +2840,16 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
// This also eliminates spilled cross register class COPYs (e.g. between x and
// d regs) of the same size. For example:
//
- // %vreg0<def> = COPY %vreg1; GPR64:%vreg0, FPR64:%vreg1
+ // %0<def> = COPY %1; GPR64:%0, FPR64:%1
//
// will be filled as
//
- // LDRDui %vreg0, fi<#0>
+ // LDRDui %0, fi<#0>
//
// instead of
//
- // LDRXui %vregTemp, fi<#0>
- // %vreg0 = FMOV %vregTemp
+ // LDRXui %Temp, fi<#0>
+ // %0 = FMOV %Temp
//
if (MI.isCopy() && Ops.size() == 1 &&
// Make sure we're only folding the explicit COPY defs/uses.
@@ -2886,7 +2886,7 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
// Handle cases like spilling def of:
//
- // %vreg0:sub_32<def,read-undef> = COPY %wzr; GPR64common:%vreg0
+ // %0:sub_32<def,read-undef> = COPY %wzr; GPR64common:%0
//
// where the physical register source can be widened and stored to the full
// virtual reg destination stack slot, in this case producing:
@@ -2934,12 +2934,12 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
// Handle cases like filling use of:
//
- // %vreg0:sub_32<def,read-undef> = COPY %vreg1; GPR64:%vreg0, GPR32:%vreg1
+ // %0:sub_32<def,read-undef> = COPY %1; GPR64:%0, GPR32:%1
//
// where we can load the full virtual reg source stack slot, into the subreg
// destination, in this case producing:
//
- // LDRWui %vreg0:sub_32<def,read-undef>, <fi#0>
+ // LDRWui %0:sub_32<def,read-undef>, <fi#0>
//
if (IsFill && SrcMO.getSubReg() == 0 && DstMO.isUndef()) {
const TargetRegisterClass *FillRC;
diff --git a/llvm/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp b/llvm/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
index 972e61d376d..1bfa837bfb2 100644
--- a/llvm/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
+++ b/llvm/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
@@ -12,16 +12,16 @@
/// common data and/or have enough undef subreg using swizzle abilities.
///
/// For instance let's consider the following pseudo code :
-/// vreg5<def> = REG_SEQ vreg1, sub0, vreg2, sub1, vreg3, sub2, undef, sub3
+/// %5<def> = REG_SEQ %1, sub0, %2, sub1, %3, sub2, undef, sub3
/// ...
-/// vreg7<def> = REG_SEQ vreg1, sub0, vreg3, sub1, undef, sub2, vreg4, sub3
-/// (swizzable Inst) vreg7, SwizzleMask : sub0, sub1, sub2, sub3
+/// %7<def> = REG_SEQ %1, sub0, %3, sub1, undef, sub2, %4, sub3
+/// (swizzable Inst) %7, SwizzleMask : sub0, sub1, sub2, sub3
///
/// is turned into :
-/// vreg5<def> = REG_SEQ vreg1, sub0, vreg2, sub1, vreg3, sub2, undef, sub3
+/// %5<def> = REG_SEQ %1, sub0, %2, sub1, %3, sub2, undef, sub3
/// ...
-/// vreg7<def> = INSERT_SUBREG vreg4, sub3
-/// (swizzable Inst) vreg7, SwizzleMask : sub0, sub2, sub1, sub3
+/// %7<def> = INSERT_SUBREG %4, sub3
+/// (swizzable Inst) %7, SwizzleMask : sub0, sub2, sub1, sub3
///
/// This allows regalloc to reduce register pressure for vector registers and
/// to reduce MOV count.
diff --git a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
index 34b1f758f7b..e9b381ce89b 100644
--- a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
@@ -14,46 +14,46 @@
/// Register Class <vsrc> is the union of <vgpr> and <sgpr>
///
/// BB0:
-/// %vreg0 <sgpr> = SCALAR_INST
-/// %vreg1 <vsrc> = COPY %vreg0 <sgpr>
+/// %0 <sgpr> = SCALAR_INST
+/// %1 <vsrc> = COPY %0 <sgpr>
/// ...
/// BRANCH %cond BB1, BB2
/// BB1:
-/// %vreg2 <vgpr> = VECTOR_INST
-/// %vreg3 <vsrc> = COPY %vreg2 <vgpr>
+/// %2 <vgpr> = VECTOR_INST
+/// %3 <vsrc> = COPY %2 <vgpr>
/// BB2:
-/// %vreg4 <vsrc> = PHI %vreg1 <vsrc>, <BB#0>, %vreg3 <vrsc>, <BB#1>
-/// %vreg5 <vgpr> = VECTOR_INST %vreg4 <vsrc>
+/// %4 <vsrc> = PHI %1 <vsrc>, <BB#0>, %3 <vsrc>, <BB#1>
+/// %5 <vgpr> = VECTOR_INST %4 <vsrc>
///
///
/// The coalescer will begin at BB0 and eliminate its copy, then the resulting
/// code will look like this:
///
/// BB0:
-/// %vreg0 <sgpr> = SCALAR_INST
+/// %0 <sgpr> = SCALAR_INST
/// ...
/// BRANCH %cond BB1, BB2
/// BB1:
-/// %vreg2 <vgpr> = VECTOR_INST
-/// %vreg3 <vsrc> = COPY %vreg2 <vgpr>
+/// %2 <vgpr> = VECTOR_INST
+/// %3 <vsrc> = COPY %2 <vgpr>
/// BB2:
-/// %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <vsrc>, <BB#1>
-/// %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
+/// %4 <sgpr> = PHI %0 <sgpr>, <BB#0>, %3 <vsrc>, <BB#1>
+/// %5 <vgpr> = VECTOR_INST %4 <sgpr>
///
/// Now that the result of the PHI instruction is an SGPR, the register
-/// allocator is now forced to constrain the register class of %vreg3 to
+/// allocator is forced to constrain the register class of %3 to
/// <sgpr> so we end up with final code like this:
///
/// BB0:
-/// %vreg0 <sgpr> = SCALAR_INST
+/// %0 <sgpr> = SCALAR_INST
/// ...
/// BRANCH %cond BB1, BB2
/// BB1:
-/// %vreg2 <vgpr> = VECTOR_INST
-/// %vreg3 <sgpr> = COPY %vreg2 <vgpr>
+/// %2 <vgpr> = VECTOR_INST
+/// %3 <sgpr> = COPY %2 <vgpr>
/// BB2:
-/// %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <sgpr>, <BB#1>
-/// %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
+/// %4 <sgpr> = PHI %0 <sgpr>, <BB#0>, %3 <sgpr>, <BB#1>
+/// %5 <vgpr> = VECTOR_INST %4 <sgpr>
///
/// Now this code contains an illegal copy from a VGPR to an SGPR.
///
diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index 2c52e16892c..52157408b36 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -290,11 +290,11 @@ void SIFoldOperands::foldOperand(
// copy since a subregister use tied to a full register def doesn't really
// make sense. e.g. don't fold:
//
- // %vreg1 = COPY %vreg0:sub1
- // %vreg2<tied3> = V_MAC_{F16, F32} %vreg3, %vreg4, %vreg1<tied0>
+ // %1 = COPY %0:sub1
+ // %2<tied3> = V_MAC_{F16, F32} %3, %4, %1<tied0>
//
// into
- // %vreg2<tied3> = V_MAC_{F16, F32} %vreg3, %vreg4, %vreg0:sub1<tied0>
+ // %2<tied3> = V_MAC_{F16, F32} %3, %4, %0:sub1<tied0>
if (UseOp.isTied() && OpToFold.getSubReg() != AMDGPU::NoSubRegister)
return;
}
@@ -971,7 +971,7 @@ bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
// Prevent folding operands backwards in the function. For example,
// the COPY opcode must not be replaced by 1 in this example:
//
- // %vreg3<def> = COPY %vgpr0; VGPR_32:%vreg3
+ // %3<def> = COPY %vgpr0; VGPR_32:%3
// ...
// %vgpr0<def> = V_MOV_B32_e32 1, %exec<imp-use>
MachineOperand &Dst = MI.getOperand(0);
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 5738077f989..bb8fa2c89fb 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -10,12 +10,12 @@
/// \file This pass tries to apply several peephole SDWA patterns.
///
/// E.g. original:
-/// V_LSHRREV_B32_e32 %vreg0, 16, %vreg1
-/// V_ADD_I32_e32 %vreg2, %vreg0, %vreg3
-/// V_LSHLREV_B32_e32 %vreg4, 16, %vreg2
+/// V_LSHRREV_B32_e32 %0, 16, %1
+/// V_ADD_I32_e32 %2, %0, %3
+/// V_LSHLREV_B32_e32 %4, 16, %2
///
/// Replace:
-/// V_ADD_I32_sdwa %vreg4, %vreg1, %vreg3
+/// V_ADD_I32_sdwa %4, %1, %3
/// dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
///
//===----------------------------------------------------------------------===//
@@ -410,7 +410,7 @@ Optional<int64_t> SIPeepholeSDWA::foldToImm(const MachineOperand &Op) const {
}
// If this is not an immediate then it can be a copy of an immediate value, e.g.:
- // %vreg1<def> = S_MOV_B32 255;
+ // %1<def> = S_MOV_B32 255;
if (Op.isReg()) {
for (const MachineOperand &Def : MRI->def_operands(Op.getReg())) {
if (!isSameReg(Op, Def))
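
The header's shift/add/shift pattern and its SDWA replacement compute the same 32-bit value; a scalar model in plain C++ (register names mirror the comment) that checks the equivalence:

    #include <cassert>
    #include <cstdint>

    uint32_t original(uint32_t R1, uint32_t R3) {
      uint32_t R0 = R1 >> 16; // V_LSHRREV_B32_e32 %0, 16, %1
      uint32_t R2 = R0 + R3;  // V_ADD_I32_e32     %2, %0, %3
      return R2 << 16;        // V_LSHLREV_B32_e32 %4, 16, %2
    }

    // src0_sel:WORD_1 reads the high word of %1, src1_sel:DWORD reads all
    // of %3, dst_sel:WORD_1 dst_unused:UNUSED_PAD writes the low 16 bits
    // of the sum into the high word and zero-pads the low word.
    uint32_t sdwa(uint32_t R1, uint32_t R3) {
      uint32_t Sum = (R1 >> 16) + R3;
      return (Sum & 0xFFFFu) << 16;
    }

    int main() {
      for (uint32_t A : {0u, 1u, 0xFFFFu, 0x12340000u, 0xFFFFFFFFu})
        for (uint32_t B : {0u, 1u, 0x8000u, 0xFFFFFFFFu})
          assert(original(A, B) == sdwa(A, B));
    }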
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
index 152b24599e9..4407a9d0f37 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -1347,13 +1347,13 @@ bool SIRegisterInfo::shouldRewriteCopySrc(
// class.
//
// e.g. if we have something like
- // vreg0 = ...
- // vreg1 = ...
- // vreg2 = REG_SEQUENCE vreg0, sub0, vreg1, sub1, vreg2, sub2
- // vreg3 = COPY vreg2, sub0
+ // %0 = ...
+ // %1 = ...
+ // %2 = REG_SEQUENCE %0, sub0, %1, sub1, %2, sub2
+ // %3 = COPY %2, sub0
//
// We want to look through the COPY to find:
- // => vreg3 = COPY vreg0
+ // => %3 = COPY %0
// Plain copy.
return getCommonSubClass(DefRC, SrcRC) != nullptr;
diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
index 6268b9ef2a3..f9505beea20 100644
--- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -1650,7 +1650,7 @@ bool ARMBaseInstrInfo::produceSameValue(const MachineInstr &MI0,
}
for (unsigned i = 3, e = MI0.getNumOperands(); i != e; ++i) {
- // %vreg12<def> = PICLDR %vreg11, 0, pred:14, pred:%noreg
+ // %12<def> = PICLDR %11, 0, pred:14, pred:%noreg
const MachineOperand &MO0 = MI0.getOperand(i);
const MachineOperand &MO1 = MI1.getOperand(i);
if (!MO0.isIdenticalTo(MO1))
diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.h b/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
index 2ff4b1100ee..d375f40d6e1 100644
--- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
+++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
@@ -47,10 +47,10 @@ protected:
/// and \p DefIdx.
/// \p [out] InputRegs of the equivalent REG_SEQUENCE. Each element of
/// the list is modeled as <Reg:SubReg, SubIdx>.
- /// E.g., REG_SEQUENCE vreg1:sub1, sub0, vreg2, sub1 would produce
+ /// E.g., REG_SEQUENCE %1:sub1, sub0, %2, sub1 would produce
/// two elements:
- /// - vreg1:sub1, sub0
- /// - vreg2<:0>, sub1
+ /// - %1:sub1, sub0
+ /// - %2<:0>, sub1
///
/// \returns true if it is possible to build such an input sequence
/// with the pair \p MI, \p DefIdx. False otherwise.
@@ -63,8 +63,8 @@ protected:
/// Build the equivalent inputs of a EXTRACT_SUBREG for the given \p MI
/// and \p DefIdx.
/// \p [out] InputReg of the equivalent EXTRACT_SUBREG.
- /// E.g., EXTRACT_SUBREG vreg1:sub1, sub0, sub1 would produce:
- /// - vreg1:sub1, sub0
+ /// E.g., EXTRACT_SUBREG %1:sub1, sub0, sub1 would produce:
+ /// - %1:sub1, sub0
///
/// \returns true if it is possible to build such an input sequence
/// with the pair \p MI, \p DefIdx. False otherwise.
@@ -77,9 +77,9 @@ protected:
/// and \p DefIdx.
/// \p [out] BaseReg and \p [out] InsertedReg contain
/// the equivalent inputs of INSERT_SUBREG.
- /// E.g., INSERT_SUBREG vreg0:sub0, vreg1:sub1, sub3 would produce:
- /// - BaseReg: vreg0:sub0
- /// - InsertedReg: vreg1:sub1, sub3
+ /// E.g., INSERT_SUBREG %0:sub0, %1:sub1, sub3 would produce:
+ /// - BaseReg: %0:sub0
+ /// - InsertedReg: %1:sub1, sub3
///
/// \returns true if it is possible to build such an input sequence
/// with the pair \p MI, \p DefIdx. False otherwise.
diff --git a/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp b/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp
index 1c12c23c931..ef52bae3d76 100644
--- a/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp
+++ b/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp
@@ -546,7 +546,7 @@ void BPFDAGToDAGISel::PreprocessTrunc(SDNode *Node,
if (!RegN || !TargetRegisterInfo::isVirtualRegister(RegN->getReg()))
return;
unsigned AndOpReg = RegN->getReg();
- DEBUG(dbgs() << "Examine %vreg" << TargetRegisterInfo::virtReg2Index(AndOpReg)
+ DEBUG(dbgs() << "Examine %" << TargetRegisterInfo::virtReg2Index(AndOpReg)
<< '\n');
// Examine the PHI insns in the MachineBasicBlock to find out the
@@ -574,9 +574,9 @@ void BPFDAGToDAGISel::PreprocessTrunc(SDNode *Node,
return;
} else {
// The PHI node looks like:
- // %vreg2<def> = PHI %vreg0, <BB#1>, %vreg1, <BB#3>
- // Trace each incoming definition, e.g., (%vreg0, BB#1) and (%vreg1, BB#3)
- // The AND operation can be removed if both %vreg0 in BB#1 and %vreg1 in
+ // %2<def> = PHI %0, <BB#1>, %1, <BB#3>
+ // Trace each incoming definition, e.g., (%0, BB#1) and (%1, BB#3)
+ // The AND operation can be removed if both %0 in BB#1 and %1 in
// BB#3 are defined with a load matching the MaskN.
DEBUG(dbgs() << "Check PHI Insn: "; MII->dump(); dbgs() << '\n');
unsigned PrevReg = -1;
diff --git a/llvm/lib/Target/Hexagon/BitTracker.cpp b/llvm/lib/Target/Hexagon/BitTracker.cpp
index 5e20d8ca0fd..4a10408d8c7 100644
--- a/llvm/lib/Target/Hexagon/BitTracker.cpp
+++ b/llvm/lib/Target/Hexagon/BitTracker.cpp
@@ -18,16 +18,16 @@
// A "ref" value is associated with a BitRef structure, which indicates
// which virtual register, and which bit in that register is the origin
// of the value. For example, given an instruction
-// vreg2 = ASL vreg1, 1
-// assuming that nothing is known about bits of vreg1, bit 1 of vreg2
-// will be a "ref" to (vreg1, 0). If there is a subsequent instruction
-// vreg3 = ASL vreg2, 2
-// then bit 3 of vreg3 will be a "ref" to (vreg1, 0) as well.
+// %2 = ASL %1, 1
+// assuming that nothing is known about bits of %1, bit 1 of %2
+// will be a "ref" to (%1, 0). If there is a subsequent instruction
+// %3 = ASL %2, 2
+// then bit 3 of %3 will be a "ref" to (%1, 0) as well.
// The "bottom" case means that the bit's value cannot be determined,
// and that this virtual register actually defines it. The "bottom" case
// is discussed in detail in BitTracker.h. In fact, "bottom" is a "ref
-// to self", so for the vreg1 above, the bit 0 of it will be a "ref" to
-// (vreg1, 0), bit 1 will be a "ref" to (vreg1, 1), etc.
+// to self", so for the %1 above, the bit 0 of it will be a "ref" to
+// (%1, 0), bit 1 will be a "ref" to (%1, 1), etc.
//
// The tracker implements the Wegman-Zadeck algorithm, originally developed
// for SSA-based constant propagation. Each register is represented as
@@ -75,7 +75,7 @@ using BT = BitTracker;
namespace {
- // Local trickery to pretty print a register (without the whole "%vreg"
+ // Local trickery to pretty print a register (without the whole "%number"
// business).
struct printv {
printv(unsigned r) : R(r) {}
diff --git a/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp b/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp
index cbf1b0dc040..d3cb53e3594 100644
--- a/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp
@@ -895,7 +895,7 @@ bool HexagonBitSimplify::getUsedBits(unsigned Opc, unsigned OpN,
}
// Calculate the register class that matches Reg:Sub. For example, if
-// vreg1 is a double register, then vreg1:isub_hi would match the "int"
+// %1 is a double register, then %1:isub_hi would match the "int"
// register class.
const TargetRegisterClass *HexagonBitSimplify::getFinalVRegClass(
const BitTracker::RegisterRef &RR, MachineRegisterInfo &MRI) {
@@ -1246,11 +1246,11 @@ bool RedundantInstrElimination::computeUsedBits(unsigned Reg, BitVector &Bits) {
// holds the bits for the entire register. To keep track of that, the
// argument Begin indicates where in Bits is the lowest-significant bit
// of the register used in operand OpN. For example, in instruction:
-// vreg1 = S2_lsr_i_r vreg2:isub_hi, 10
+// %1 = S2_lsr_i_r %2:isub_hi, 10
// the operand 1 is a 32-bit register, which happens to be a subregister
-// of the 64-bit register vreg2, and that subregister starts at position 32.
+// of the 64-bit register %2, and that subregister starts at position 32.
// In this case Begin=32, since Bits[32] would be the lowest-significant bit
-// of vreg2:isub_hi.
+// of %2:isub_hi.
bool RedundantInstrElimination::computeUsedBits(const MachineInstr &MI,
unsigned OpN, BitVector &Bits, uint16_t Begin) {
unsigned Opc = MI.getOpcode();
@@ -1356,11 +1356,11 @@ bool RedundantInstrElimination::processBlock(MachineBasicBlock &B,
// This pass can create copies between registers that don't have the
// exact same values. Updating the tracker has to involve updating
// all dependent cells. Example:
- // vreg1 = inst vreg2 ; vreg1 != vreg2, but used bits are equal
+ // %1 = inst %2 ; %1 != %2, but used bits are equal
//
- // vreg3 = copy vreg2 ; <- inserted
- // ... = vreg3 ; <- replaced from vreg2
- // Indirectly, we can create a "copy" between vreg1 and vreg2 even
+ // %3 = copy %2 ; <- inserted
+ // ... = %3 ; <- replaced from %2
+ // Indirectly, we can create a "copy" between %1 and %2 even
// though their exact values do not match.
BT.visit(*CopyI);
Changed = true;
@@ -2313,10 +2313,10 @@ bool BitSimplification::genBitSplit(MachineInstr *MI,
// Check for tstbit simplification opportunity, where the bit being checked
// can be tracked back to another register. For example:
-// vreg2 = S2_lsr_i_r vreg1, 5
-// vreg3 = S2_tstbit_i vreg2, 0
+// %2 = S2_lsr_i_r %1, 5
+// %3 = S2_tstbit_i %2, 0
// =>
-// vreg3 = S2_tstbit_i vreg1, 5
+// %3 = S2_tstbit_i %1, 5
bool BitSimplification::simplifyTstbit(MachineInstr *MI,
BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
unsigned Opc = MI->getOpcode();
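
The tstbit rewrite above is the identity "bit 0 of (x >> n) is bit n of x"; a quick standalone check:

    #include <cassert>
    #include <cstdint>

    // Models S2_tstbit_i: test a single bit of a register.
    bool tstbit(uint32_t V, unsigned B) { return (V >> B) & 1u; }

    int main() {
      for (uint32_t X : {0u, 1u, 0x20u, 0xDEADBEEFu, 0xFFFFFFFFu})
        // %3 = S2_tstbit_i (S2_lsr_i_r %1, 5), 0  ==  S2_tstbit_i %1, 5
        assert(tstbit(X >> 5, 0) == tstbit(X, 5));
    }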
diff --git a/llvm/lib/Target/Hexagon/HexagonBlockRanges.cpp b/llvm/lib/Target/Hexagon/HexagonBlockRanges.cpp
index 9ca7e5f0a3c..1953439fc3e 100644
--- a/llvm/lib/Target/Hexagon/HexagonBlockRanges.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonBlockRanges.cpp
@@ -368,7 +368,7 @@ void HexagonBlockRanges::computeInitialLiveRanges(InstrIndexMap &IndexMap,
}
}
// Defs and clobbers can overlap, e.g.
- // %d0<def,dead> = COPY %vreg5, %r0<imp-def>, %r1<imp-def>
+ // %d0<def,dead> = COPY %5, %r0<imp-def>, %r1<imp-def>
for (RegisterRef R : Defs)
Clobbers.erase(R);
diff --git a/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp b/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
index e7c3290d151..9a8762a48fd 100644
--- a/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
@@ -1974,7 +1974,7 @@ bool HexagonConstEvaluator::evaluate(const MachineInstr &MI,
{
const MachineOperand &VO = MI.getOperand(1);
// The operand of CONST32 can be a blockaddress, e.g.
- // %vreg0<def> = CONST32 <blockaddress(@eat, %l)>
+ // %0<def> = CONST32 <blockaddress(@eat, %l)>
// Do this check for all instructions for safety.
if (!VO.isImm())
return false;
diff --git a/llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp b/llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
index b2244107ac4..4a6100d02fc 100644
--- a/llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
@@ -25,37 +25,37 @@
//
// Example:
//
-// %vreg40<def> = L2_loadrub_io %vreg39<kill>, 1
-// %vreg41<def> = S2_tstbit_i %vreg40<kill>, 0
-// J2_jumpt %vreg41<kill>, <BB#5>, %pc<imp-def,dead>
+// %40<def> = L2_loadrub_io %39<kill>, 1
+// %41<def> = S2_tstbit_i %40<kill>, 0
+// J2_jumpt %41<kill>, <BB#5>, %pc<imp-def,dead>
// J2_jump <BB#4>, %pc<imp-def,dead>
// Successors according to CFG: BB#4(62) BB#5(62)
//
// BB#4: derived from LLVM BB %if.then
// Predecessors according to CFG: BB#3
-// %vreg11<def> = A2_addp %vreg6, %vreg10
-// S2_storerd_io %vreg32, 16, %vreg11
+// %11<def> = A2_addp %6, %10
+// S2_storerd_io %32, 16, %11
// Successors according to CFG: BB#5
//
// BB#5: derived from LLVM BB %if.end
// Predecessors according to CFG: BB#3 BB#4
-// %vreg12<def> = PHI %vreg6, <BB#3>, %vreg11, <BB#4>
-// %vreg13<def> = A2_addp %vreg7, %vreg12
-// %vreg42<def> = C2_cmpeqi %vreg9, 10
-// J2_jumpf %vreg42<kill>, <BB#3>, %pc<imp-def,dead>
+// %12<def> = PHI %6, <BB#3>, %11, <BB#4>
+// %13<def> = A2_addp %7, %12
+// %42<def> = C2_cmpeqi %9, 10
+// J2_jumpf %42<kill>, <BB#3>, %pc<imp-def,dead>
// J2_jump <BB#6>, %pc<imp-def,dead>
// Successors according to CFG: BB#6(4) BB#3(124)
//
// would become:
//
-// %vreg40<def> = L2_loadrub_io %vreg39<kill>, 1
-// %vreg41<def> = S2_tstbit_i %vreg40<kill>, 0
-// spec-> %vreg11<def> = A2_addp %vreg6, %vreg10
-// pred-> S2_pstorerdf_io %vreg41, %vreg32, 16, %vreg11
-// %vreg46<def> = PS_pselect %vreg41, %vreg6, %vreg11
-// %vreg13<def> = A2_addp %vreg7, %vreg46
-// %vreg42<def> = C2_cmpeqi %vreg9, 10
-// J2_jumpf %vreg42<kill>, <BB#3>, %pc<imp-def,dead>
+// %40<def> = L2_loadrub_io %39<kill>, 1
+// %41<def> = S2_tstbit_i %40<kill>, 0
+// spec-> %11<def> = A2_addp %6, %10
+// pred-> S2_pstorerdf_io %41, %32, 16, %11
+// %46<def> = PS_pselect %41, %6, %11
+// %13<def> = A2_addp %7, %46
+// %42<def> = C2_cmpeqi %9, 10
+// J2_jumpf %42<kill>, <BB#3>, %pc<imp-def,dead>
// J2_jump <BB#6>, %pc<imp-def,dead>
// Successors according to CFG: BB#6 BB#3
diff --git a/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp b/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp
index 51c3b784370..86645ddf913 100644
--- a/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp
@@ -17,33 +17,33 @@
//
// Liveness tracking aside, the main functionality of this pass is divided
// into two steps. The first step is to replace an instruction
-// vreg0 = C2_mux vreg1, vreg2, vreg3
+// %0 = C2_mux %1, %2, %3
// with a pair of conditional transfers
-// vreg0 = A2_tfrt vreg1, vreg2
-// vreg0 = A2_tfrf vreg1, vreg3
+// %0 = A2_tfrt %1, %2
+// %0 = A2_tfrf %1, %3
// It is the intention that the execution of this pass could be terminated
// after this step, and the code generated would be functionally correct.
//
-// If the uses of the source values vreg1 and vreg2 are kills, and their
+// If the uses of the source values %1 and %2 are kills, and their
// definitions are predicable, then in the second step, the conditional
// transfers will then be rewritten as predicated instructions. E.g.
-// vreg0 = A2_or vreg1, vreg2
-// vreg3 = A2_tfrt vreg99, vreg0<kill>
+// %0 = A2_or %1, %2
+// %3 = A2_tfrt %99, %0<kill>
// will be rewritten as
-// vreg3 = A2_port vreg99, vreg1, vreg2
+// %3 = A2_port %99, %1, %2
//
// This replacement has two variants: "up" and "down". Consider this case:
-// vreg0 = A2_or vreg1, vreg2
+// %0 = A2_or %1, %2
// ... [intervening instructions] ...
-// vreg3 = A2_tfrt vreg99, vreg0<kill>
+// %3 = A2_tfrt %99, %0<kill>
// variant "up":
-// vreg3 = A2_port vreg99, vreg1, vreg2
-// ... [intervening instructions, vreg0->vreg3] ...
+// %3 = A2_port %99, %1, %2
+// ... [intervening instructions, %0->%3] ...
// [deleted]
// variant "down":
// [deleted]
// ... [intervening instructions] ...
-// vreg3 = A2_port vreg99, vreg1, vreg2
+// %3 = A2_port %99, %1, %2
//
// Both, one or none of these variants may be valid, and checks are made
// to rule out inapplicable variants.
@@ -51,13 +51,13 @@
// As an additional optimization, before either of the two steps above is
// executed, the pass attempts to coalesce the target register with one of
// the source registers, e.g. given an instruction
-// vreg3 = C2_mux vreg0, vreg1, vreg2
-// vreg3 will be coalesced with either vreg1 or vreg2. If this succeeds,
+// %3 = C2_mux %0, %1, %2
+// %3 will be coalesced with either %1 or %2. If this succeeds,
// the instruction would then be (for example)
-// vreg3 = C2_mux vreg0, vreg3, vreg2
+// %3 = C2_mux %0, %3, %2
// and, under certain circumstances, this could result in only one predicated
// instruction:
-// vreg3 = A2_tfrf vreg0, vreg2
+// %3 = A2_tfrf %0, %2
//
// Splitting a definition of a register into two predicated transfers
@@ -65,18 +65,18 @@
// will see both instructions as actual definitions, and will mark the
// first one as dead. The definition is not actually dead, and this
// situation will need to be fixed. For example:
-// vreg1<def,dead> = A2_tfrt ... ; marked as dead
-// vreg1<def> = A2_tfrf ...
+// %1<def,dead> = A2_tfrt ... ; marked as dead
+// %1<def> = A2_tfrf ...
//
// Since any of the individual predicated transfers may end up getting
// removed (in case it is an identity copy), some pre-existing def may
// be marked as dead after live interval recomputation:
-// vreg1<def,dead> = ... ; marked as dead
+// %1<def,dead> = ... ; marked as dead
// ...
-// vreg1<def> = A2_tfrf ... ; if A2_tfrt is removed
-// This case happens if vreg1 was used as a source in A2_tfrt, which means
+// %1<def> = A2_tfrf ... ; if A2_tfrt is removed
+// This case happens if %1 was used as a source in A2_tfrt, which means
// that it is actually live at the A2_tfrf, and so the now dead definition
-// of vreg1 will need to be updated to non-dead at some point.
+// of %1 will need to be updated to non-dead at some point.
//
// This issue could be remedied by adding implicit uses to the predicated
// transfers, but this will create a problem with subsequent predication,
@@ -760,8 +760,8 @@ MachineInstr *HexagonExpandCondsets::getReachingDefForPred(RegisterRef RD,
if (RR.Reg != RD.Reg)
continue;
// If the "Reg" part agrees, there is still the subregister to check.
- // If we are looking for vreg1:loreg, we can skip vreg1:hireg, but
- // not vreg1 (w/o subregisters).
+ // If we are looking for %1:loreg, we can skip %1:hireg, but
+ // not %1 (w/o subregisters).
if (RR.Sub == RD.Sub)
return MI;
if (RR.Sub == 0 || RD.Sub == 0)
@@ -1071,7 +1071,7 @@ bool HexagonExpandCondsets::predicateInBlock(MachineBasicBlock &B,
bool Done = predicate(*I, (Opc == Hexagon::A2_tfrt), UpdRegs);
if (!Done) {
// If we didn't predicate I, we may need to remove it in case it is
- // an "identity" copy, e.g. vreg1 = A2_tfrt vreg2, vreg1.
+ // an "identity" copy, e.g. %1 = A2_tfrt %2, %1.
if (RegisterRef(I->getOperand(0)) == RegisterRef(I->getOperand(2))) {
for (auto &Op : I->operands())
if (Op.isReg())
@@ -1198,18 +1198,18 @@ bool HexagonExpandCondsets::coalesceSegments(
MachineOperand &S1 = CI->getOperand(2), &S2 = CI->getOperand(3);
bool Done = false;
// Consider this case:
- // vreg1 = instr1 ...
- // vreg2 = instr2 ...
- // vreg0 = C2_mux ..., vreg1, vreg2
- // If vreg0 was coalesced with vreg1, we could end up with the following
+ // %1 = instr1 ...
+ // %2 = instr2 ...
+ // %0 = C2_mux ..., %1, %2
+ // If %0 was coalesced with %1, we could end up with the following
// code:
- // vreg0 = instr1 ...
- // vreg2 = instr2 ...
- // vreg0 = A2_tfrf ..., vreg2
+ // %0 = instr1 ...
+ // %2 = instr2 ...
+ // %0 = A2_tfrf ..., %2
// which will later become:
- // vreg0 = instr1 ...
- // vreg0 = instr2_cNotPt ...
- // i.e. there will be an unconditional definition (instr1) of vreg0
+ // %0 = instr1 ...
+ // %0 = instr2_cNotPt ...
+ // i.e. there will be an unconditional definition (instr1) of %0
// followed by a conditional one. The output dependency was there before
// and it is unavoidable, but if instr1 is predicable, we will no longer be
// able to predicate it here.
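
Step one of the pass, described at the top of this file's diff, is in scalar terms just a select split into two guarded assignments; a C++ model of the split:

    #include <cassert>

    // %0 = C2_mux %1, %2, %3
    int mux(bool P, int T, int F) { return P ? T : F; }

    // %0 = A2_tfrt %1, %2 ; transfer if the predicate is true
    // %0 = A2_tfrf %1, %3 ; transfer if the predicate is false
    int expanded(bool P, int T, int F) {
      int R = 0; // both conditional transfers target the same register
      if (P)
        R = T; // A2_tfrt
      if (!P)
        R = F; // A2_tfrf
      return R;
    }

    int main() {
      for (bool P : {false, true})
        assert(mux(P, 10, 20) == expanded(P, 10, 20));
    }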
diff --git a/llvm/lib/Target/Hexagon/HexagonGenInsert.cpp b/llvm/lib/Target/Hexagon/HexagonGenInsert.cpp
index 09d3e6d4a15..d1f63699292 100644
--- a/llvm/lib/Target/Hexagon/HexagonGenInsert.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonGenInsert.cpp
@@ -1106,10 +1106,10 @@ void HexagonGenInsert::pruneCoveredSets(unsigned VR) {
// Now, remove those whose sets of potentially removable registers are
// contained in another IF candidate for VR. For example, given these
- // candidates for vreg45,
- // %vreg45:
- // (%vreg44,%vreg41,#9,#8), { %vreg42 }
- // (%vreg43,%vreg41,#9,#8), { %vreg42 %vreg44 }
+ // candidates for %45,
+ // %45:
+ // (%44,%41,#9,#8), { %42 }
+ // (%43,%41,#9,#8), { %42 %44 }
// remove the first one, since it is contained in the second one.
for (unsigned i = 0, n = LL.size(); i < n; ) {
const RegisterSet &RMi = LL[i].second;
diff --git a/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp b/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp
index 56171f22148..5c18cc8732d 100644
--- a/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp
@@ -1622,8 +1622,8 @@ bool HexagonHardwareLoops::fixupInductionVariable(MachineLoop *L) {
RegisterInductionSet IndRegs;
// Look for induction patterns:
- // vreg1 = PHI ..., [ latch, vreg2 ]
- // vreg2 = ADD vreg1, imm
+ // %1 = PHI ..., [ latch, %2 ]
+ // %2 = ADD %1, imm
using instr_iterator = MachineBasicBlock::instr_iterator;
for (instr_iterator I = Header->instr_begin(), E = Header->instr_end();
@@ -1720,7 +1720,7 @@ bool HexagonHardwareLoops::fixupInductionVariable(MachineLoop *L) {
MachineOperand &MO = PredDef->getOperand(i);
if (MO.isReg()) {
// Skip all implicit references. In one case there was:
- // %vreg140<def> = FCMPUGT32_rr %vreg138, %vreg139, %usr<imp-use>
+ // %140<def> = FCMPUGT32_rr %138, %139, %usr<imp-use>
if (MO.isImplicit())
continue;
if (MO.isUse()) {
diff --git a/llvm/lib/Target/Hexagon/HexagonPeephole.cpp b/llvm/lib/Target/Hexagon/HexagonPeephole.cpp
index 0ef0e78c524..354bb95e448 100644
--- a/llvm/lib/Target/Hexagon/HexagonPeephole.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonPeephole.cpp
@@ -8,27 +8,27 @@
// This peephole pass optimizes the following cases.
// 1. Optimizes redundant sign extends for the following case
// Transform the following pattern
-// %vreg170<def> = SXTW %vreg166
+// %170<def> = SXTW %166
// ...
-// %vreg176<def> = COPY %vreg170:isub_lo
+// %176<def> = COPY %170:isub_lo
//
// Into
-// %vreg176<def> = COPY vreg166
+// %176<def> = COPY %166
//
// 2. Optimizes redundant negation of predicates.
-// %vreg15<def> = CMPGTrr %vreg6, %vreg2
+// %15<def> = CMPGTrr %6, %2
// ...
-// %vreg16<def> = NOT_p %vreg15<kill>
+// %16<def> = NOT_p %15<kill>
// ...
-// JMP_c %vreg16<kill>, <BB#1>, %pc<imp-def,dead>
+// JMP_c %16<kill>, <BB#1>, %pc<imp-def,dead>
//
// Into
-// %vreg15<def> = CMPGTrr %vreg6, %vreg2;
+// %15<def> = CMPGTrr %6, %2;
// ...
-// JMP_cNot %vreg15<kill>, <BB#1>, %pc<imp-def,dead>;
+// JMP_cNot %15<kill>, <BB#1>, %pc<imp-def,dead>;
//
// Note: The peephole pass makes instructions like
-// %vreg170<def> = SXTW %vreg166 or %vreg16<def> = NOT_p %vreg15<kill>
+// %170<def> = SXTW %166 or %16<def> = NOT_p %15<kill>
// redundant and relies on some form of dead-code removal, such as
// DCE or DIE, to actually eliminate them.
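
Case 1 rests on the identity that the low 32 bits of a sign-extended value are the value itself; a standalone check:

    #include <cassert>
    #include <cstdint>

    // %170 = SXTW %166 sign-extends 32 bits to 64; reading %170:isub_lo
    // then yields %166 unchanged, so the COPY can read %166 directly.
    int64_t sxtw(int32_t V) { return static_cast<int64_t>(V); }
    int32_t isub_lo(int64_t V) { return static_cast<int32_t>(V); }

    int main() {
      for (int32_t X : {0, 1, -1, INT32_MIN, INT32_MAX})
        assert(isub_lo(sxtw(X)) == X);
    }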
@@ -133,7 +133,7 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
NextI = std::next(I);
MachineInstr &MI = *I;
// Look for sign extends:
- // %vreg170<def> = SXTW %vreg166
+ // %170<def> = SXTW %166
if (!DisableOptSZExt && MI.getOpcode() == Hexagon::A2_sxtw) {
assert(MI.getNumOperands() == 2);
MachineOperand &Dst = MI.getOperand(0);
@@ -144,14 +144,14 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
if (TargetRegisterInfo::isVirtualRegister(DstReg) &&
TargetRegisterInfo::isVirtualRegister(SrcReg)) {
// Map the following:
- // %vreg170<def> = SXTW %vreg166
- // PeepholeMap[170] = vreg166
+ // %170<def> = SXTW %166
+ // PeepholeMap[170] = %166
PeepholeMap[DstReg] = SrcReg;
}
}
- // Look for %vreg170<def> = COMBINE_ir_V4 (0, %vreg169)
- // %vreg170:DoublRegs, %vreg169:IntRegs
+ // Look for %170<def> = COMBINE_ir_V4 (0, %169)
+ // %170:DoubleRegs, %169:IntRegs
if (!DisableOptExtTo64 && MI.getOpcode() == Hexagon::A4_combineir) {
assert(MI.getNumOperands() == 3);
MachineOperand &Dst = MI.getOperand(0);
@@ -165,10 +165,10 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
}
// Look for this sequence below
- // %vregDoubleReg1 = LSRd_ri %vregDoubleReg0, 32
- // %vregIntReg = COPY %vregDoubleReg1:isub_lo.
+ // %DoubleReg1 = LSRd_ri %DoubleReg0, 32
+ // %IntReg = COPY %DoubleReg1:isub_lo.
// and convert into
- // %vregIntReg = COPY %vregDoubleReg0:isub_hi.
+ // %IntReg = COPY %DoubleReg0:isub_hi.
if (MI.getOpcode() == Hexagon::S2_lsr_i_p) {
assert(MI.getNumOperands() == 3);
MachineOperand &Dst = MI.getOperand(0);
@@ -193,14 +193,14 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
if (TargetRegisterInfo::isVirtualRegister(DstReg) &&
TargetRegisterInfo::isVirtualRegister(SrcReg)) {
// Map the following:
- // %vreg170<def> = NOT_xx %vreg166
- // PeepholeMap[170] = vreg166
+ // %170<def> = NOT_xx %166
+ // PeepholeMap[170] = %166
PeepholeMap[DstReg] = SrcReg;
}
}
// Look for copy:
- // %vreg176<def> = COPY %vreg170:isub_lo
+ // %176<def> = COPY %170:isub_lo
if (!DisableOptSZExt && MI.isCopy()) {
assert(MI.getNumOperands() == 2);
MachineOperand &Dst = MI.getOperand(0);
diff --git a/llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp b/llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp
index d1816cbc752..fb3e6a0fb10 100644
--- a/llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp
@@ -9,10 +9,10 @@
// Replace sequences of "narrow" stores to adjacent memory locations with
// fewer "wide" stores that have the same effect.
// For example, replace:
-// S4_storeirb_io %vreg100, 0, 0 ; store-immediate-byte
-// S4_storeirb_io %vreg100, 1, 0 ; store-immediate-byte
+// S4_storeirb_io %100, 0, 0 ; store-immediate-byte
+// S4_storeirb_io %100, 1, 0 ; store-immediate-byte
// with
-// S4_storeirh_io %vreg100, 0, 0 ; store-immediate-halfword
+// S4_storeirh_io %100, 0, 0 ; store-immediate-halfword
// The above is the general idea. The actual cases handled by the code
// may be a bit more complex.
// The purpose of this pass is to reduce the number of outstanding stores,
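
The identity the widening relies on can be written as standalone C++. Purely
illustrative: p is a hypothetical buffer standing in for the address in %100,
sufficiently aligned for a halfword store; with the zero immediates shown,
byte order does not even matter:

    #include <cstdint>
    #include <cstring>

    void narrow(uint8_t *p) { // two S4_storeirb_io, offsets 0 and 1
      p[0] = 0;
      p[1] = 0;
    }
    void wide(uint8_t *p) {   // one S4_storeirh_io at offset 0, same effect
      uint16_t Zero = 0;
      std::memcpy(p, &Zero, sizeof(Zero));
    }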
diff --git a/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp b/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
index 7eed2898f61..7596bb5a435 100644
--- a/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
@@ -223,8 +223,8 @@ void HexagonSubtarget::CallMutation::apply(ScheduleDAGInstrs *DAG) {
// both the return value and the argument for the next call being in %r0.
// Example:
// 1: <call1>
- // 2: %vregX = COPY %r0
- // 3: <use of %vregX>
+ // 2: %vreg = COPY %r0
+ // 3: <use of %vreg>
// 4: %r0 = ...
// 5: <call2>
// The scheduler would often swap 3 and 4, so an additional register is
@@ -234,12 +234,12 @@ void HexagonSubtarget::CallMutation::apply(ScheduleDAGInstrs *DAG) {
const MachineInstr *MI = DAG->SUnits[su].getInstr();
if (MI->isCopy() && (MI->readsRegister(Hexagon::R0, &TRI) ||
MI->readsRegister(Hexagon::V0, &TRI))) {
- // %vregX = COPY %r0
+ // %vreg = COPY %r0
VRegHoldingRet = MI->getOperand(0).getReg();
RetRegister = MI->getOperand(1).getReg();
LastUseOfRet = nullptr;
} else if (VRegHoldingRet && MI->readsVirtualRegister(VRegHoldingRet))
- // <use of %vregX>
+ // <use of %vreg>
LastUseOfRet = &DAG->SUnits[su];
else if (LastUseOfRet && MI->definesRegister(RetRegister, &TRI))
// %r0 = ...
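
The hunk stops just before the interesting action. A hedged sketch of what the
mutation can do once this last branch fires, written against the SUnit/SDep API
the function already uses (the pass's actual fix may differ):

    // Pin the %r0 redefinition (line 4) after the last use of the copied
    // return value (line 3) so the scheduler can no longer swap them.
    DAG->SUnits[su].addPred(SDep(LastUseOfRet, SDep::Artificial));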
diff --git a/llvm/lib/Target/NVPTX/NVPTXPeephole.cpp b/llvm/lib/Target/NVPTX/NVPTXPeephole.cpp
index 7258e818e72..f33655a16c2 100644
--- a/llvm/lib/Target/NVPTX/NVPTXPeephole.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXPeephole.cpp
@@ -22,11 +22,11 @@
// This peephole pass optimizes these cases, for example
//
// It will transform the following pattern
-// %vreg0<def> = LEA_ADDRi64 %VRFrame, 4
-// %vreg1<def> = cvta_to_local_yes_64 %vreg0
+// %0<def> = LEA_ADDRi64 %VRFrame, 4
+// %1<def> = cvta_to_local_yes_64 %0
//
// into
-// %vreg1<def> = LEA_ADDRi64 %VRFrameLocal, 4
+// %1<def> = LEA_ADDRi64 %VRFrameLocal, 4
//
// %VRFrameLocal is the virtual register name of %SPL
//
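
A hedged sketch of the pattern match described above, using
MachineRegisterInfo; the opcodes and %VRFrame are the ones named in the
comment, but the helper itself is illustrative:

    // True when Root is a cvta_to_local of a value produced by
    // LEA_ADDRi64 %VRFrame, imm -- the pair then folds into a single
    // LEA_ADDRi64 off %VRFrameLocal as shown above.
    static bool isCombinable(const MachineInstr &Root,
                             const MachineRegisterInfo &MRI) {
      if (Root.getOpcode() != NVPTX::cvta_to_local_yes_64)
        return false;
      const MachineInstr *Prev =
          MRI.getUniqueVRegDef(Root.getOperand(1).getReg());
      return Prev && Prev->getOpcode() == NVPTX::LEA_ADDRi64 &&
             Prev->getOperand(1).isReg() &&
             Prev->getOperand(1).getReg() == NVPTX::VRFrame;
    }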
diff --git a/llvm/lib/Target/PowerPC/PPCBranchCoalescing.cpp b/llvm/lib/Target/PowerPC/PPCBranchCoalescing.cpp
index 2af1913db55..4c101f58601 100644
--- a/llvm/lib/Target/PowerPC/PPCBranchCoalescing.cpp
+++ b/llvm/lib/Target/PowerPC/PPCBranchCoalescing.cpp
@@ -62,11 +62,11 @@ namespace llvm {
/// BB#0: derived from LLVM BB %entry
/// Live Ins: %f1 %f3 %x6
/// <SNIP1>
-/// %vreg0<def> = COPY %f1; F8RC:%vreg0
-/// %vreg5<def> = CMPLWI %vreg4<kill>, 0; CRRC:%vreg5 GPRC:%vreg4
-/// %vreg8<def> = LXSDX %zero8, %vreg7<kill>, %rm<imp-use>;
-/// mem:LD8[ConstantPool] F8RC:%vreg8 G8RC:%vreg7
-/// BCC 76, %vreg5, <BB#2>; CRRC:%vreg5
+/// %0<def> = COPY %f1; F8RC:%0
+/// %5<def> = CMPLWI %4<kill>, 0; CRRC:%5 GPRC:%4
+/// %8<def> = LXSDX %zero8, %7<kill>, %rm<imp-use>;
+/// mem:LD8[ConstantPool] F8RC:%8 G8RC:%7
+/// BCC 76, %5, <BB#2>; CRRC:%5
/// Successors according to CFG: BB#1(?%) BB#2(?%)
///
/// BB#1: derived from LLVM BB %entry
@@ -75,10 +75,10 @@ namespace llvm {
///
/// BB#2: derived from LLVM BB %entry
/// Predecessors according to CFG: BB#0 BB#1
-/// %vreg9<def> = PHI %vreg8, <BB#1>, %vreg0, <BB#0>;
-/// F8RC:%vreg9,%vreg8,%vreg0
+/// %9<def> = PHI %8, <BB#1>, %0, <BB#0>;
+/// F8RC:%9,%8,%0
/// <SNIP2>
-/// BCC 76, %vreg5, <BB#4>; CRRC:%vreg5
+/// BCC 76, %5, <BB#4>; CRRC:%5
/// Successors according to CFG: BB#3(?%) BB#4(?%)
///
/// BB#3: derived from LLVM BB %entry
@@ -87,8 +87,8 @@ namespace llvm {
///
/// BB#4: derived from LLVM BB %entry
/// Predecessors according to CFG: BB#2 BB#3
-/// %vreg13<def> = PHI %vreg12, <BB#3>, %vreg2, <BB#2>;
-/// F8RC:%vreg13,%vreg12,%vreg2
+/// %13<def> = PHI %12, <BB#3>, %2, <BB#2>;
+/// F8RC:%13,%12,%2
/// <SNIP3>
/// BLR8 %lr8<imp-use>, %rm<imp-use>, %f1<imp-use>
///
@@ -100,12 +100,12 @@ namespace llvm {
/// BB#0: derived from LLVM BB %entry
/// Live Ins: %f1 %f3 %x6
/// <SNIP1>
-/// %vreg0<def> = COPY %f1; F8RC:%vreg0
-/// %vreg5<def> = CMPLWI %vreg4<kill>, 0; CRRC:%vreg5 GPRC:%vreg4
-/// %vreg8<def> = LXSDX %zero8, %vreg7<kill>, %rm<imp-use>;
-/// mem:LD8[ConstantPool] F8RC:%vreg8 G8RC:%vreg7
+/// %0<def> = COPY %f1; F8RC:%0
+/// %5<def> = CMPLWI %4<kill>, 0; CRRC:%5 GPRC:%4
+/// %8<def> = LXSDX %zero8, %7<kill>, %rm<imp-use>;
+/// mem:LD8[ConstantPool] F8RC:%8 G8RC:%7
/// <SNIP2>
-/// BCC 76, %vreg5, <BB#4>; CRRC:%vreg5
+/// BCC 76, %5, <BB#4>; CRRC:%5
/// Successors according to CFG: BB#1(0x2aaaaaaa / 0x80000000 = 33.33%)
/// BB#4(0x55555554 / 0x80000000 = 66.67%)
///
@@ -115,10 +115,10 @@ namespace llvm {
///
/// BB#4: derived from LLVM BB %entry
/// Predecessors according to CFG: BB#0 BB#1
-/// %vreg9<def> = PHI %vreg8, <BB#1>, %vreg0, <BB#0>;
-/// F8RC:%vreg9,%vreg8,%vreg0
-/// %vreg13<def> = PHI %vreg12, <BB#1>, %vreg2, <BB#0>;
-/// F8RC:%vreg13,%vreg12,%vreg2
+/// %9<def> = PHI %8, <BB#1>, %0, <BB#0>;
+/// F8RC:%9,%8,%0
+/// %13<def> = PHI %12, <BB#1>, %2, <BB#0>;
+/// F8RC:%13,%12,%2
/// <SNIP3>
/// BLR8 %lr8<imp-use>, %rm<imp-use>, %f1<imp-use>
///
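
Reading the before/after listings, coalescing BB#0's and BB#2's branches is
legal because the two BCCs agree. A hedged one-off version of that check
(illustrative, not the pass's own validity test):

    // Both branches above are 'BCC 76, %5': same predicate code and the
    // same CR virtual register, so the control decisions are identical.
    static bool sameCondition(const MachineInstr &B1, const MachineInstr &B2) {
      return B1.getOpcode() == PPC::BCC && B2.getOpcode() == PPC::BCC &&
             B1.getOperand(0).getImm() == B2.getOperand(0).getImm() &&
             B1.getOperand(1).getReg() == B2.getOperand(1).getReg();
    }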
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
index fd566634760..15cc1c76760 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -2318,7 +2318,7 @@ PPCInstrInfo::isSignOrZeroExtended(const MachineInstr &MI, bool SignExt,
// ADJCALLSTACKDOWN 32, %r1<imp-def,dead>, %r1<imp-use>
// BL8_NOP <ga:@func>,...
// ADJCALLSTACKUP 32, 0, %r1<imp-def,dead>, %r1<imp-use>
- // %vreg5<def> = COPY %x3; G8RC:%vreg5
+ // %5<def> = COPY %x3; G8RC:%5
if (SrcReg == PPC::X3) {
const MachineBasicBlock *MBB = MI.getParent();
MachineBasicBlock::const_instr_iterator II =
diff --git a/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp b/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp
index a8d98133afc..1ac7afe2cdc 100644
--- a/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp
+++ b/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp
@@ -585,9 +585,9 @@ bool PPCMIPeephole::simplifyCode(void) {
// We can eliminate RLDICL (e.g. for zero-extension)
// if all bits to clear are already zero in the input.
// This code assumes the following code sequence for zero-extension.
- // %vreg6<def> = COPY %vreg5:sub_32; (optional)
- // %vreg8<def> = IMPLICIT_DEF;
- // %vreg7<def,tied1> = INSERT_SUBREG %vreg8<tied0>, %vreg6, sub_32;
+ // %6<def> = COPY %5:sub_32; (optional)
+ // %8<def> = IMPLICIT_DEF;
+ // %7<def,tied1> = INSERT_SUBREG %8<tied0>, %6, sub_32;
if (!EnableZExtElimination) break;
if (MI.getOperand(2).getImm() != 0)
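
Numerically, the redundancy condition is small enough to state in one helper.
Illustrative only; the pass reasons about the INSERT_SUBREG sequence above
rather than a known-zero mask:

    #include <cstdint>
    // RLDICL(x, 0, MB) keeps bits MB..63 (IBM numbering, MSB is bit 0).
    // KnownZero marks input bits already known to be zero.
    static bool rldiclIsRedundant(uint64_t KnownZero, unsigned MB) {
      uint64_t Cleared = MB == 0 ? 0 : (~0ULL << (64 - MB)); // high MB bits
      return (Cleared & ~KnownZero) == 0; // nothing left to clear
    }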
@@ -685,8 +685,8 @@ bool PPCMIPeephole::simplifyCode(void) {
DEBUG(dbgs() << "Optimizing LI to ADDI: ");
DEBUG(LiMI->dump());
- // There could be repeated registers in the PHI, e.g: %vreg1<def> =
- // PHI %vreg6, <BB#2>, %vreg8, <BB#3>, %vreg8, <BB#6>; So if we've
+ // There could be repeated registers in the PHI, e.g: %1<def> =
+ // PHI %6, <BB#2>, %8, <BB#3>, %8, <BB#6>; So if we've
// already replaced the def instruction, skip.
if (LiMI->getOpcode() == PPC::ADDI || LiMI->getOpcode() == PPC::ADDI8)
continue;
diff --git a/llvm/lib/Target/PowerPC/PPCVSXFMAMutate.cpp b/llvm/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
index 80b63b1c9df..4d001c0210d 100644
--- a/llvm/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
+++ b/llvm/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
@@ -90,21 +90,21 @@ protected:
// This pass is run after register coalescing, and so we're looking for
// a situation like this:
// ...
- // %vreg5<def> = COPY %vreg9; VSLRC:%vreg5,%vreg9
- // %vreg5<def,tied1> = XSMADDADP %vreg5<tied0>, %vreg17, %vreg16,
- // %rm<imp-use>; VSLRC:%vreg5,%vreg17,%vreg16
+ // %5<def> = COPY %9; VSLRC:%5,%9
+ // %5<def,tied1> = XSMADDADP %5<tied0>, %17, %16,
+ // %rm<imp-use>; VSLRC:%5,%17,%16
// ...
- // %vreg9<def,tied1> = XSMADDADP %vreg9<tied0>, %vreg17, %vreg19,
- // %rm<imp-use>; VSLRC:%vreg9,%vreg17,%vreg19
+ // %9<def,tied1> = XSMADDADP %9<tied0>, %17, %19,
+ // %rm<imp-use>; VSLRC:%9,%17,%19
// ...
// Where we can eliminate the copy by changing from the A-type to the
// M-type instruction. Specifically, for this example, this means:
- // %vreg5<def,tied1> = XSMADDADP %vreg5<tied0>, %vreg17, %vreg16,
- // %rm<imp-use>; VSLRC:%vreg5,%vreg17,%vreg16
+ // %5<def,tied1> = XSMADDADP %5<tied0>, %17, %16,
+ // %rm<imp-use>; VSLRC:%5,%17,%16
// is replaced by:
- // %vreg16<def,tied1> = XSMADDMDP %vreg16<tied0>, %vreg18, %vreg9,
- // %rm<imp-use>; VSLRC:%vreg16,%vreg18,%vreg9
- // and we remove: %vreg5<def> = COPY %vreg9; VSLRC:%vreg5,%vreg9
+ // %16<def,tied1> = XSMADDMDP %16<tied0>, %17, %9,
+ // %rm<imp-use>; VSLRC:%16,%17,%9
+ // and we remove: %5<def> = COPY %9; VSLRC:%5,%9
SlotIndex FMAIdx = LIS->getInstructionIndex(MI);
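
The algebra behind the A-to-M rewrite, spelled out under the usual reading of
the ISA (A-form ties the accumulator, M-form ties a multiplicand):

    // A-form:  %5  = XSMADDADP %5<tied>,  %17, %16  =>  %5  = %5 + %17*%16
    // with the COPY, %5 starts out as %9, so            %5  = %9 + %17*%16
    // M-form:  %16 = XSMADDMDP %16<tied>, %17, %9   =>  %16 = %16*%17 + %9
    // Both compute %9 + %17*%16, so the COPY is dead once the remaining
    // uses of %5 are rewritten to %16.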
@@ -150,13 +150,13 @@ protected:
// walking the MIs we may as well test liveness here.
//
// FIXME: There is a case that occurs in practice, like this:
- // %vreg9<def> = COPY %f1; VSSRC:%vreg9
+ // %9<def> = COPY %f1; VSSRC:%9
// ...
- // %vreg6<def> = COPY %vreg9; VSSRC:%vreg6,%vreg9
- // %vreg7<def> = COPY %vreg9; VSSRC:%vreg7,%vreg9
- // %vreg9<def,tied1> = XSMADDASP %vreg9<tied0>, %vreg1, %vreg4; VSSRC:
- // %vreg6<def,tied1> = XSMADDASP %vreg6<tied0>, %vreg1, %vreg2; VSSRC:
- // %vreg7<def,tied1> = XSMADDASP %vreg7<tied0>, %vreg1, %vreg3; VSSRC:
+ // %6<def> = COPY %9; VSSRC:%6,%9
+ // %7<def> = COPY %9; VSSRC:%7,%9
+ // %9<def,tied1> = XSMADDASP %9<tied0>, %1, %4; VSSRC:
+ // %6<def,tied1> = XSMADDASP %6<tied0>, %1, %2; VSSRC:
+ // %7<def,tied1> = XSMADDASP %7<tied0>, %1, %3; VSSRC:
// which prevents an otherwise-profitable transformation.
bool OtherUsers = false, KillsAddendSrc = false;
for (auto J = std::prev(I), JE = MachineBasicBlock::iterator(AddendMI);
@@ -177,11 +177,11 @@ protected:
// The transformation doesn't work well with things like:
- // %vreg5 = A-form-op %vreg5, %vreg11, %vreg5;
- // unless vreg11 is also a kill, so skip when it is not,
+ // %5 = A-form-op %5, %11, %5;
+ // unless %11 is also a kill, so skip when it is not,
// and check operand 3 to see it is also a kill to handle the case:
- // %vreg5 = A-form-op %vreg5, %vreg5, %vreg11;
- // where vreg5 and vreg11 are both kills. This case would be skipped
+ // %5 = A-form-op %5, %5, %11;
+ // where %5 and %11 are both kills. This case would be skipped
// otherwise.
unsigned OldFMAReg = MI.getOperand(0).getReg();
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index a21145f0755..54523d7233e 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -6948,10 +6948,10 @@ static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
// For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
// lowered this:
- // (extract_vector_elt (v8f32 %vreg1), Constant<6>)
+ // (extract_vector_elt (v8f32 %1), Constant<6>)
// to:
// (extract_vector_elt (vector_shuffle<2,u,u,u>
- // (extract_subvector (v8f32 %vreg0), Constant<4>),
+ // (extract_subvector (v8f32 %0), Constant<4>),
// undef)
// Constant<0>)
// In this case the vector is the extract_subvector expression and the index
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
index dca9d622730..aa81c3aff89 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
@@ -43,7 +43,7 @@ define [1 x double] @constant() {
; The key problem here is that we may fail to create an MBB referenced by a
; PHI. If so, we cannot complete the G_PHI and mustn't try or bad things
; happen.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: G_STORE %vreg6, %vreg2; mem:ST4[%addr] GPR:%vreg6,%vreg2 (in function: pending_phis)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: G_STORE %6, %2; mem:ST4[%addr] GPR:%6,%2 (in function: pending_phis)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for pending_phis
; FALLBACK-WITH-REPORT-OUT-LABEL: pending_phis:
define i32 @pending_phis(i1 %tst, i32 %val, i32* %addr) {
@@ -63,7 +63,7 @@ false:
}
; General legalizer inability to handle types whose size wasn't a power of 2.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %vreg1, %vreg0; mem:ST6[%addr](align=8) (in function: odd_type)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1, %0; mem:ST6[%addr](align=8) (in function: odd_type)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for odd_type
; FALLBACK-WITH-REPORT-OUT-LABEL: odd_type:
define void @odd_type(i42* %addr) {
@@ -72,7 +72,7 @@ define void @odd_type(i42* %addr) {
ret void
}
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %vreg1, %vreg0; mem:ST28[%addr](align=32) (in function: odd_vector)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1, %0; mem:ST28[%addr](align=32) (in function: odd_vector)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for odd_vector
; FALLBACK-WITH-REPORT-OUT-LABEL: odd_vector:
define void @odd_vector(<7 x i32>* %addr) {
@@ -91,7 +91,7 @@ define i128 @sequence_sizes([8 x i8] %in) {
}
; Just to make sure we don't accidentally emit a normal load/store.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: %vreg2<def>(s64) = G_LOAD %vreg0; mem:LD8[%addr] GPR:%vreg2,%vreg0 (in function: atomic_ops)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: %2<def>(s64) = G_LOAD %0; mem:LD8[%addr] GPR:%2,%0 (in function: atomic_ops)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for atomic_ops
; FALLBACK-WITH-REPORT-LABEL: atomic_ops:
define i64 @atomic_ops(i64* %addr) {
@@ -132,14 +132,14 @@ continue:
}
; Check that we fallback on invoke translation failures.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %vreg0<def>(s128) = G_FCONSTANT quad 2
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %0<def>(s128) = G_FCONSTANT quad 2
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for test_quad_dump
; FALLBACK-WITH-REPORT-OUT-LABEL: test_quad_dump:
define fp128 @test_quad_dump() {
ret fp128 0xL00000000000000004000000000000000
}
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %vreg0<def>(p0) = G_EXTRACT_VECTOR_ELT %vreg1, %vreg2; (in function: vector_of_pointers_extractelement)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %0<def>(p0) = G_EXTRACT_VECTOR_ELT %1, %2; (in function: vector_of_pointers_extractelement)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for vector_of_pointers_extractelement
; FALLBACK-WITH-REPORT-OUT-LABEL: vector_of_pointers_extractelement:
@var = global <2 x i16*> zeroinitializer
@@ -156,7 +156,7 @@ end:
br label %block
}
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %vreg0, %vreg4; mem:ST16[undef] (in function: vector_of_pointers_insertelement)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %0, %4; mem:ST16[undef] (in function: vector_of_pointers_insertelement)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for vector_of_pointers_insertelement
; FALLBACK-WITH-REPORT-OUT-LABEL: vector_of_pointers_insertelement:
define void @vector_of_pointers_insertelement() {
@@ -172,7 +172,7 @@ end:
br label %block
}
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %vreg1, %vreg3; mem:ST12[undef](align=4) (in function: nonpow2_insertvalue_narrowing)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1, %3; mem:ST12[undef](align=4) (in function: nonpow2_insertvalue_narrowing)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_insertvalue_narrowing
; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_insertvalue_narrowing:
%struct96 = type { float, float, float }
@@ -182,7 +182,7 @@ define void @nonpow2_insertvalue_narrowing(float %a) {
ret void
}
-; FALLBACK-WITH-REPORT-ERR remark: <unknown>:0:0: unable to legalize instruction: G_STORE %vreg3, %vreg4; mem:ST12[undef](align=16) (in function: nonpow2_add_narrowing)
+; FALLBACK-WITH-REPORT-ERR remark: <unknown>:0:0: unable to legalize instruction: G_STORE %3, %4; mem:ST12[undef](align=16) (in function: nonpow2_add_narrowing)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_add_narrowing
; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_add_narrowing:
define void @nonpow2_add_narrowing() {
@@ -193,7 +193,7 @@ define void @nonpow2_add_narrowing() {
ret void
}
-; FALLBACK-WITH-REPORT-ERR remark: <unknown>:0:0: unable to legalize instruction: G_STORE %vreg3, %vreg4; mem:ST12[undef](align=16) (in function: nonpow2_add_narrowing)
+; FALLBACK-WITH-REPORT-ERR remark: <unknown>:0:0: unable to legalize instruction: G_STORE %3, %4; mem:ST12[undef](align=16) (in function: nonpow2_add_narrowing)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_or_narrowing
; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_or_narrowing:
define void @nonpow2_or_narrowing() {
@@ -204,7 +204,7 @@ define void @nonpow2_or_narrowing() {
ret void
}
-; FALLBACK-WITH-REPORT-ERR remark: <unknown>:0:0: unable to legalize instruction: G_STORE %vreg0, %vreg1; mem:ST12[undef](align=16) (in function: nonpow2_load_narrowing)
+; FALLBACK-WITH-REPORT-ERR remark: <unknown>:0:0: unable to legalize instruction: G_STORE %0, %1; mem:ST12[undef](align=16) (in function: nonpow2_load_narrowing)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_load_narrowing
; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_load_narrowing:
define void @nonpow2_load_narrowing() {
@@ -213,7 +213,7 @@ define void @nonpow2_load_narrowing() {
ret void
}
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %vreg3, %vreg0; mem:ST12[%c](align=16) (in function: nonpow2_store_narrowing
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %3, %0; mem:ST12[%c](align=16) (in function: nonpow2_store_narrowing
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_store_narrowing
; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_store_narrowing:
define void @nonpow2_store_narrowing(i96* %c) {
@@ -223,7 +223,7 @@ define void @nonpow2_store_narrowing(i96* %c) {
ret void
}
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %vreg0, %vreg1; mem:ST12[undef](align=16) (in function: nonpow2_constant_narrowing)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %0, %1; mem:ST12[undef](align=16) (in function: nonpow2_constant_narrowing)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_constant_narrowing
; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_constant_narrowing:
define void @nonpow2_constant_narrowing() {
@@ -233,8 +233,8 @@ define void @nonpow2_constant_narrowing() {
; Currently can't handle vector lengths that aren't an exact multiple of
; natively supported vector lengths. Test that the fall-back works for those.
-; FALLBACK-WITH-REPORT-ERR-G_IMPLICIT_DEF-LEGALIZABLE: (FIXME: this is what is expected once we can legalize non-pow-of-2 G_IMPLICIT_DEF) remark: <unknown>:0:0: unable to legalize instruction: %vreg1<def>(<7 x s64>) = G_ADD %vreg0, %vreg0; (in function: nonpow2_vector_add_fewerelements
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %vreg2<def>(s64) = G_EXTRACT_VECTOR_ELT %vreg1, %vreg3; (in function: nonpow2_vector_add_fewerelements)
+; FALLBACK-WITH-REPORT-ERR-G_IMPLICIT_DEF-LEGALIZABLE: (FIXME: this is what is expected once we can legalize non-pow-of-2 G_IMPLICIT_DEF) remark: <unknown>:0:0: unable to legalize instruction: %1<def>(<7 x s64>) = G_ADD %0, %0; (in function: nonpow2_vector_add_fewerelements
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %2<def>(s64) = G_EXTRACT_VECTOR_ELT %1, %3; (in function: nonpow2_vector_add_fewerelements)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_vector_add_fewerelements
; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_vector_add_fewerelements:
define void @nonpow2_vector_add_fewerelements() {
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/verify-regbankselected.mir b/llvm/test/CodeGen/AArch64/GlobalISel/verify-regbankselected.mir
index 9a2f7f7e54f..7d0c9a37d17 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/verify-regbankselected.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/verify-regbankselected.mir
@@ -9,8 +9,8 @@
...
---
# CHECK: *** Bad machine code: Generic virtual register must have a bank in a RegBankSelected function ***
-# CHECK: instruction: %vreg0<def>(s64) = COPY
-# CHECK: operand 0: %vreg0<def>
+# CHECK: instruction: %0<def>(s64) = COPY
+# CHECK: operand 0: %0<def>
name: test
regBankSelected: true
registers:
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/verify-selected.mir b/llvm/test/CodeGen/AArch64/GlobalISel/verify-selected.mir
index 2149903d08a..a182cf53173 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/verify-selected.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/verify-selected.mir
@@ -22,11 +22,11 @@ body: |
%0 = COPY %x0
; CHECK: *** Bad machine code: Unexpected generic instruction in a Selected function ***
- ; CHECK: instruction: %vreg1<def> = G_ADD
+ ; CHECK: instruction: %1<def> = G_ADD
%1 = G_ADD %0, %0
; CHECK: *** Bad machine code: Generic virtual register invalid in a Selected function ***
- ; CHECK: instruction: %vreg2<def>(s64) = COPY
- ; CHECK: operand 0: %vreg2<def>
+ ; CHECK: instruction: %2<def>(s64) = COPY
+ ; CHECK: operand 0: %2<def>
%2(s64) = COPY %x0
...
diff --git a/llvm/test/CodeGen/AArch64/aarch64-stp-cluster.ll b/llvm/test/CodeGen/AArch64/aarch64-stp-cluster.ll
index 25cf313b81e..0ee32f79a35 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-stp-cluster.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-stp-cluster.ll
@@ -5,10 +5,10 @@
; CHECK-LABEL: stp_i64_scale:BB#0
; CHECK:Cluster ld/st SU(4) - SU(3)
; CHECK:Cluster ld/st SU(2) - SU(5)
-; CHECK:SU(4): STRXui %vreg1, %vreg0, 1
-; CHECK:SU(3): STRXui %vreg1, %vreg0, 2
-; CHECK:SU(2): STRXui %vreg1, %vreg0, 3
-; CHECK:SU(5): STRXui %vreg1, %vreg0, 4
+; CHECK:SU(4): STRXui %1, %0, 1
+; CHECK:SU(3): STRXui %1, %0, 2
+; CHECK:SU(2): STRXui %1, %0, 3
+; CHECK:SU(5): STRXui %1, %0, 4
define i64 @stp_i64_scale(i64* nocapture %P, i64 %v) {
entry:
%arrayidx = getelementptr inbounds i64, i64* %P, i64 3
@@ -26,10 +26,10 @@ entry:
; CHECK-LABEL: stp_i32_scale:BB#0
; CHECK:Cluster ld/st SU(4) - SU(3)
; CHECK:Cluster ld/st SU(2) - SU(5)
-; CHECK:SU(4): STRWui %vreg1, %vreg0, 1
-; CHECK:SU(3): STRWui %vreg1, %vreg0, 2
-; CHECK:SU(2): STRWui %vreg1, %vreg0, 3
-; CHECK:SU(5): STRWui %vreg1, %vreg0, 4
+; CHECK:SU(4): STRWui %1, %0, 1
+; CHECK:SU(3): STRWui %1, %0, 2
+; CHECK:SU(2): STRWui %1, %0, 3
+; CHECK:SU(5): STRWui %1, %0, 4
define i32 @stp_i32_scale(i32* nocapture %P, i32 %v) {
entry:
%arrayidx = getelementptr inbounds i32, i32* %P, i32 3
@@ -47,10 +47,10 @@ entry:
; CHECK-LABEL:stp_i64_unscale:BB#0 entry
; CHECK:Cluster ld/st SU(5) - SU(2)
; CHECK:Cluster ld/st SU(4) - SU(3)
-; CHECK:SU(5): STURXi %vreg1, %vreg0, -32
-; CHECK:SU(2): STURXi %vreg1, %vreg0, -24
-; CHECK:SU(4): STURXi %vreg1, %vreg0, -16
-; CHECK:SU(3): STURXi %vreg1, %vreg0, -8
+; CHECK:SU(5): STURXi %1, %0, -32
+; CHECK:SU(2): STURXi %1, %0, -24
+; CHECK:SU(4): STURXi %1, %0, -16
+; CHECK:SU(3): STURXi %1, %0, -8
define void @stp_i64_unscale(i64* nocapture %P, i64 %v) #0 {
entry:
%arrayidx = getelementptr inbounds i64, i64* %P, i64 -3
@@ -68,10 +68,10 @@ entry:
; CHECK-LABEL:stp_i32_unscale:BB#0 entry
; CHECK:Cluster ld/st SU(5) - SU(2)
; CHECK:Cluster ld/st SU(4) - SU(3)
-; CHECK:SU(5): STURWi %vreg1, %vreg0, -16
-; CHECK:SU(2): STURWi %vreg1, %vreg0, -12
-; CHECK:SU(4): STURWi %vreg1, %vreg0, -8
-; CHECK:SU(3): STURWi %vreg1, %vreg0, -4
+; CHECK:SU(5): STURWi %1, %0, -16
+; CHECK:SU(2): STURWi %1, %0, -12
+; CHECK:SU(4): STURWi %1, %0, -8
+; CHECK:SU(3): STURWi %1, %0, -4
define void @stp_i32_unscale(i32* nocapture %P, i32 %v) #0 {
entry:
%arrayidx = getelementptr inbounds i32, i32* %P, i32 -3
@@ -89,10 +89,10 @@ entry:
; CHECK-LABEL:stp_double:BB#0
; CHECK:Cluster ld/st SU(3) - SU(4)
; CHECK:Cluster ld/st SU(2) - SU(5)
-; CHECK:SU(3): STRDui %vreg1, %vreg0, 1
-; CHECK:SU(4): STRDui %vreg1, %vreg0, 2
-; CHECK:SU(2): STRDui %vreg1, %vreg0, 3
-; CHECK:SU(5): STRDui %vreg1, %vreg0, 4
+; CHECK:SU(3): STRDui %1, %0, 1
+; CHECK:SU(4): STRDui %1, %0, 2
+; CHECK:SU(2): STRDui %1, %0, 3
+; CHECK:SU(5): STRDui %1, %0, 4
define void @stp_double(double* nocapture %P, double %v) {
entry:
%arrayidx = getelementptr inbounds double, double* %P, i64 3
@@ -110,10 +110,10 @@ entry:
; CHECK-LABEL:stp_float:BB#0
; CHECK:Cluster ld/st SU(3) - SU(4)
; CHECK:Cluster ld/st SU(2) - SU(5)
-; CHECK:SU(3): STRSui %vreg1, %vreg0, 1
-; CHECK:SU(4): STRSui %vreg1, %vreg0, 2
-; CHECK:SU(2): STRSui %vreg1, %vreg0, 3
-; CHECK:SU(5): STRSui %vreg1, %vreg0, 4
+; CHECK:SU(3): STRSui %1, %0, 1
+; CHECK:SU(4): STRSui %1, %0, 2
+; CHECK:SU(2): STRSui %1, %0, 3
+; CHECK:SU(5): STRSui %1, %0, 4
define void @stp_float(float* nocapture %P, float %v) {
entry:
%arrayidx = getelementptr inbounds float, float* %P, i64 3
@@ -130,10 +130,10 @@ entry:
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: stp_volatile:BB#0
; CHECK-NOT: Cluster ld/st
-; CHECK:SU(2): STRXui %vreg1, %vreg0, 3; mem:Volatile
-; CHECK:SU(3): STRXui %vreg1, %vreg0, 2; mem:Volatile
-; CHECK:SU(4): STRXui %vreg1, %vreg0, 1; mem:Volatile
-; CHECK:SU(5): STRXui %vreg1, %vreg0, 4; mem:Volatile
+; CHECK:SU(2): STRXui %1, %0, 3; mem:Volatile
+; CHECK:SU(3): STRXui %1, %0, 2; mem:Volatile
+; CHECK:SU(4): STRXui %1, %0, 1; mem:Volatile
+; CHECK:SU(5): STRXui %1, %0, 4; mem:Volatile
define i64 @stp_volatile(i64* nocapture %P, i64 %v) {
entry:
%arrayidx = getelementptr inbounds i64, i64* %P, i64 3
diff --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-rem.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-rem.ll
index 05aa96997b5..58f414432ac 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-rem.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-rem.ll
@@ -4,9 +4,9 @@
; CHECK-SSA-LABEL: Machine code for function t1
-; CHECK-SSA: [[QUOTREG:%vreg[0-9]+]]<def> = SDIVWr
+; CHECK-SSA: [[QUOTREG:%[0-9]+]]<def> = SDIVWr
; CHECK-SSA-NOT: [[QUOTREG]]<def> =
-; CHECK-SSA: {{%vreg[0-9]+}}<def> = MSUBWrrr [[QUOTREG]]
+; CHECK-SSA: {{%[0-9]+}}<def> = MSUBWrrr [[QUOTREG]]
; CHECK-SSA-LABEL: Machine code for function t2
diff --git a/llvm/test/CodeGen/AArch64/arm64-ldp-cluster.ll b/llvm/test/CodeGen/AArch64/arm64-ldp-cluster.ll
index 64e535ca749..ca50e110a88 100644
--- a/llvm/test/CodeGen/AArch64/arm64-ldp-cluster.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-ldp-cluster.ll
@@ -6,13 +6,13 @@
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: ldr_int:BB#0
; CHECK: Cluster ld/st SU(1) - SU(2)
-; CHECK: SU(1): %vreg{{[0-9]+}}<def> = LDRWui
-; CHECK: SU(2): %vreg{{[0-9]+}}<def> = LDRWui
+; CHECK: SU(1): %{{[0-9]+}}<def> = LDRWui
+; CHECK: SU(2): %{{[0-9]+}}<def> = LDRWui
; EXYNOS: ********** MI Scheduling **********
; EXYNOS-LABEL: ldr_int:BB#0
; EXYNOS: Cluster ld/st SU(1) - SU(2)
-; EXYNOS: SU(1): %vreg{{[0-9]+}}<def> = LDRWui
-; EXYNOS: SU(2): %vreg{{[0-9]+}}<def> = LDRWui
+; EXYNOS: SU(1): %{{[0-9]+}}<def> = LDRWui
+; EXYNOS: SU(2): %{{[0-9]+}}<def> = LDRWui
define i32 @ldr_int(i32* %a) nounwind {
%p1 = getelementptr inbounds i32, i32* %a, i32 1
%tmp1 = load i32, i32* %p1, align 2
@@ -26,13 +26,13 @@ define i32 @ldr_int(i32* %a) nounwind {
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: ldp_sext_int:BB#0
; CHECK: Cluster ld/st SU(1) - SU(2)
-; CHECK: SU(1): %vreg{{[0-9]+}}<def> = LDRSWui
-; CHECK: SU(2): %vreg{{[0-9]+}}<def> = LDRSWui
+; CHECK: SU(1): %{{[0-9]+}}<def> = LDRSWui
+; CHECK: SU(2): %{{[0-9]+}}<def> = LDRSWui
; EXYNOS: ********** MI Scheduling **********
; EXYNOS-LABEL: ldp_sext_int:BB#0
; EXYNOS: Cluster ld/st SU(1) - SU(2)
-; EXYNOS: SU(1): %vreg{{[0-9]+}}<def> = LDRSWui
-; EXYNOS: SU(2): %vreg{{[0-9]+}}<def> = LDRSWui
+; EXYNOS: SU(1): %{{[0-9]+}}<def> = LDRSWui
+; EXYNOS: SU(2): %{{[0-9]+}}<def> = LDRSWui
define i64 @ldp_sext_int(i32* %p) nounwind {
%tmp = load i32, i32* %p, align 4
%add.ptr = getelementptr inbounds i32, i32* %p, i64 1
@@ -47,13 +47,13 @@ define i64 @ldp_sext_int(i32* %p) nounwind {
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: ldur_int:BB#0
; CHECK: Cluster ld/st SU(2) - SU(1)
-; CHECK: SU(1): %vreg{{[0-9]+}}<def> = LDURWi
-; CHECK: SU(2): %vreg{{[0-9]+}}<def> = LDURWi
+; CHECK: SU(1): %{{[0-9]+}}<def> = LDURWi
+; CHECK: SU(2): %{{[0-9]+}}<def> = LDURWi
; EXYNOS: ********** MI Scheduling **********
; EXYNOS-LABEL: ldur_int:BB#0
; EXYNOS: Cluster ld/st SU(2) - SU(1)
-; EXYNOS: SU(1): %vreg{{[0-9]+}}<def> = LDURWi
-; EXYNOS: SU(2): %vreg{{[0-9]+}}<def> = LDURWi
+; EXYNOS: SU(1): %{{[0-9]+}}<def> = LDURWi
+; EXYNOS: SU(2): %{{[0-9]+}}<def> = LDURWi
define i32 @ldur_int(i32* %a) nounwind {
%p1 = getelementptr inbounds i32, i32* %a, i32 -1
%tmp1 = load i32, i32* %p1, align 2
@@ -67,13 +67,13 @@ define i32 @ldur_int(i32* %a) nounwind {
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: ldp_half_sext_zext_int:BB#0
; CHECK: Cluster ld/st SU(3) - SU(4)
-; CHECK: SU(3): %vreg{{[0-9]+}}<def> = LDRSWui
-; CHECK: SU(4): %vreg{{[0-9]+}}:sub_32<def,read-undef> = LDRWui
+; CHECK: SU(3): %{{[0-9]+}}<def> = LDRSWui
+; CHECK: SU(4): %{{[0-9]+}}:sub_32<def,read-undef> = LDRWui
; EXYNOS: ********** MI Scheduling **********
; EXYNOS-LABEL: ldp_half_sext_zext_int:BB#0
; EXYNOS: Cluster ld/st SU(3) - SU(4)
-; EXYNOS: SU(3): %vreg{{[0-9]+}}<def> = LDRSWui
-; EXYNOS: SU(4): %vreg{{[0-9]+}}:sub_32<def,read-undef> = LDRWui
+; EXYNOS: SU(3): %{{[0-9]+}}<def> = LDRSWui
+; EXYNOS: SU(4): %{{[0-9]+}}:sub_32<def,read-undef> = LDRWui
define i64 @ldp_half_sext_zext_int(i64* %q, i32* %p) nounwind {
%tmp0 = load i64, i64* %q, align 4
%tmp = load i32, i32* %p, align 4
@@ -90,13 +90,13 @@ define i64 @ldp_half_sext_zext_int(i64* %q, i32* %p) nounwind {
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: ldp_half_zext_sext_int:BB#0
; CHECK: Cluster ld/st SU(3) - SU(4)
-; CHECK: SU(3): %vreg{{[0-9]+}}:sub_32<def,read-undef> = LDRWui
-; CHECK: SU(4): %vreg{{[0-9]+}}<def> = LDRSWui
+; CHECK: SU(3): %{{[0-9]+}}:sub_32<def,read-undef> = LDRWui
+; CHECK: SU(4): %{{[0-9]+}}<def> = LDRSWui
; EXYNOS: ********** MI Scheduling **********
; EXYNOS-LABEL: ldp_half_zext_sext_int:BB#0
; EXYNOS: Cluster ld/st SU(3) - SU(4)
-; EXYNOS: SU(3): %vreg{{[0-9]+}}:sub_32<def,read-undef> = LDRWui
-; EXYNOS: SU(4): %vreg{{[0-9]+}}<def> = LDRSWui
+; EXYNOS: SU(3): %{{[0-9]+}}:sub_32<def,read-undef> = LDRWui
+; EXYNOS: SU(4): %{{[0-9]+}}<def> = LDRSWui
define i64 @ldp_half_zext_sext_int(i64* %q, i32* %p) nounwind {
%tmp0 = load i64, i64* %q, align 4
%tmp = load i32, i32* %p, align 4
@@ -113,13 +113,13 @@ define i64 @ldp_half_zext_sext_int(i64* %q, i32* %p) nounwind {
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: ldr_int_volatile:BB#0
; CHECK-NOT: Cluster ld/st
-; CHECK: SU(1): %vreg{{[0-9]+}}<def> = LDRWui
-; CHECK: SU(2): %vreg{{[0-9]+}}<def> = LDRWui
+; CHECK: SU(1): %{{[0-9]+}}<def> = LDRWui
+; CHECK: SU(2): %{{[0-9]+}}<def> = LDRWui
; EXYNOS: ********** MI Scheduling **********
; EXYNOS-LABEL: ldr_int_volatile:BB#0
; EXYNOS-NOT: Cluster ld/st
-; EXYNOS: SU(1): %vreg{{[0-9]+}}<def> = LDRWui
-; EXYNOS: SU(2): %vreg{{[0-9]+}}<def> = LDRWui
+; EXYNOS: SU(1): %{{[0-9]+}}<def> = LDRWui
+; EXYNOS: SU(2): %{{[0-9]+}}<def> = LDRWui
define i32 @ldr_int_volatile(i32* %a) nounwind {
%p1 = getelementptr inbounds i32, i32* %a, i32 1
%tmp1 = load volatile i32, i32* %p1, align 2
@@ -133,8 +133,8 @@ define i32 @ldr_int_volatile(i32* %a) nounwind {
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: ldq_cluster:BB#0
; CHECK: Cluster ld/st SU(1) - SU(3)
-; CHECK: SU(1): %vreg{{[0-9]+}}<def> = LDRQui
-; CHECK: SU(3): %vreg{{[0-9]+}}<def> = LDRQui
+; CHECK: SU(1): %{{[0-9]+}}<def> = LDRQui
+; CHECK: SU(3): %{{[0-9]+}}<def> = LDRQui
; EXYNOS: ********** MI Scheduling **********
; EXYNOS-LABEL: ldq_cluster:BB#0
; EXYNOS-NOT: Cluster ld/st
diff --git a/llvm/test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll b/llvm/test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll
index ad4feef7280..b4e07fe76c1 100644
--- a/llvm/test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll
@@ -6,10 +6,10 @@
;
; CHECK: ********** MI Scheduling **********
; CHECK: shiftable
-; CHECK: SU(2): %vreg2<def> = SUBXri %vreg1, 20, 0
+; CHECK: SU(2): %2<def> = SUBXri %1, 20, 0
; CHECK: Successors:
-; CHECK-NEXT: SU(4): Data Latency=1 Reg=%vreg2
-; CHECK-NEXT: SU(3): Data Latency=2 Reg=%vreg2
+; CHECK-NEXT: SU(4): Data Latency=1 Reg=%2
+; CHECK-NEXT: SU(3): Data Latency=2 Reg=%2
; CHECK: ********** INTERVALS **********
define i64 @shiftable(i64 %A, i64 %B) {
%tmp0 = sub i64 %B, 20
diff --git a/llvm/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll b/llvm/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll
index 1b102e63569..b2bfc13967a 100644
--- a/llvm/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll
@@ -5,15 +5,15 @@
;
; CHECK: ********** MI Scheduling **********
; CHECK: misched_bug:BB#0 entry
-; CHECK: SU(2): %vreg2<def> = LDRWui %vreg0, 1; mem:LD4[%ptr1_plus1] GPR32:%vreg2 GPR64common:%vreg0
+; CHECK: SU(2): %2<def> = LDRWui %0, 1; mem:LD4[%ptr1_plus1] GPR32:%2 GPR64common:%0
; CHECK: Successors:
-; CHECK-NEXT: SU(5): Data Latency=4 Reg=%vreg2
+; CHECK-NEXT: SU(5): Data Latency=4 Reg=%2
; CHECK-NEXT: SU(4): Ord Latency=0
-; CHECK: SU(3): STRWui %wzr, %vreg0, 0; mem:ST4[%ptr1] GPR64common:%vreg0
+; CHECK: SU(3): STRWui %wzr, %0, 0; mem:ST4[%ptr1] GPR64common:%0
; CHECK: Successors:
; CHECK: SU(4): Ord Latency=0
-; CHECK: SU(4): STRWui %wzr, %vreg1, 0; mem:ST4[%ptr2] GPR64common:%vreg1
-; CHECK: SU(5): %w0<def> = COPY %vreg2; GPR32:%vreg2
+; CHECK: SU(4): STRWui %wzr, %1, 0; mem:ST4[%ptr2] GPR64common:%1
+; CHECK: SU(5): %w0<def> = COPY %2; GPR32:%2
; CHECK: ** ScheduleDAGMI::schedule picking next node
define i32 @misched_bug(i32* %ptr1, i32* %ptr2) {
entry:
diff --git a/llvm/test/CodeGen/AArch64/tailcall_misched_graph.ll b/llvm/test/CodeGen/AArch64/tailcall_misched_graph.ll
index 7e76dac214a..cb42fcced8d 100644
--- a/llvm/test/CodeGen/AArch64/tailcall_misched_graph.ll
+++ b/llvm/test/CodeGen/AArch64/tailcall_misched_graph.ll
@@ -26,9 +26,9 @@ declare void @callee2(i8*, i8*, i8*, i8*, i8*,
; CHECK: fi#-2: {{.*}} fixed, at location [SP+8]
; CHECK: fi#-1: {{.*}} fixed, at location [SP]
-; CHECK: [[VRA:%vreg.*]]<def> = LDRXui <fi#-1>
-; CHECK: [[VRB:%vreg.*]]<def> = LDRXui <fi#-2>
-; CHECK: STRXui %vreg{{.*}}, <fi#-4>
+; CHECK: [[VRA:%.*]]<def> = LDRXui <fi#-1>
+; CHECK: [[VRB:%.*]]<def> = LDRXui <fi#-2>
+; CHECK: STRXui %{{.*}}, <fi#-4>
; CHECK: STRXui [[VRB]], <fi#-3>
; Make sure that there is a dependence edge between fi#-2 and fi#-4.
@@ -40,5 +40,5 @@ declare void @callee2(i8*, i8*, i8*, i8*, i8*,
; CHECK: SU([[DEPSTOREB:.*]]): Ord Latency=0
; CHECK: SU([[DEPSTOREA:.*]]): Ord Latency=0
-; CHECK: SU([[DEPSTOREA]]): STRXui %vreg{{.*}}, <fi#-4>
-; CHECK: SU([[DEPSTOREB]]): STRXui %vreg{{.*}}, <fi#-3>
+; CHECK: SU([[DEPSTOREA]]): STRXui %{{.*}}, <fi#-4>
+; CHECK: SU([[DEPSTOREB]]): STRXui %{{.*}}, <fi#-3>
diff --git a/llvm/test/CodeGen/AMDGPU/lds-output-queue.ll b/llvm/test/CodeGen/AMDGPU/lds-output-queue.ll
index e5df12a1e5a..f8fb12eefa6 100644
--- a/llvm/test/CodeGen/AMDGPU/lds-output-queue.ll
+++ b/llvm/test/CodeGen/AMDGPU/lds-output-queue.ll
@@ -46,20 +46,20 @@ declare void @llvm.r600.group.barrier() nounwind convergent
;
; The instruction selection phase will generate ISA that looks like this:
; %oqap = LDS_READ_RET
-; %vreg0 = MOV %oqap
-; %vreg1 = VTX_READ_32
-; %vreg2 = ADD_INT %vreg1, %vreg0
+; %0 = MOV %oqap
+; %1 = VTX_READ_32
+; %2 = ADD_INT %1, %0
;
; The bottom scheduler will schedule the two ALU instructions first:
;
; UNSCHEDULED:
; %oqap = LDS_READ_RET
-; %vreg1 = VTX_READ_32
+; %1 = VTX_READ_32
;
; SCHEDULED:
;
-; vreg0 = MOV %oqap
-; vreg2 = ADD_INT %vreg1, %vreg2
+; %0 = MOV %oqap
+; %2 = ADD_INT %1, %0
;
; The lack of proper aliasing causes the local memory read (LDS_READ_RET)
; to treat the global memory read (VTX_READ_32) as a chain dependency, so
@@ -69,10 +69,10 @@ declare void @llvm.r600.group.barrier() nounwind convergent
; Alu clause:
; %oqap = LDS_READ_RET
; VTX clause:
-; %vreg1 = VTX_READ_32
+; %1 = VTX_READ_32
; Alu clause:
-; vreg0 = MOV %oqap
-; vreg2 = ADD_INT %vreg1, %vreg2
+; %0 = MOV %oqap
+; %2 = ADD_INT %1, %0
;
; This is an illegal program because the oqap def and use now occur in
; different ALU clauses.
diff --git a/llvm/test/CodeGen/AMDGPU/liveness.mir b/llvm/test/CodeGen/AMDGPU/liveness.mir
index 6fd8466492d..8bb946da9ad 100644
--- a/llvm/test/CodeGen/AMDGPU/liveness.mir
+++ b/llvm/test/CodeGen/AMDGPU/liveness.mir
@@ -6,7 +6,7 @@
# liveranges needed it.
#
# Should see three distinct value numbers:
-# CHECK: %vreg0 [{{.*}}:0)[{{.*}}:1)[{{.*}}:2) 0@{{[0-9]+[Berd]}} 1@{{[0-9]+[Berd]}} 2@{{[0-9]+B-phi}}
+# CHECK: %0 [{{.*}}:0)[{{.*}}:1)[{{.*}}:2) 0@{{[0-9]+[Berd]}} 1@{{[0-9]+[Berd]}} 2@{{[0-9]+B-phi}}
--- |
define amdgpu_kernel void @test0() { ret void }
...
diff --git a/llvm/test/CodeGen/AMDGPU/spill-empty-live-interval.mir b/llvm/test/CodeGen/AMDGPU/spill-empty-live-interval.mir
index aceac34f286..1e9b6b5dd8d 100644
--- a/llvm/test/CodeGen/AMDGPU/spill-empty-live-interval.mir
+++ b/llvm/test/CodeGen/AMDGPU/spill-empty-live-interval.mir
@@ -2,7 +2,7 @@
# https://bugs.llvm.org/show_bug.cgi?id=33620
---
-# This would assert due to the empty live interval created for %vreg9
+# This would assert due to the empty live interval created for %9
# on the last S_NOP with an undef subreg use.
# CHECK-LABEL: name: expecting_non_empty_interval
diff --git a/llvm/test/CodeGen/AMDGPU/subreg-intervals.mir b/llvm/test/CodeGen/AMDGPU/subreg-intervals.mir
index 62816da25b2..2d353b8138e 100644
--- a/llvm/test/CodeGen/AMDGPU/subreg-intervals.mir
+++ b/llvm/test/CodeGen/AMDGPU/subreg-intervals.mir
@@ -2,11 +2,11 @@
# REQUIRES: asserts
# CHECK: INTERVALS
-# CHECK: vreg0
+# CHECK: %0
# CHECK-LABEL: Machine code for function test0:
# CHECK: INTERVALS
-# CHECK: vreg0
+# CHECK: %0
# CHECK-LABEL: Machine code for function test1:
--- |
diff --git a/llvm/test/CodeGen/ARM/2011-11-14-EarlyClobber.ll b/llvm/test/CodeGen/ARM/2011-11-14-EarlyClobber.ll
index 7f4057143a0..5e71eeb9c3d 100644
--- a/llvm/test/CodeGen/ARM/2011-11-14-EarlyClobber.ll
+++ b/llvm/test/CodeGen/ARM/2011-11-14-EarlyClobber.ll
@@ -5,11 +5,11 @@ target triple = "thumbv7-apple-ios"
; This test calls shrinkToUses with an early-clobber redefined live range during
; spilling.
;
-; Shrink: %vreg47,1.158257e-02 = [384r,400e:0)[400e,420r:1) 0@384r 1@400e
+; Shrink: %47,1.158257e-02 = [384r,400e:0)[400e,420r:1) 0@384r 1@400e
;
; The early-clobber instruction is an str:
;
-; %vreg12<earlyclobber,def> = t2STR_PRE %vreg6, %vreg12, 32, pred:14, pred:%noreg
+; %12<earlyclobber,def> = t2STR_PRE %6, %12, 32, pred:14, pred:%noreg
;
; This tests that shrinkToUses handles the EC redef correctly.
diff --git a/llvm/test/CodeGen/ARM/Windows/dbzchk.ll b/llvm/test/CodeGen/ARM/Windows/dbzchk.ll
index aea37992de4..afe30b28a27 100644
--- a/llvm/test/CodeGen/ARM/Windows/dbzchk.ll
+++ b/llvm/test/CodeGen/ARM/Windows/dbzchk.ll
@@ -119,7 +119,7 @@ attributes #0 = { optsize }
; CHECK-CFG-DAG: t2B <BB#3>
; CHECK-CFG-DAG: BB#2
-; CHECK-CFG-DAG: tCMPi8 %vreg{{[0-9]}}, 0
+; CHECK-CFG-DAG: tCMPi8 %{{[0-9]}}, 0
; CHECK-CFG-DAG: t2Bcc <BB#5>
; CHECK-CFG-DAG: BB#4
diff --git a/llvm/test/CodeGen/ARM/crash-greedy.ll b/llvm/test/CodeGen/ARM/crash-greedy.ll
index 6a58bb871d3..31d6079db71 100644
--- a/llvm/test/CodeGen/ARM/crash-greedy.ll
+++ b/llvm/test/CodeGen/ARM/crash-greedy.ll
@@ -61,7 +61,7 @@ for.end: ; preds = %cond.end
; CHECK: insert_elem
; This test has a sub-register copy with a kill flag:
-; %vreg6:ssub_3<def> = COPY %vreg6:ssub_2<kill>; QPR_VFP2:%vreg6
+; %6:ssub_3<def> = COPY %6:ssub_2<kill>; QPR_VFP2:%6
; The rewriter must do something sensible with that, or the scavenger crashes.
define void @insert_elem() nounwind {
entry:
diff --git a/llvm/test/CodeGen/ARM/misched-copy-arm.ll b/llvm/test/CodeGen/ARM/misched-copy-arm.ll
index 53f8b8d1504..bc20939d0f7 100644
--- a/llvm/test/CodeGen/ARM/misched-copy-arm.ll
+++ b/llvm/test/CodeGen/ARM/misched-copy-arm.ll
@@ -33,9 +33,9 @@ for.end: ; preds = %for.body, %entry
; This case was a crasher in constrainLocalCopy.
; The problem was the t2LDR_PRE defining both the global and local live range.
; CHECK-LABEL: *** Final schedule for BB#5 ***
-; CHECK: %[[R4:vreg[0-9]+]]<def>, %[[R1:vreg[0-9]+]]<def,tied2> = t2LDR_PRE %[[R1]]<tied1>
-; CHECK: %vreg{{[0-9]+}}<def> = COPY %[[R1]]
-; CHECK: %vreg{{[0-9]+}}<def> = COPY %[[R4]]
+; CHECK: %[[R4:[0-9]+]]<def>, %[[R1:[0-9]+]]<def,tied2> = t2LDR_PRE %[[R1]]<tied1>
+; CHECK: %{{[0-9]+}}<def> = COPY %[[R1]]
+; CHECK: %{{[0-9]+}}<def> = COPY %[[R4]]
; CHECK-LABEL: MACHINEINSTRS
%struct.rtx_def = type { [4 x i8], [1 x %union.rtunion_def] }
%union.rtunion_def = type { i64 }
diff --git a/llvm/test/CodeGen/ARM/misched-int-basic-thumb2.mir b/llvm/test/CodeGen/ARM/misched-int-basic-thumb2.mir
index 32d1e03d9a1..9c34e8e6ecc 100644
--- a/llvm/test/CodeGen/ARM/misched-int-basic-thumb2.mir
+++ b/llvm/test/CodeGen/ARM/misched-int-basic-thumb2.mir
@@ -37,62 +37,62 @@
}
#
# CHECK: ********** MI Scheduling **********
-# CHECK: SU(2): %vreg2<def> = t2MOVi32imm <ga:@g1>; rGPR:%vreg2
+# CHECK: SU(2): %2<def> = t2MOVi32imm <ga:@g1>; rGPR:%2
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 2
# CHECK_R52: Latency : 2
#
-# CHECK: SU(3): %vreg3<def> = t2LDRi12 %vreg2, 0, pred:14, pred:%noreg; mem:LD4[@g1](dereferenceable) rGPR:%vreg3,%vreg2
+# CHECK: SU(3): %3<def> = t2LDRi12 %2, 0, pred:14, pred:%noreg; mem:LD4[@g1](dereferenceable) rGPR:%3,%2
# CHECK_A9: Latency : 1
# CHECK_SWIFT: Latency : 3
# CHECK_R52: Latency : 4
#
-# CHECK : SU(6): %vreg6<def> = t2ADDrr %vreg3, %vreg3, pred:14, pred:%noreg, opt:%noreg; rGPR:%vreg6,%vreg3,%vreg3
+# CHECK : SU(6): %6<def> = t2ADDrr %3, %3, pred:14, pred:%noreg, opt:%noreg; rGPR:%6,%3,%3
# CHECK_A9: Latency : 1
# CHECK_SWIFT: Latency : 1
# CHECK_R52: Latency : 3
-# CHECK: SU(7): %vreg7<def> = t2SDIV %vreg6, %vreg5, pred:14, pred:%noreg; rGPR:%vreg7,%vreg6,%vreg5
+# CHECK: SU(7): %7<def> = t2SDIV %6, %5, pred:14, pred:%noreg; rGPR:%7,%6,%5
# CHECK_A9: Latency : 0
# CHECK_SWIFT: Latency : 14
# CHECK_R52: Latency : 8
-# CHECK: SU(8): t2STRi12 %vreg7, %vreg2, 0, pred:14, pred:%noreg; mem:ST4[@g1] rGPR:%vreg7,%vreg2
+# CHECK: SU(8): t2STRi12 %7, %2, 0, pred:14, pred:%noreg; mem:ST4[@g1] rGPR:%7,%2
# CHECK_A9: Latency : 1
# CHECK_SWIFT: Latency : 0
# CHECK_R52: Latency : 4
#
-# CHECK: SU(9): %vreg8<def> = t2SMULBB %vreg1, %vreg1, pred:14, pred:%noreg; rGPR:%vreg8,%vreg1,%vreg1
+# CHECK: SU(9): %8<def> = t2SMULBB %1, %1, pred:14, pred:%noreg; rGPR:%8,%1,%1
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 4
# CHECK_R52: Latency : 4
#
-# CHECK: SU(10): %vreg9<def> = t2SMLABB %vreg0, %vreg0, %vreg8, pred:14, pred:%noreg; rGPR:%vreg9,%vreg0,%vreg0,%vreg8
+# CHECK: SU(10): %9<def> = t2SMLABB %0, %0, %8, pred:14, pred:%noreg; rGPR:%9,%0,%0,%8
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 4
# CHECK_R52: Latency : 4
#
-# CHECK: SU(11): %vreg10<def> = t2UXTH %vreg9, 0, pred:14, pred:%noreg; rGPR:%vreg10,%vreg9
+# CHECK: SU(11): %10<def> = t2UXTH %9, 0, pred:14, pred:%noreg; rGPR:%10,%9
# CHECK_A9: Latency : 1
# CHECK_SWIFT: Latency : 1
# CHECK_R52: Latency : 3
#
-# CHECK: SU(12): %vreg11<def> = t2MUL %vreg10, %vreg7, pred:14, pred:%noreg; rGPR:%vreg11,%vreg10,%vreg7
+# CHECK: SU(12): %11<def> = t2MUL %10, %7, pred:14, pred:%noreg; rGPR:%11,%10,%7
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 4
# CHECK_R52: Latency : 4
#
-# CHECK: SU(13): %vreg12<def> = t2MLA %vreg11, %vreg11, %vreg11, pred:14, pred:%noreg; rGPR:%vreg12,%vreg11,%vreg11,%vreg11
+# CHECK: SU(13): %12<def> = t2MLA %11, %11, %11, pred:14, pred:%noreg; rGPR:%12,%11,%11,%11
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 4
# CHECK_R52: Latency : 4
#
-# CHECK: SU(14): %vreg13<def>, %vreg14<def> = t2UMULL %vreg12, %vreg12, pred:14, pred:%noreg; rGPR:%vreg13,%vreg14,%vreg12,%vreg12
+# CHECK: SU(14): %13<def>, %14<def> = t2UMULL %12, %12, pred:14, pred:%noreg; rGPR:%13,%14,%12,%12
# CHECK_A9: Latency : 3
# CHECK_SWIFT: Latency : 5
# CHECK_R52: Latency : 4
#
-# CHECK: SU(18): %vreg19<def,tied4>, %vreg20<def,tied5> = t2UMLAL %vreg12, %vreg12, %vreg19<tied0>, %vreg20<tied1>, pred:14, pred:%noreg; rGPR:%vreg19,%vreg20,%vreg12,%vreg12,%vreg20
+# CHECK: SU(18): %19<def,tied4>, %20<def,tied5> = t2UMLAL %12, %12, %19<tied0>, %20<tied1>, pred:14, pred:%noreg; rGPR:%19,%20,%12,%12,%20
# CHECK_A9: Latency : 3
# CHECK_SWIFT: Latency : 7
# CHECK_R52: Latency : 4
diff --git a/llvm/test/CodeGen/ARM/misched-int-basic.mir b/llvm/test/CodeGen/ARM/misched-int-basic.mir
index d5231269d73..b5d61dfca18 100644
--- a/llvm/test/CodeGen/ARM/misched-int-basic.mir
+++ b/llvm/test/CodeGen/ARM/misched-int-basic.mir
@@ -28,37 +28,37 @@
}
# CHECK: ********** MI Scheduling **********
-# CHECK: SU(2): %vreg2<def> = SMULBB %vreg1, %vreg1, pred:14, pred:%noreg; GPR:%vreg2,%vreg1,%vreg1
+# CHECK: SU(2): %2<def> = SMULBB %1, %1, pred:14, pred:%noreg; GPR:%2,%1,%1
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 4
# CHECK_R52: Latency : 4
#
-# CHECK: SU(3): %vreg3<def> = SMLABB %vreg0, %vreg0, %vreg2, pred:14, pred:%noreg; GPRnopc:%vreg3,%vreg0,%vreg0 GPR:%vreg2
+# CHECK: SU(3): %3<def> = SMLABB %0, %0, %2, pred:14, pred:%noreg; GPRnopc:%3,%0,%0 GPR:%2
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 4
# CHECK_R52: Latency : 4
#
-# CHECK: SU(4): %vreg4<def> = UXTH %vreg3, 0, pred:14, pred:%noreg; GPRnopc:%vreg4,%vreg3
+# CHECK: SU(4): %4<def> = UXTH %3, 0, pred:14, pred:%noreg; GPRnopc:%4,%3
# CHECK_A9: Latency : 1
# CHECK_SWIFT: Latency : 1
# CHECK_R52: Latency : 3
#
-# CHECK: SU(5): %vreg5<def> = MUL %vreg4, %vreg4, pred:14, pred:%noreg, opt:%noreg; GPRnopc:%vreg5,%vreg4,%vreg4
+# CHECK: SU(5): %5<def> = MUL %4, %4, pred:14, pred:%noreg, opt:%noreg; GPRnopc:%5,%4,%4
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 4
# CHECK_R52: Latency : 4
#
-# CHECK: SU(6): %vreg6<def> = MLA %vreg5, %vreg5, %vreg5, pred:14, pred:%noreg, opt:%noreg; GPRnopc:%vreg6,%vreg5,%vreg5,%vreg5
+# CHECK: SU(6): %6<def> = MLA %5, %5, %5, pred:14, pred:%noreg, opt:%noreg; GPRnopc:%6,%5,%5,%5
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 4
# CHECK_R52: Latency : 4
#
-# CHECK: SU(7): %vreg7<def>, %vreg8<def> = UMULL %vreg6, %vreg6, pred:14, pred:%noreg, opt:%noreg; GPRnopc:%vreg7,%vreg8,%vreg6,%vreg6
+# CHECK: SU(7): %7<def>, %8<def> = UMULL %6, %6, pred:14, pred:%noreg, opt:%noreg; GPRnopc:%7,%8,%6,%6
# CHECK_A9: Latency : 3
# CHECK_SWIFT: Latency : 5
# CHECK_R52: Latency : 4
#
-# CHECK: SU(11): %vreg13<def,tied4>, %vreg14<def,tied5> = UMLAL %vreg6, %vreg6, %vreg13<tied0>, %vreg14<tied1>, pred:14, pred:%noreg, opt:%noreg; GPR:%vreg13 GPRnopc:%vreg14,%vreg6,%vreg6
+# CHECK: SU(11): %13<def,tied4>, %14<def,tied5> = UMLAL %6, %6, %13<tied0>, %14<tied1>, pred:14, pred:%noreg, opt:%noreg; GPR:%13 GPRnopc:%14,%6,%6
# CHECK_SWIFT: Latency : 7
# CHECK_A9: Latency : 3
# CHECK_R52: Latency : 4
diff --git a/llvm/test/CodeGen/ARM/single-issue-r52.mir b/llvm/test/CodeGen/ARM/single-issue-r52.mir
index 1eba074dafb..8dfc5df1dec 100644
--- a/llvm/test/CodeGen/ARM/single-issue-r52.mir
+++ b/llvm/test/CodeGen/ARM/single-issue-r52.mir
@@ -20,22 +20,22 @@
# CHECK: ********** MI Scheduling **********
# CHECK: ScheduleDAGMILive::schedule starting
-# CHECK: SU(1): %vreg1<def> = VLD4d8Pseudo %vreg0, 8, pred:14, pred:%noreg; mem:LD32[%A](align=8) QQPR:%vreg1 GPR:%vreg0
+# CHECK: SU(1): %1<def> = VLD4d8Pseudo %0, 8, pred:14, pred:%noreg; mem:LD32[%A](align=8) QQPR:%1 GPR:%0
# CHECK: Latency : 8
# CHECK: Single Issue : true;
-# CHECK: SU(2): %vreg4<def> = VADDv8i8 %vreg1:dsub_0, %vreg1:dsub_1, pred:14, pred:%noreg; DPR:%vreg4 QQPR:%vreg1
+# CHECK: SU(2): %4<def> = VADDv8i8 %1:dsub_0, %1:dsub_1, pred:14, pred:%noreg; DPR:%4 QQPR:%1
# CHECK: Latency : 5
# CHECK: Single Issue : false;
-# CHECK: SU(3): %vreg5<def>, %vreg6<def> = VMOVRRD %vreg4, pred:14, pred:%noreg; GPR:%vreg5,%vreg6 DPR:%vreg4
+# CHECK: SU(3): %5<def>, %6<def> = VMOVRRD %4, pred:14, pred:%noreg; GPR:%5,%6 DPR:%4
# CHECK: Latency : 4
# CHECK: Single Issue : false;
-# TOPDOWN: Scheduling SU(1) %vreg1<def> = VLD4d8Pseudo
+# TOPDOWN: Scheduling SU(1) %1<def> = VLD4d8Pseudo
# TOPDOWN: Bump cycle to end group
-# TOPDOWN: Scheduling SU(2) %vreg4<def> = VADDv8i8
+# TOPDOWN: Scheduling SU(2) %4<def> = VADDv8i8
-# BOTTOMUP: Scheduling SU(2) %vreg4<def> = VADDv8i8
-# BOTTOMUP: Scheduling SU(1) %vreg1<def> = VLD4d8Pseudo
+# BOTTOMUP: Scheduling SU(2) %4<def> = VADDv8i8
+# BOTTOMUP: Scheduling SU(1) %1<def> = VLD4d8Pseudo
# BOTTOMUP: Bump cycle to begin group
...
diff --git a/llvm/test/CodeGen/ARM/subreg-remat.ll b/llvm/test/CodeGen/ARM/subreg-remat.ll
index d5abfc0af51..616ab1ef7cd 100644
--- a/llvm/test/CodeGen/ARM/subreg-remat.ll
+++ b/llvm/test/CodeGen/ARM/subreg-remat.ll
@@ -4,10 +4,10 @@ target triple = "thumbv7-apple-ios"
;
; The vector %v2 is built like this:
;
-; %vreg6:ssub_1<def> = ...
-; %vreg6:ssub_0<def> = VLDRS <cp#0>, 0, pred:14, pred:%noreg; mem:LD4[ConstantPool] DPR_VFP2:%vreg6
+; %6:ssub_1<def> = ...
+; %6:ssub_0<def> = VLDRS <cp#0>, 0, pred:14, pred:%noreg; mem:LD4[ConstantPool] DPR_VFP2:%6
;
-; When %vreg6 spills, the VLDRS constant pool load cannot be rematerialized
+; When %6 spills, the VLDRS constant pool load cannot be rematerialized
; since it implicitly reads the ssub_1 sub-register.
;
; CHECK: f1
@@ -31,7 +31,7 @@ define void @f1(float %x, <2 x float>* %p) {
; because the bits are undef, we should rematerialize. The vector is now built
; like this:
;
-; %vreg2:ssub_0<def> = VLDRS <cp#0>, 0, pred:14, pred:%noreg, %vreg2<imp-def>; mem:LD4[ConstantPool]
+; %2:ssub_0<def> = VLDRS <cp#0>, 0, pred:14, pred:%noreg, %2<imp-def>; mem:LD4[ConstantPool]
;
; The extra <imp-def> operand indicates that the instruction fully defines the
; virtual register. It doesn't read the old value.
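
A hedged sketch of the property the test pins down, using MachineInstr
queries (an illustrative helper, not the remat code's actual logic):

    // Remat is safe only when the def does not read the vreg's old value;
    // the <imp-def> form above fully redefines it.
    static bool fullyRedefines(const MachineInstr &MI, unsigned VReg) {
      return MI.definesRegister(VReg) && !MI.readsVirtualRegister(VReg);
    }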
diff --git a/llvm/test/CodeGen/AVR/select-must-add-unconditional-jump.ll b/llvm/test/CodeGen/AVR/select-must-add-unconditional-jump.ll
index e6344dfc692..64faff70a33 100644
--- a/llvm/test/CodeGen/AVR/select-must-add-unconditional-jump.ll
+++ b/llvm/test/CodeGen/AVR/select-must-add-unconditional-jump.ll
@@ -11,10 +11,10 @@
;
; BB#2: derived from LLVM BB %finish
; Predecessors according to CFG: BB#0 BB#1
-; %vreg0<def> = PHI %vreg3, <BB#0>, %vreg5, <BB#1>
-; %vreg7<def> = LDIRdK 2
-; %vreg8<def> = LDIRdK 1
-; CPRdRr %vreg2, %vreg0, %SREG<imp-def>
+; %0<def> = PHI %3, <BB#0>, %5, <BB#1>
+; %7<def> = LDIRdK 2
+; %8<def> = LDIRdK 1
+; CPRdRr %2, %0, %SREG<imp-def>
; BREQk <BB#6>, %SREG<imp-use>
; Successors according to CFG: BB#5(?%) BB#6(?%)
;
diff --git a/llvm/test/CodeGen/Hexagon/circ_ldd_bug.ll b/llvm/test/CodeGen/Hexagon/circ_ldd_bug.ll
index d15b5c964eb..40584cae7b0 100644
--- a/llvm/test/CodeGen/Hexagon/circ_ldd_bug.ll
+++ b/llvm/test/CodeGen/Hexagon/circ_ldd_bug.ll
@@ -7,10 +7,10 @@ target triple = "hexagon"
; UNREACHABLE executed at llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp:615!
; This happened because after unrolling a loop with a ldd_circ instruction we
; would have several TFCR and ldd_circ instruction sequences.
-; %vreg0 (CRRegs) = TFCR %vreg0 (IntRegs)
-; = ldd_circ( , , vreg0)
-; %vreg1 (CRRegs) = TFCR %vreg1 (IntRegs)
-; = ldd_circ( , , vreg0)
+; %0 (CRRegs) = TFCR %0 (IntRegs)
+; = ldd_circ( , , %0)
+; %1 (CRRegs) = TFCR %1 (IntRegs)
+; = ldd_circ( , , %0)
; The scheduler would move the CRRegs to the top of the loop. The allocator
; would try to spill the CRRegs after running out of them. We don't have code to
; spill CRRegs and the above assertion would be triggered.
diff --git a/llvm/test/CodeGen/Hexagon/expand-condsets-rm-reg.mir b/llvm/test/CodeGen/Hexagon/expand-condsets-rm-reg.mir
index e4c54c4b988..550e5c55550 100644
--- a/llvm/test/CodeGen/Hexagon/expand-condsets-rm-reg.mir
+++ b/llvm/test/CodeGen/Hexagon/expand-condsets-rm-reg.mir
@@ -3,12 +3,12 @@
# Check that coalesced registers are removed from live intervals.
#
-# Check that vreg3 is coalesced into vreg4, and that after coalescing
+# Check that %3 is coalesced into %4, and that after coalescing
# it is no longer in live intervals.
# CHECK-LABEL: After expand-condsets
# CHECK: INTERVALS
-# CHECK-NOT: vreg3
+# CHECK-NOT: %3
# CHECK: MACHINEINSTRS
diff --git a/llvm/test/CodeGen/Hexagon/post-inc-aa-metadata.ll b/llvm/test/CodeGen/Hexagon/post-inc-aa-metadata.ll
index 673a9b41ff2..688a71352cd 100644
--- a/llvm/test/CodeGen/Hexagon/post-inc-aa-metadata.ll
+++ b/llvm/test/CodeGen/Hexagon/post-inc-aa-metadata.ll
@@ -3,7 +3,7 @@
; Check that the generated post-increment load has TBAA information.
; CHECK-LABEL: Machine code for function fred:
-; CHECK: = V6_vL32b_pi %vreg{{[0-9]+}}<tied1>, 64; mem:LD64[{{.*}}](tbaa=
+; CHECK: = V6_vL32b_pi %{{[0-9]+}}<tied1>, 64; mem:LD64[{{.*}}](tbaa=
target triple = "hexagon"
diff --git a/llvm/test/CodeGen/Lanai/lanai-misched-trivial-disjoint.ll b/llvm/test/CodeGen/Lanai/lanai-misched-trivial-disjoint.ll
index 242ee53f19f..6279a2ea6a7 100644
--- a/llvm/test/CodeGen/Lanai/lanai-misched-trivial-disjoint.ll
+++ b/llvm/test/CodeGen/Lanai/lanai-misched-trivial-disjoint.ll
@@ -36,7 +36,7 @@ entry:
; CHECK-LABEL: SU({{.*}}): SW_RI{{.*}}, 4,
; CHECK: # preds left : 2
; CHECK: # succs left : 0
-; CHECK-LABEL: SU({{.*}}): %vreg{{.*}}<def> = LDW_RI{{.*}}, 12,
+; CHECK-LABEL: SU({{.*}}): %{{.*}}<def> = LDW_RI{{.*}}, 12,
; CHECK: # preds left : 1
; CHECK: # succs left : 4
; CHECK-LABEL: SU({{.*}}): STH_RI{{.*}}, 10,
diff --git a/llvm/test/CodeGen/MIR/AArch64/spill-fold.mir b/llvm/test/CodeGen/MIR/AArch64/spill-fold.mir
index f812bc710aa..8e80828b1ce 100644
--- a/llvm/test/CodeGen/MIR/AArch64/spill-fold.mir
+++ b/llvm/test/CodeGen/MIR/AArch64/spill-fold.mir
@@ -22,7 +22,7 @@ body: |
...
---
# CHECK-LABEL: name: test_subreg_spill_fold2
-# Similar to test_subreg_spill_fold, but with a vreg0 register class not containing %WZR.
# Similar to test_subreg_spill_fold, but with %0 in a register class not containing %WZR.
name: test_subreg_spill_fold2
registers:
- { id: 0, class: gpr64sp }
diff --git a/llvm/test/CodeGen/PowerPC/quadint-return.ll b/llvm/test/CodeGen/PowerPC/quadint-return.ll
index 2cc995f3f20..e9681071bf2 100644
--- a/llvm/test/CodeGen/PowerPC/quadint-return.ll
+++ b/llvm/test/CodeGen/PowerPC/quadint-return.ll
@@ -14,6 +14,6 @@ entry:
; CHECK: ********** Function: foo
; CHECK: ********** FAST REGISTER ALLOCATION **********
-; CHECK: %x3<def> = COPY %vreg
-; CHECK-NEXT: %x4<def> = COPY %vreg
+; CHECK: %x3<def> = COPY %{{[0-9]+}}
+; CHECK-NEXT: %x4<def> = COPY %{{[0-9]+}}
; CHECK-NEXT: BLR
diff --git a/llvm/test/CodeGen/WebAssembly/dbgvalue.ll b/llvm/test/CodeGen/WebAssembly/dbgvalue.ll
index a90f88ab234..dc108ff9b1f 100644
--- a/llvm/test/CodeGen/WebAssembly/dbgvalue.ll
+++ b/llvm/test/CodeGen/WebAssembly/dbgvalue.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -O0 -verify-machineinstrs -mtriple=wasm32-unknown-unknown-wasm | FileCheck %s
; CHECK: BB#0
-; CHECK: #DEBUG_VALUE: usage:self <- %vreg4
+; CHECK: #DEBUG_VALUE: usage:self <- %4
; CHECK: BB#1
; CHECK: DW_TAG_variable
source_filename = "test/CodeGen/WebAssembly/dbgvalue.ll"
diff --git a/llvm/test/CodeGen/X86/2011-09-14-valcoalesce.ll b/llvm/test/CodeGen/X86/2011-09-14-valcoalesce.ll
index 28802fce5da..6e4fab50ca1 100644
--- a/llvm/test/CodeGen/X86/2011-09-14-valcoalesce.ll
+++ b/llvm/test/CodeGen/X86/2011-09-14-valcoalesce.ll
@@ -2,17 +2,17 @@
;
; Test RegistersDefinedFromSameValue. We have multiple copies of the same vreg:
; while.body85.i:
-; vreg1 = copy vreg2
-; vreg2 = add
+; %1 = copy %2
+; %2 = add
; critical edge from land.lhs.true.i -> if.end117.i:
-; vreg27 = vreg2
+; %27 = %2
; critical edge from land.lhs.true103.i -> if.end117.i:
-; vreg27 = vreg2
+; %27 = %2
; if.then108.i:
-; vreg27 = vreg1
+; %27 = %1
;
; Prior to fixing PR10920 401.bzip miscompile, the coalescer would
-; consider vreg1 and vreg27 to be copies of the same value. It would
+; consider %1 and %27 to be copies of the same value. It would
; then remove one of the critical edge copies, which cannot safely be removed.
; There are two obvious ways the register-allocator could go here, either
diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll b/llvm/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll
index b941d495a85..08a4636a7b1 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll
@@ -8,7 +8,7 @@
; the fallback path.
; Check that we fallback on invoke translation failures.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %vreg1, %vreg0; mem:ST10[%ptr](align=16) (in function: test_x86_fp80_dump)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1, %0; mem:ST10[%ptr](align=16) (in function: test_x86_fp80_dump)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for test_x86_fp80_dump
; FALLBACK-WITH-REPORT-OUT-LABEL: test_x86_fp80_dump:
define void @test_x86_fp80_dump(x86_fp80* %ptr){
diff --git a/llvm/test/CodeGen/X86/cmovcmov.ll b/llvm/test/CodeGen/X86/cmovcmov.ll
index 50860b8d8fd..22c7b3f88dd 100644
--- a/llvm/test/CodeGen/X86/cmovcmov.ll
+++ b/llvm/test/CodeGen/X86/cmovcmov.ll
@@ -227,8 +227,8 @@ attributes #0 = { nounwind }
; The following test failed because llvm had a bug where a structure like:
;
-; %vreg12<def> = CMOV_GR8 %vreg7, %vreg11 ... (lt)
-; %vreg13<def> = CMOV_GR8 %vreg12, %vreg11 ... (gt)
+; %12<def> = CMOV_GR8 %7, %11 ... (lt)
+; %13<def> = CMOV_GR8 %12, %11 ... (gt)
;
; was lowered to:
;
@@ -239,9 +239,9 @@ attributes #0 = { nounwind }
; JG_1 BB#9
; BB#8:
; BB#9:
-; vreg12 = phi(vreg7, BB#8, vreg11, BB#0, vreg12, BB#7)
-; vreg13 = COPY vreg12
-; Which was invalid as %vreg12 is not the same value as %vreg13
+; %12 = phi(%7, BB#8, %11, BB#0, %12, BB#7)
+; %13 = COPY %12
+; Which was invalid as %12 is not the same value as %13
; CHECK-LABEL: no_cascade_opt:
; CMOV-DAG: cmpl %edx, %esi
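A hedged scalar model of the two selects (operand order is illustrative, not taken from the X86 pseudo definition) makes the invariant violation easy to see:

    #include <cstdint>

    // %12 = CMOV_GR8 %7, %11 (lt);  %13 = CMOV_GR8 %12, %11 (gt)
    static uint8_t twoSelects(bool lt, bool gt, uint8_t v7, uint8_t v11) {
      uint8_t v12 = lt ? v7 : v11;  // first conditional move
      uint8_t v13 = gt ? v12 : v11; // second conditional move
      // Lowering both into one diamond with "%13 = COPY %12" forces
      // v13 == v12; that is wrong for lt && !gt, where v12 is v7 but
      // v13 must still be v11.
      return v13;
    }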
diff --git a/llvm/test/CodeGen/X86/coalescer-dce.ll b/llvm/test/CodeGen/X86/coalescer-dce.ll
index 8d039ac6f7b..d97d11c6695 100644
--- a/llvm/test/CodeGen/X86/coalescer-dce.ll
+++ b/llvm/test/CodeGen/X86/coalescer-dce.ll
@@ -4,28 +4,28 @@ target triple = "x86_64-apple-macosx10.7.0"
; This test case has a sub-register join followed by a remat:
;
-; 256L %vreg2<def> = COPY %vreg7:sub_32bit<kill>; GR32:%vreg2 GR64:%vreg7
-; Considering merging %vreg2 with %vreg7:sub_32bit
+; 256L %2<def> = COPY %7:sub_32bit<kill>; GR32:%2 GR64:%7
+; Considering merging %2 with %7:sub_32bit
; Cross-class to GR64.
-; RHS = %vreg2 = [256d,272d:0) 0@256d
-; LHS = %vreg7 = [208d,256d:0)[304L,480L:0) 0@208d
-; updated: 272L %vreg0<def> = COPY %vreg7:sub_32bit<kill>; GR32:%vreg0 GR64:%vreg7
-; Joined. Result = %vreg7 = [208d,272d:0)[304L,480L:0) 0@208d
+; RHS = %2 = [256d,272d:0) 0@256d
+; LHS = %7 = [208d,256d:0)[304L,480L:0) 0@208d
+; updated: 272L %0<def> = COPY %7:sub_32bit<kill>; GR32:%0 GR64:%7
+; Joined. Result = %7 = [208d,272d:0)[304L,480L:0) 0@208d
;
-; 272L %vreg10:sub_32bit<def> = COPY %vreg7:sub_32bit<kill>, %vreg10<imp-def>; GR64:%vreg10,%vreg7
-; Considering merging %vreg7 with %vreg10
-; RHS = %vreg7 = [208d,272d:0)[304L,480L:0) 0@208d
-; LHS = %vreg10 = [16d,64L:2)[64L,160L:1)[192L,240L:1)[272d,304L:3)[304L,352d:1)[352d,400d:0)[400d,400S:4) 0@352d 1@64L-phidef 2@16d-phikill 3@272d-phikill 4@400d
-; Remat: %vreg10<def> = MOV64r0 %vreg10<imp-def>, %eflags<imp-def,dead>, %vreg10<imp-def>; GR64:%vreg10
-; Shrink: %vreg7 = [208d,272d:0)[304L,480L:0) 0@208d
+; 272L %10:sub_32bit<def> = COPY %7:sub_32bit<kill>, %10<imp-def>; GR64:%10,%7
+; Considering merging %7 with %10
+; RHS = %7 = [208d,272d:0)[304L,480L:0) 0@208d
+; LHS = %10 = [16d,64L:2)[64L,160L:1)[192L,240L:1)[272d,304L:3)[304L,352d:1)[352d,400d:0)[400d,400S:4) 0@352d 1@64L-phidef 2@16d-phikill 3@272d-phikill 4@400d
+; Remat: %10<def> = MOV64r0 %10<imp-def>, %eflags<imp-def,dead>, %10<imp-def>; GR64:%10
+; Shrink: %7 = [208d,272d:0)[304L,480L:0) 0@208d
; live-in at 240L
; live-in at 416L
; live-in at 320L
; live-in at 304L
-; Shrunk: %vreg7 = [208d,256d:0)[304L,480L:0) 0@208d
+; Shrunk: %7 = [208d,256d:0)[304L,480L:0) 0@208d
;
; The COPY at 256L is rewritten as a partial def, and that would artificially
-; extend the live range of %vreg7 to end at 256d. When the joined copy is
+; extend the live range of %7 to end at 256d. When the joined copy is
; removed, -verify-coalescing complains about the dangling kill.
;
; <rdar://problem/9967101>
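The "Shrink:"/"Shrunk:" lines correspond to recomputing a live interval from its remaining uses. A minimal sketch, assuming a coalescer-style caller that holds a LiveIntervals reference and the virtual register number (names are illustrative):

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/CodeGen/LiveIntervalAnalysis.h"

    using namespace llvm;

    // After the COPY at 256L is rewritten as a partial def, shrink %7's
    // interval so it ends at the last real use (256d) instead of 272d.
    static void shrinkAfterRewrite(LiveIntervals &LIS, unsigned Reg) {
      LiveInterval &LI = LIS.getInterval(Reg);
      SmallVector<MachineInstr *, 8> DeadDefs;
      if (LIS.shrinkToUses(&LI, &DeadDefs)) {
        // The interval may now consist of multiple connected components,
        // which callers typically split into separate registers.
      }
    }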
diff --git a/llvm/test/CodeGen/X86/crash.ll b/llvm/test/CodeGen/X86/crash.ll
index ea648e57b53..537a09b1c60 100644
--- a/llvm/test/CodeGen/X86/crash.ll
+++ b/llvm/test/CodeGen/X86/crash.ll
@@ -481,10 +481,10 @@ declare void @fn3(...)
; Check coalescing of IMPLICIT_DEF instructions:
;
-; %vreg1 = IMPLICIT_DEF
-; %vreg2 = MOV32r0
+; %1 = IMPLICIT_DEF
+; %2 = MOV32r0
;
-; When coalescing %vreg1 and %vreg2, the IMPLICIT_DEF instruction should be
+; When coalescing %1 and %2, the IMPLICIT_DEF instruction should be
; erased along with its value number.
;
define void @rdar12474033() nounwind ssp {
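A rough sketch of the erasure described above, assuming the caller has the LiveIntervals analysis and the value number (VNInfo) being merged away (both names illustrative):

    #include "llvm/CodeGen/LiveIntervalAnalysis.h"

    using namespace llvm;

    // If the merged value is defined by an IMPLICIT_DEF, delete the
    // instruction and drop it from the slot index maps; no copy remains.
    static void eraseImplicitDef(LiveIntervals &LIS, VNInfo *VNI) {
      MachineInstr *DefMI = LIS.getInstructionFromIndex(VNI->def);
      if (DefMI && DefMI->isImplicitDef()) {
        LIS.RemoveMachineInstrFromMaps(*DefMI);
        DefMI->eraseFromParent();
      }
    }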
diff --git a/llvm/test/CodeGen/X86/handle-move.ll b/llvm/test/CodeGen/X86/handle-move.ll
index 8acfd7ff209..a152f6db54e 100644
--- a/llvm/test/CodeGen/X86/handle-move.ll
+++ b/llvm/test/CodeGen/X86/handle-move.ll
@@ -8,8 +8,8 @@
; %edx has a live range into the function and is used by the DIV32r.
;
; Here sinking a kill + dead def:
-; 144B -> 180B: DIV32r %vreg4, %eax<imp-def>, %edx<imp-def,dead>, %EFLAGS<imp-def,dead>, %eax<imp-use,kill>, %edx<imp-use>
-; %vreg4: [48r,144r:0) 0@48r
+; 144B -> 180B: DIV32r %4, %eax<imp-def>, %edx<imp-def,dead>, %EFLAGS<imp-def,dead>, %eax<imp-use,kill>, %edx<imp-use>
+; %4: [48r,144r:0) 0@48r
; --> [48r,180r:0) 0@48r
; DH: [0B,16r:0)[128r,144r:2)[144r,144d:1) 0@0B-phi 1@144r 2@128r
; --> [0B,16r:0)[128r,180r:2)[180r,180d:1) 0@0B-phi 1@180r 2@128r
@@ -25,8 +25,8 @@ entry:
}
; Same as above, but moving a kill + live def:
-; 144B -> 180B: DIV32r %vreg4, %eax<imp-def,dead>, %edx<imp-def>, %EFLAGS<imp-def,dead>, %eax<imp-use,kill>, %edx<imp-use>
-; %vreg4: [48r,144r:0) 0@48r
+; 144B -> 180B: DIV32r %4, %eax<imp-def,dead>, %edx<imp-def>, %EFLAGS<imp-def,dead>, %eax<imp-use,kill>, %edx<imp-use>
+; %4: [48r,144r:0) 0@48r
; --> [48r,180r:0) 0@48r
; DH: [0B,16r:0)[128r,144r:2)[144r,184r:1) 0@0B-phi 1@144r 2@128r
; --> [0B,16r:0)[128r,180r:2)[180r,184r:1) 0@0B-phi 1@180r 2@128r
@@ -41,13 +41,13 @@ entry:
ret i32 %add
}
-; Moving a use below the existing kill (%vreg5):
-; Moving a tied virtual register def (%vreg11):
+; Moving a use below the existing kill (%5):
+; Moving a tied virtual register def (%11):
;
-; 96B -> 120B: %vreg11<def,tied1> = SUB32rr %vreg11<tied0>, %vreg5
-; %vreg11: [80r,96r:1)[96r,144r:0) 0@96r 1@80r
+; 96B -> 120B: %11<def,tied1> = SUB32rr %11<tied0>, %5
+; %11: [80r,96r:1)[96r,144r:0) 0@96r 1@80r
; --> [80r,120r:1)[120r,144r:0) 0@120r 1@80r
-; %vreg5: [16r,112r:0) 0@16r
+; %5: [16r,112r:0) 0@16r
; --> [16r,120r:0) 0@16r
;
define i32 @f3(i32 %a, i32 %b) nounwind uwtable readnone ssp {
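All three cases above are repairs performed by LiveIntervals::handleMove. A minimal sketch of the calling pattern, assuming a pass that already has LIS and wants to sink MI to InsertPt within its block (helper name invented):

    #include "llvm/CodeGen/LiveIntervalAnalysis.h"
    #include "llvm/CodeGen/MachineBasicBlock.h"

    using namespace llvm;

    // Physically move MI, then let LiveIntervals update slot indexes and
    // the ranges of all operands, including imp-def/imp-use physregs such
    // as %eax/%edx in the DIV32r examples (e.g. 144B -> 180B).
    static void sinkAndRepair(LiveIntervals &LIS, MachineInstr &MI,
                              MachineBasicBlock::iterator InsertPt) {
      MachineBasicBlock &MBB = *MI.getParent();
      MBB.splice(InsertPt, &MBB, MI.getIterator());
      LIS.handleMove(MI);
    }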
diff --git a/llvm/test/CodeGen/X86/invalid-liveness.mir b/llvm/test/CodeGen/X86/invalid-liveness.mir
index 28f8135c585..47db8090a92 100644
--- a/llvm/test/CodeGen/X86/invalid-liveness.mir
+++ b/llvm/test/CodeGen/X86/invalid-liveness.mir
@@ -5,11 +5,11 @@
define void @func() { ret void }
...
---
-# Liveness calculation should detect that we do not have a definition for vreg0
-# on all paths; In this example a def for vreg0 is missing when jumping from
+# Liveness calculation should detect that we do not have a definition for %0
+# on all paths; In this example a def for %0 is missing when jumping from
# bb.0 to bb.3.
#
-# CHECK: Use of %vreg0 does not have a corresponding definition on every path
+# CHECK: Use of %0 does not have a corresponding definition on every path
# CHECK: ERROR: Use not jointly dominated by defs.
name: func
registers:
diff --git a/llvm/test/CodeGen/X86/liveness-local-regalloc.ll b/llvm/test/CodeGen/X86/liveness-local-regalloc.ll
index 0954f9d5dd4..5301485353d 100644
--- a/llvm/test/CodeGen/X86/liveness-local-regalloc.ll
+++ b/llvm/test/CodeGen/X86/liveness-local-regalloc.ll
@@ -62,7 +62,7 @@ infloop1: ; preds = %infloop1, %bb5
; RAFast would forget to add a super-register <imp-def> when rewriting:
-; %vreg10:sub_32bit<def,read-undef> = COPY %R9D<kill>
+; %10:sub_32bit<def,read-undef> = COPY %R9D<kill>
; This trips up the machine code verifier.
define void @autogen_SD24657(i8*, i32*, i64*, i32, i64, i8) {
BB:
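The fix shape the comment implies is adding an implicit def of the containing 64-bit register; a hedged sketch (MF, MI, and SuperReg are assumed to be in scope in a real pass):

    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/MachineOperand.h"

    using namespace llvm;

    // When only sub_32bit of a 64-bit register is written, also record an
    // <imp-def> of the super-register so the verifier sees a full def.
    static void addSuperRegImpDef(MachineFunction &MF, MachineInstr &MI,
                                  unsigned SuperReg) {
      MI.addOperand(MF, MachineOperand::CreateReg(SuperReg, /*isDef=*/true,
                                                  /*isImp=*/true));
    }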
diff --git a/llvm/test/CodeGen/X86/misched-copy.ll b/llvm/test/CodeGen/X86/misched-copy.ll
index 1263bf91fa8..98890c66ba5 100644
--- a/llvm/test/CodeGen/X86/misched-copy.ll
+++ b/llvm/test/CodeGen/X86/misched-copy.ll
@@ -10,7 +10,7 @@
;
; CHECK: *** Final schedule for BB#1 ***
; CHECK: %eax<def> = COPY
-; CHECK-NEXT: MUL32r %vreg{{[0-9]+}}, %eax<imp-def>, %edx<imp-def>, %eflags<imp-def,dead>, %eax<imp-use>;
+; CHECK-NEXT: MUL32r %{{[0-9]+}}, %eax<imp-def>, %edx<imp-def>, %eflags<imp-def,dead>, %eax<imp-use>;
; CHECK-NEXT: COPY %e{{[ad]}}x
; CHECK-NEXT: COPY %e{{[ad]}}x
; CHECK: DIVSSrm
diff --git a/llvm/test/CodeGen/X86/norex-subreg.ll b/llvm/test/CodeGen/X86/norex-subreg.ll
index 9efafe42718..66e5ca1e30c 100644
--- a/llvm/test/CodeGen/X86/norex-subreg.ll
+++ b/llvm/test/CodeGen/X86/norex-subreg.ll
@@ -41,10 +41,10 @@ entry:
; This test case extracts a sub_8bit_hi sub-register:
;
-; %vreg2<def> = COPY %vreg1:sub_8bit_hi; GR8:%vreg2 GR64_ABCD:%vreg1
-; TEST8ri %vreg2, 1, %eflags<imp-def>; GR8:%vreg2
+; %2<def> = COPY %1:sub_8bit_hi; GR8:%2 GR64_ABCD:%1
+; TEST8ri %2, 1, %eflags<imp-def>; GR8:%2
;
-; %vreg2 must be constrained to GR8_NOREX, or the COPY could become impossible.
+; %2 must be constrained to GR8_NOREX, or the COPY could become impossible.
;
; PR11088
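Narrowing a virtual register's class this way is what MachineRegisterInfo::constrainRegClass does; a sketch under the assumption that it runs inside an X86 pass (where the generated register classes are visible):

    #include "X86RegisterInfo.h" // target-private, within lib/Target/X86
    #include "llvm/CodeGen/MachineRegisterInfo.h"

    using namespace llvm;

    // Narrow Reg to GR8_NOREX so the COPY from sub_8bit_hi stays lowerable;
    // constrainRegClass returns null when no common subclass exists.
    static bool constrainToNoRex(MachineRegisterInfo &MRI, unsigned Reg) {
      return MRI.constrainRegClass(Reg, &X86::GR8_NOREXRegClass) != nullptr;
    }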
diff --git a/llvm/test/CodeGen/X86/phys_subreg_coalesce-3.ll b/llvm/test/CodeGen/X86/phys_subreg_coalesce-3.ll
index 720ed69ba26..7839936c40a 100644
--- a/llvm/test/CodeGen/X86/phys_subreg_coalesce-3.ll
+++ b/llvm/test/CodeGen/X86/phys_subreg_coalesce-3.ll
@@ -1,10 +1,10 @@
; RUN: llc < %s -verify-machineinstrs -mtriple=i386-apple-darwin -mcpu=corei7 | FileCheck %s
; rdar://5571034
-; This requires physreg joining, %vreg13 is live everywhere:
-; 304L %cl<def> = COPY %vreg13:sub_8bit; GR32_ABCD:%vreg13
-; 320L %vreg15<def> = COPY %vreg19; GR32:%vreg15 GR32_NOSP:%vreg19
-; 336L %vreg15<def> = SAR32rCL %vreg15, %eflags<imp-def,dead>, %cl<imp-use,kill>; GR32:%vreg15
+; This requires physreg joining, %13 is live everywhere:
+; 304L %cl<def> = COPY %13:sub_8bit; GR32_ABCD:%13
+; 320L %15<def> = COPY %19; GR32:%15 GR32_NOSP:%19
+; 336L %15<def> = SAR32rCL %15, %eflags<imp-def,dead>, %cl<imp-use,kill>; GR32:%15
define void @foo(i32* nocapture %quadrant, i32* nocapture %ptr, i32 %bbSize, i32 %bbStart, i32 %shifts) nounwind ssp {
; CHECK-LABEL: foo:
diff --git a/llvm/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg-debugonly.mir b/llvm/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg-debugonly.mir
index a703f5f8f14..e3928725291 100644
--- a/llvm/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg-debugonly.mir
+++ b/llvm/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg-debugonly.mir
@@ -148,7 +148,7 @@ body: |
# Let's verify that the slot index ranges for the unused variables argc/argv,
# connected to physical regs %edi and %rsi, do not overlap with the ranges
-# for %vreg2 and %vreg3. The register allocator is actually allocating the
+# for %2 and %3. The register allocator is actually allocating the
# virtual registers to %edi and %esi, so the ranges for argc/argv should
# not cover the whole BB.
#
@@ -157,7 +157,7 @@ body: |
# CHECKDBG-NEXT: [0B;0e):0 BB#0-160B
# CHECKDBG-NEXT: !"argv,5" [0B;0e):0 Loc0=%rsi
# CHECKDBG-NEXT: [0B;0e):0 BB#0-160B
-# CHECKDBG-NEXT: !"a0,7" [16r;64r):0 Loc0=%vreg2
+# CHECKDBG-NEXT: !"a0,7" [16r;64r):0 Loc0=%2
# CHECKDBG-NEXT: [16r;64r):0 BB#0-160B
-# CHECKDBG-NEXT: !"a1,8" [32r;80r):0 Loc0=%vreg3
+# CHECKDBG-NEXT: !"a1,8" [32r;80r):0 Loc0=%3
# CHECKDBG-NEXT: [32r;80r):0 BB#0-160B