Diffstat (limited to 'llvm/lib')
 llvm/lib/CodeGen/DetectDeadLanes.cpp                   | 12
 llvm/lib/CodeGen/LiveIntervalAnalysis.cpp              | 20
 llvm/lib/CodeGen/MachineVerifier.cpp                   |  2
 llvm/lib/CodeGen/PeepholeOptimizer.cpp                 | 20
 llvm/lib/CodeGen/RegAllocGreedy.cpp                    | 30
 llvm/lib/CodeGen/RegisterCoalescer.cpp                 | 54
 llvm/lib/CodeGen/RenameIndependentSubregs.cpp          | 24
 llvm/lib/CodeGen/SplitKit.cpp                          |  4
 llvm/lib/CodeGen/TargetRegisterInfo.cpp                |  4
 llvm/lib/Target/AArch64/AArch64InstrInfo.cpp           | 22
 llvm/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp | 12
 llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp             | 34
 llvm/lib/Target/AMDGPU/SIFoldOperands.cpp              |  8
 llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp              | 10
 llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp              | 10
 llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp               |  2
 llvm/lib/Target/ARM/ARMBaseInstrInfo.h                 | 16
 llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp                |  8
 llvm/lib/Target/Hexagon/BitTracker.cpp                 | 16
 llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp         | 22
 llvm/lib/Target/Hexagon/HexagonBlockRanges.cpp         |  2
 llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp    |  2
 llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp         | 34
 llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp      | 70
 llvm/lib/Target/Hexagon/HexagonGenInsert.cpp           |  8
 llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp       |  6
 llvm/lib/Target/Hexagon/HexagonPeephole.cpp            | 40
 llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp       |  6
 llvm/lib/Target/Hexagon/HexagonSubtarget.cpp           |  8
 llvm/lib/Target/NVPTX/NVPTXPeephole.cpp                |  6
 llvm/lib/Target/PowerPC/PPCBranchCoalescing.cpp        | 38
 llvm/lib/Target/PowerPC/PPCInstrInfo.cpp               |  2
 llvm/lib/Target/PowerPC/PPCMIPeephole.cpp              | 10
 llvm/lib/Target/PowerPC/PPCVSXFMAMutate.cpp            | 40
 llvm/lib/Target/X86/X86ISelLowering.cpp                |  4
 35 files changed, 303 insertions(+), 303 deletions(-)
diff --git a/llvm/lib/CodeGen/DetectDeadLanes.cpp b/llvm/lib/CodeGen/DetectDeadLanes.cpp
index ef4e2aaaf48..b82876e1c85 100644
--- a/llvm/lib/CodeGen/DetectDeadLanes.cpp
+++ b/llvm/lib/CodeGen/DetectDeadLanes.cpp
@@ -17,12 +17,12 @@
/// when subregisters are involved.
///
/// Example:
-/// %vreg0 = some definition
-/// %vreg1 = IMPLICIT_DEF
-/// %vreg2 = REG_SEQUENCE %vreg0, sub0, %vreg1, sub1
-/// %vreg3 = EXTRACT_SUBREG %vreg2, sub1
-/// = use %vreg3
-/// The %vreg0 definition is dead and %vreg3 contains an undefined value.
+/// %0 = some definition
+/// %1 = IMPLICIT_DEF
+/// %2 = REG_SEQUENCE %0, sub0, %1, sub1
+/// %3 = EXTRACT_SUBREG %2, sub1
+/// = use %3
+/// The %0 definition is dead and %3 contains an undefined value.
//
//===----------------------------------------------------------------------===//
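To make the lane bookkeeping above concrete, here is a minimal standalone C++ sketch (not the pass's real code; the lane assignments sub0=0x1, sub1=0x2 are toy assumptions): %0 is dead because the lane it feeds is never read, and %3 is undefined because the lane it reads was only defined by IMPLICIT_DEF.

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t Sub0 = 0x1, Sub1 = 0x2; // toy lane masks

      // %2 = REG_SEQUENCE %0, sub0, %1, sub1
      // %3 = EXTRACT_SUBREG %2, sub1  ; only sub1 of %2 is ever read
      uint32_t UsedLanes2 = Sub1;

      // %0 feeds lane sub0 of %2; it is dead if that lane is never read.
      bool Vreg0Dead = (UsedLanes2 & Sub0) == 0;

      // %1 comes from IMPLICIT_DEF, so only %0 really defined a lane of %2.
      uint32_t DefinedLanes2 = Sub0;
      bool Vreg3Undef = (UsedLanes2 & ~DefinedLanes2) != 0;

      std::printf("%%0 dead: %d, %%3 undefined: %d\n", Vreg0Dead, Vreg3Undef);
      return 0; // prints "1, 1", matching the comment above
    }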
diff --git a/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp b/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
index c55519387d1..fb7fbe7f1c2 100644
--- a/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
+++ b/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
@@ -698,11 +698,11 @@ void LiveIntervals::addKillFlags(const VirtRegMap *VRM) {
// Check if any of the regunits are live beyond the end of RI. That could
// happen when a physreg is defined as a copy of a virtreg:
//
- // %eax = COPY %vreg5
- // FOO %vreg5 <--- MI, cancel kill because %eax is live.
+ // %eax = COPY %5
+ // FOO %5 <--- MI, cancel kill because %eax is live.
// BAR %eax<kill>
//
- // There should be no kill flag on FOO when %vreg5 is rewritten as %eax.
+ // There should be no kill flag on FOO when %5 is rewritten as %eax.
for (auto &RUP : RU) {
const LiveRange &RURange = *RUP.first;
LiveRange::const_iterator &I = RUP.second;
@@ -719,13 +719,13 @@ void LiveIntervals::addKillFlags(const VirtRegMap *VRM) {
// When reading a partial undefined value we must not add a kill flag.
// The regalloc might have used the undef lane for something else.
// Example:
- // %vreg1 = ... ; R32: %vreg1
- // %vreg2:high16 = ... ; R64: %vreg2
- // = read %vreg2<kill> ; R64: %vreg2
- // = read %vreg1 ; R32: %vreg1
- // The <kill> flag is correct for %vreg2, but the register allocator may
- // assign R0L to %vreg1, and R0 to %vreg2 because the low 32bits of R0
- // are actually never written by %vreg2. After assignment the <kill>
+ // %1 = ... ; R32: %1
+ // %2:high16 = ... ; R64: %2
+ // = read %2<kill> ; R64: %2
+ // = read %1 ; R32: %1
+ // The <kill> flag is correct for %2, but the register allocator may
+ // assign R0L to %1, and R0 to %2 because the low 32bits of R0
+ // are actually never written by %2. After assignment the <kill>
// flag at the read instruction is invalid.
LaneBitmask DefinedLanesMask;
if (!SRs.empty()) {
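A toy model of the regunit check described in the first comment above, with made-up slot indexes (a sketch of the idea, not LiveIntervals' API): the kill flag on FOO is cancelled because the unit assigned to %eax stays live past FOO.

    #include <cstdio>

    struct Segment { int Start, End; }; // half-open live segment [Start, End)

    static bool liveAt(const Segment &S, int Idx) {
      return Idx >= S.Start && Idx < S.End;
    }

    int main() {
      // %eax = COPY %5 at index 10, FOO %5 at 20, BAR %eax<kill> at 30:
      // once %5 is rewritten as %eax, its regunit is live on [10, 30).
      Segment EAXUnit{10, 30};
      int FooIdx = 20;
      bool CancelKill = liveAt(EAXUnit, FooIdx + 1); // live beyond FOO?
      std::printf("kill flag on FOO: %s\n", CancelKill ? "cancelled" : "kept");
    }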
diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index 4f6eb428c8e..83a9e1a58c0 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -1961,7 +1961,7 @@ void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
if (MOI->isDef()) {
if (Sub != 0) {
hasSubRegDef = true;
- // An operand vreg0:sub0<def> reads vreg0:sub1..n. Invert the lane
+ // An operand %0:sub0<def> reads %0:sub1..n. Invert the lane
// mask for subregister defs. Read-undef defs will be handled by
// readsReg below.
SLM = ~SLM;
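The inversion can be illustrated with a toy two-lane register (lane values assumed for illustration): a def of sub0 is treated as reading the complementary lanes of the same register.

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t AllLanes = 0x3;        // a register with lanes sub0|sub1
      uint32_t SLM = 0x1;                   // operand %0:sub0<def>
      uint32_t ReadLanes = ~SLM & AllLanes; // the def reads the other lanes
      std::printf("lanes read by %%0:sub0<def>: 0x%x\n", (unsigned)ReadLanes);
      // prints 0x2, i.e. the sub1..n part described above
    }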
diff --git a/llvm/lib/CodeGen/PeepholeOptimizer.cpp b/llvm/lib/CodeGen/PeepholeOptimizer.cpp
index dfad7615bca..e4c2aa46478 100644
--- a/llvm/lib/CodeGen/PeepholeOptimizer.cpp
+++ b/llvm/lib/CodeGen/PeepholeOptimizer.cpp
@@ -1453,10 +1453,10 @@ bool PeepholeOptimizer::foldImmediate(
// only the first copy is considered.
//
// e.g.
-// %vreg1 = COPY %vreg0
-// %vreg2 = COPY %vreg0:sub1
+// %1 = COPY %0
+// %2 = COPY %0:sub1
//
-// Should replace %vreg2 uses with %vreg1:sub1
+// Should replace %2 uses with %1:sub1
bool PeepholeOptimizer::foldRedundantCopy(
MachineInstr *MI, SmallSet<unsigned, 4> &CopySrcRegs,
DenseMap<unsigned, MachineInstr *> &CopyMIs) {
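A standalone sketch of the bookkeeping this comment describes, using a plain std::unordered_map instead of the real SmallSet/DenseMap and ignoring the subregister matching the pass must also do:

    #include <cstdio>
    #include <unordered_map>

    int main() {
      std::unordered_map<unsigned, unsigned> FirstCopyDst; // Src -> first Dst

      // %1 = COPY %0 ; %2 = COPY %0 (each entry is {Dst, Src})
      unsigned Copies[][2] = {{1, 0}, {2, 0}};
      for (auto &C : Copies) {
        auto It = FirstCopyDst.find(C[1]);
        if (It == FirstCopyDst.end()) {
          FirstCopyDst.emplace(C[1], C[0]); // remember the first copy
          continue;
        }
        // A later copy of the same source is redundant.
        std::printf("replace uses of %%%u with %%%u\n", C[0], It->second);
      }
    }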
@@ -1621,16 +1621,16 @@ bool PeepholeOptimizer::findTargetRecurrence(
/// from the phi. For example, if there is a recurrence of
///
/// LoopHeader:
-/// %vreg1 = phi(%vreg0, %vreg100)
+/// %1 = phi(%0, %100)
/// LoopLatch:
-/// %vreg0<def, tied1> = ADD %vreg2<def, tied0>, %vreg1
+/// %0<def, tied1> = ADD %2<def, tied0>, %1
///
-/// , the fact that vreg0 and vreg2 are in the same tied operands set makes
+/// , the fact that %0 and %2 are in the same tied operands set makes
/// the coalescing of copy instruction generated from the phi in
-/// LoopHeader(i.e. %vreg1 = COPY %vreg0) impossible, because %vreg1 and
-/// %vreg2 have overlapping live range. This introduces additional move
-/// instruction to the final assembly. However, if we commute %vreg2 and
-/// %vreg1 of ADD instruction, the redundant move instruction can be
+/// LoopHeader(i.e. %1 = COPY %0) impossible, because %1 and
+/// %2 have overlapping live ranges. This introduces an additional move
+/// instruction to the final assembly. However, if we commute %2 and
+/// %1 of ADD instruction, the redundant move instruction can be
/// avoided.
bool PeepholeOptimizer::optimizeRecurrence(MachineInstr &PHI) {
SmallSet<unsigned, 2> TargetRegs;
diff --git a/llvm/lib/CodeGen/RegAllocGreedy.cpp b/llvm/lib/CodeGen/RegAllocGreedy.cpp
index 39676fed3d0..c3d94d8a5eb 100644
--- a/llvm/lib/CodeGen/RegAllocGreedy.cpp
+++ b/llvm/lib/CodeGen/RegAllocGreedy.cpp
@@ -1396,30 +1396,30 @@ BlockFrequency RAGreedy::calcSpillCost() {
/// Such sequences are created in 2 scenarios:
///
/// Scenario #1:
-/// vreg0 is evicted from physreg0 by vreg1.
-/// Evictee vreg0 is intended for region splitting with split candidate
-/// physreg0 (the reg vreg0 was evicted from).
+/// %0 is evicted from physreg0 by %1.
+/// Evictee %0 is intended for region splitting with split candidate
+/// physreg0 (the reg %0 was evicted from).
/// Region splitting creates a local interval because of interference with the
-/// evictor vreg1 (normally region spliitting creates 2 interval, the "by reg"
+/// evictor %1 (normally region splitting creates 2 intervals, the "by reg"
/// and "by stack" intervals and local interval created when interference
/// occurs).
-/// One of the split intervals ends up evicting vreg2 from physreg1.
-/// Evictee vreg2 is intended for region splitting with split candidate
+/// One of the split intervals ends up evicting %2 from physreg1.
+/// Evictee %2 is intended for region splitting with split candidate
/// physreg1.
-/// One of the split intervals ends up evicting vreg3 from physreg2, etc.
+/// One of the split intervals ends up evicting %3 from physreg2, etc.
///
/// Scenario #2
-/// vreg0 is evicted from physreg0 by vreg1.
-/// vreg2 is evicted from physreg2 by vreg3 etc.
-/// Evictee vreg0 is intended for region splitting with split candidate
+/// %0 is evicted from physreg0 by %1.
+/// %2 is evicted from physreg2 by %3 etc.
+/// Evictee %0 is intended for region splitting with split candidate
/// physreg1.
/// Region splitting creates a local interval because of interference with the
-/// evictor vreg1.
-/// One of the split intervals ends up evicting back original evictor vreg1
-/// from physreg0 (the reg vreg0 was evicted from).
-/// Another evictee vreg2 is intended for region splitting with split candidate
+/// evictor %1.
+/// One of the split intervals ends up evicting back original evictor %1
+/// from physreg0 (the reg %0 was evicted from).
+/// Another evictee %2 is intended for region splitting with split candidate
/// physreg1.
-/// One of the split intervals ends up evicting vreg3 from physreg2, etc.
+/// One of the split intervals ends up evicting %3 from physreg2, etc.
///
/// \param Evictee The register considered to be split.
/// \param Cand The split candidate that determines the physical register
diff --git a/llvm/lib/CodeGen/RegisterCoalescer.cpp b/llvm/lib/CodeGen/RegisterCoalescer.cpp
index 81f9a343dc1..128a07cef10 100644
--- a/llvm/lib/CodeGen/RegisterCoalescer.cpp
+++ b/llvm/lib/CodeGen/RegisterCoalescer.cpp
@@ -228,9 +228,9 @@ namespace {
/// flag.
/// This can happen when undef uses were previously concealed by a copy
/// which we coalesced. Example:
- /// %vreg0:sub0<def,read-undef> = ...
- /// %vreg1 = COPY %vreg0 <-- Coalescing COPY reveals undef
- /// = use %vreg1:sub1 <-- hidden undef use
+ /// %0:sub0<def,read-undef> = ...
+ /// %1 = COPY %0 <-- Coalescing COPY reveals undef
+ /// = use %1:sub1 <-- hidden undef use
void addUndefFlag(const LiveInterval &Int, SlotIndex UseIdx,
MachineOperand &MO, unsigned SubRegIdx);
@@ -1143,10 +1143,10 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
NewMI.setDebugLoc(DL);
// In a situation like the following:
- // %vreg0:subreg = instr ; DefMI, subreg = DstIdx
- // %vreg1 = copy %vreg0:subreg ; CopyMI, SrcIdx = 0
- // instead of widening %vreg1 to the register class of %vreg0 simply do:
- // %vreg1 = instr
+ // %0:subreg = instr ; DefMI, subreg = DstIdx
+ // %1 = copy %0:subreg ; CopyMI, SrcIdx = 0
+ // instead of widening %1 to the register class of %0 simply do:
+ // %1 = instr
const TargetRegisterClass *NewRC = CP.getNewRC();
if (DstIdx != 0) {
MachineOperand &DefMO = NewMI.getOperand(0);
@@ -1226,12 +1226,12 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
// This could happen if the rematerialization instruction is rematerializing
// more than is actually used in the register.
// An example would be:
- // vreg1 = LOAD CONSTANTS 5, 8 ; Loading both 5 and 8 in different subregs
+ // %1 = LOAD CONSTANTS 5, 8 ; Loading both 5 and 8 in different subregs
// ; Copying only part of the register here, but the rest is undef.
- // vreg2:sub_16bit<def, read-undef> = COPY vreg1:sub_16bit
+ // %2:sub_16bit<def, read-undef> = COPY %1:sub_16bit
// ==>
// ; Materialize all the constants but only using one
- // vreg2 = LOAD_CONSTANTS 5, 8
+ // %2 = LOAD_CONSTANTS 5, 8
//
// at this point for the part that wasn't defined before we could have
// subranges missing the definition.
@@ -1254,11 +1254,11 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
// Make sure that the subrange for resultant undef is removed
// For example:
- // vreg1:sub1<def,read-undef> = LOAD CONSTANT 1
- // vreg2<def> = COPY vreg1
+ // %1:sub1<def,read-undef> = LOAD CONSTANT 1
+ // %2<def> = COPY %1
// ==>
- // vreg2:sub1<def, read-undef> = LOAD CONSTANT 1
- // ; Correct but need to remove the subrange for vreg2:sub0
+ // %2:sub1<def, read-undef> = LOAD CONSTANT 1
+ // ; Correct but need to remove the subrange for %2:sub0
// ; as it is now undef
if (NewIdx != 0 && DstInt.hasSubRanges()) {
// The affected subregister segments can be removed.
@@ -1292,15 +1292,15 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
// Otherwise, variables that live through may miss some
// interferences, thus creating invalid allocation.
// E.g., i386 code:
- // vreg1 = somedef ; vreg1 GR8
- // vreg2 = remat ; vreg2 GR32
- // CL = COPY vreg2.sub_8bit
- // = somedef vreg1 ; vreg1 GR8
+ // %1 = somedef ; %1 GR8
+ // %2 = remat ; %2 GR32
+ // CL = COPY %2.sub_8bit
+ // = somedef %1 ; %1 GR8
// =>
- // vreg1 = somedef ; vreg1 GR8
+ // %1 = somedef ; %1 GR8
// ECX<def, dead> = remat ; CL<imp-def>
- // = somedef vreg1 ; vreg1 GR8
- // vreg1 will see the inteferences with CL but not with CH since
+ // = somedef %1 ; %1 GR8
+ // %1 will see the interferences with CL but not with CH since
// no live-ranges would have been created for ECX.
// Fix that!
SlotIndex NewMIIdx = LIS->getInstructionIndex(NewMI);
@@ -1353,9 +1353,9 @@ bool RegisterCoalescer::eliminateUndefCopy(MachineInstr *CopyMI) {
// ProcessImplicitDefs may leave some copies of <undef> values; it only removes
// local variables. When we have a copy like:
//
- // %vreg1 = COPY %vreg2<undef>
+ // %1 = COPY %2<undef>
//
- // We delete the copy and remove the corresponding value number from %vreg1.
+ // We delete the copy and remove the corresponding value number from %1.
// Any uses of that value number are marked as <undef>.
// Note that we do not query CoalescerPair here but redo isMoveInstr as the
@@ -1820,18 +1820,18 @@ bool RegisterCoalescer::joinReservedPhysReg(CoalescerPair &CP) {
MachineInstr *CopyMI;
if (CP.isFlipped()) {
// Physreg is copied into vreg
- // %vregY = COPY %x
+ // %y = COPY %physreg_x
// ... //< no other def of %x here
- // use %vregY
+ // use %y
// =>
// ...
// use %x
CopyMI = MRI->getVRegDef(SrcReg);
} else {
// VReg is copied into physreg:
- // %vregX = def
+ // %y = def
// ... //< no other def or use of %y here
- // %y = COPY %vregX
+ // %y = COPY %physreg_x
// =>
// %y = def
// ...
diff --git a/llvm/lib/CodeGen/RenameIndependentSubregs.cpp b/llvm/lib/CodeGen/RenameIndependentSubregs.cpp
index 72b7960f327..b423d674364 100644
--- a/llvm/lib/CodeGen/RenameIndependentSubregs.cpp
+++ b/llvm/lib/CodeGen/RenameIndependentSubregs.cpp
@@ -10,20 +10,20 @@
/// Rename independent subregisters looks for virtual registers with
/// independently used subregisters and renames them to new virtual registers.
/// Example: In the following:
-/// %vreg0:sub0<read-undef> = ...
-/// %vreg0:sub1 = ...
-/// use %vreg0:sub0
-/// %vreg0:sub0 = ...
-/// use %vreg0:sub0
-/// use %vreg0:sub1
+/// %0:sub0<read-undef> = ...
+/// %0:sub1 = ...
+/// use %0:sub0
+/// %0:sub0 = ...
+/// use %0:sub0
+/// use %0:sub1
/// sub0 and sub1 are never used together, and we have two independent sub0
/// definitions. This pass will rename to:
-/// %vreg0:sub0<read-undef> = ...
-/// %vreg1:sub1<read-undef> = ...
-/// use %vreg1:sub1
-/// %vreg2:sub1<read-undef> = ...
-/// use %vreg2:sub1
-/// use %vreg0:sub0
+/// %0:sub0<read-undef> = ...
+/// %1:sub1<read-undef> = ...
+/// use %1:sub1
+/// %2:sub1<read-undef> = ...
+/// use %2:sub1
+/// use %0:sub0
//
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/CodeGen/SplitKit.cpp b/llvm/lib/CodeGen/SplitKit.cpp
index 59c5798ab49..49f31333acf 100644
--- a/llvm/lib/CodeGen/SplitKit.cpp
+++ b/llvm/lib/CodeGen/SplitKit.cpp
@@ -1375,9 +1375,9 @@ void SplitEditor::rewriteAssigned(bool ExtendRanges) {
continue;
// The problem here can be that the new register may have been created
// for a partially defined original register. For example:
- // %vreg827:subreg_hireg<def,read-undef> = ...
+ // %0:subreg_hireg<def,read-undef> = ...
// ...
- // %vreg828<def> = COPY %vreg827
+ // %1<def> = COPY %0
if (S.empty())
continue;
SubLRC.reset(&VRM.getMachineFunction(), LIS.getSlotIndexes(), &MDT,
diff --git a/llvm/lib/CodeGen/TargetRegisterInfo.cpp b/llvm/lib/CodeGen/TargetRegisterInfo.cpp
index cc5c1485608..721761eef61 100644
--- a/llvm/lib/CodeGen/TargetRegisterInfo.cpp
+++ b/llvm/lib/CodeGen/TargetRegisterInfo.cpp
@@ -93,7 +93,7 @@ Printable printReg(unsigned Reg, const TargetRegisterInfo *TRI,
else if (TargetRegisterInfo::isStackSlot(Reg))
OS << "SS#" << TargetRegisterInfo::stackSlot2Index(Reg);
else if (TargetRegisterInfo::isVirtualRegister(Reg))
- OS << "%vreg" << TargetRegisterInfo::virtReg2Index(Reg);
+ OS << '%' << TargetRegisterInfo::virtReg2Index(Reg);
else if (TRI && Reg < TRI->getNumRegs()) {
OS << '%';
printLowerCase(TRI->getName(Reg), OS);
@@ -134,7 +134,7 @@ Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI) {
Printable printVRegOrUnit(unsigned Unit, const TargetRegisterInfo *TRI) {
return Printable([Unit, TRI](raw_ostream &OS) {
if (TRI && TRI->isVirtualRegister(Unit)) {
- OS << "%vreg" << TargetRegisterInfo::virtReg2Index(Unit);
+ OS << '%' << TargetRegisterInfo::virtReg2Index(Unit);
} else {
OS << printRegUnit(Unit, TRI);
}
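The new convention can be mimicked with a small self-contained helper (formatReg is a made-up name, not LLVM's printReg): virtual registers print as a bare %index, physical registers as a lowercased %name.

    #include <cctype>
    #include <cstdio>
    #include <string>

    // Index is used for virtual registers, PhysName for physical ones.
    static std::string formatReg(bool IsVirtual, unsigned Index,
                                 const char *PhysName = "") {
      if (IsVirtual)
        return "%" + std::to_string(Index); // "%5", no "vreg" anymore
      std::string S = "%";
      for (const char *P = PhysName; *P; ++P)
        S += static_cast<char>(std::tolower(static_cast<unsigned char>(*P)));
      return S;
    }

    int main() {
      std::printf("%s %s\n", formatReg(true, 5).c_str(),
                  formatReg(false, 0, "SP").c_str()); // prints "%5 %sp"
    }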
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index bf5f0f624af..bc3c0a4a60e 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -2801,14 +2801,14 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
LiveIntervals *LIS) const {
// This is a bit of a hack. Consider this instruction:
//
- // %vreg0<def> = COPY %sp; GPR64all:%vreg0
+ // %0<def> = COPY %sp; GPR64all:%0
//
// We explicitly chose GPR64all for the virtual register so such a copy might
// be eliminated by RegisterCoalescer. However, that may not be possible, and
- // %vreg0 may even spill. We can't spill %sp, and since it is in the GPR64all
+ // %0 may even spill. We can't spill %sp, and since it is in the GPR64all
// register class, TargetInstrInfo::foldMemoryOperand() is going to try.
//
- // To prevent that, we are going to constrain the %vreg0 register class here.
+ // To prevent that, we are going to constrain the %0 register class here.
//
// <rdar://problem/11522048>
//
@@ -2830,7 +2830,7 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
// Handle the case where a copy is being spilled or filled but the source
// and destination register class don't match. For example:
//
- // %vreg0<def> = COPY %xzr; GPR64common:%vreg0
+ // %0<def> = COPY %xzr; GPR64common:%0
//
// In this case we can still safely fold away the COPY and generate the
// following spill code:
@@ -2840,16 +2840,16 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
// This also eliminates spilled cross register class COPYs (e.g. between x and
// d regs) of the same size. For example:
//
- // %vreg0<def> = COPY %vreg1; GPR64:%vreg0, FPR64:%vreg1
+ // %0<def> = COPY %1; GPR64:%0, FPR64:%1
//
// will be filled as
//
- // LDRDui %vreg0, fi<#0>
+ // LDRDui %0, fi<#0>
//
// instead of
//
- // LDRXui %vregTemp, fi<#0>
- // %vreg0 = FMOV %vregTemp
+ // LDRXui %Temp, fi<#0>
+ // %0 = FMOV %Temp
//
if (MI.isCopy() && Ops.size() == 1 &&
// Make sure we're only folding the explicit COPY defs/uses.
@@ -2886,7 +2886,7 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
// Handle cases like spilling def of:
//
- // %vreg0:sub_32<def,read-undef> = COPY %wzr; GPR64common:%vreg0
+ // %0:sub_32<def,read-undef> = COPY %wzr; GPR64common:%0
//
// where the physical register source can be widened and stored to the full
// virtual reg destination stack slot, in this case producing:
@@ -2934,12 +2934,12 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
// Handle cases like filling use of:
//
- // %vreg0:sub_32<def,read-undef> = COPY %vreg1; GPR64:%vreg0, GPR32:%vreg1
+ // %0:sub_32<def,read-undef> = COPY %1; GPR64:%0, GPR32:%1
//
// where we can load the full virtual reg source stack slot, into the subreg
// destination, in this case producing:
//
- // LDRWui %vreg0:sub_32<def,read-undef>, <fi#0>
+ // LDRWui %0:sub_32<def,read-undef>, <fi#0>
//
if (IsFill && SrcMO.getSubReg() == 0 && DstMO.isUndef()) {
const TargetRegisterClass *FillRC;
diff --git a/llvm/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp b/llvm/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
index 972e61d376d..1bfa837bfb2 100644
--- a/llvm/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
+++ b/llvm/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
@@ -12,16 +12,16 @@
/// common data and/or have enough undef subreg using swizzle abilities.
///
/// For instance let's consider the following pseudo code :
-/// vreg5<def> = REG_SEQ vreg1, sub0, vreg2, sub1, vreg3, sub2, undef, sub3
+/// %5<def> = REG_SEQ %1, sub0, %2, sub1, %3, sub2, undef, sub3
/// ...
-/// vreg7<def> = REG_SEQ vreg1, sub0, vreg3, sub1, undef, sub2, vreg4, sub3
-/// (swizzable Inst) vreg7, SwizzleMask : sub0, sub1, sub2, sub3
+/// %7<def> = REG_SEQ %1, sub0, %3, sub1, undef, sub2, %4, sub3
+/// (swizzable Inst) %7, SwizzleMask : sub0, sub1, sub2, sub3
///
/// is turned into :
-/// vreg5<def> = REG_SEQ vreg1, sub0, vreg2, sub1, vreg3, sub2, undef, sub3
+/// %5<def> = REG_SEQ %1, sub0, %2, sub1, %3, sub2, undef, sub3
/// ...
-/// vreg7<def> = INSERT_SUBREG vreg4, sub3
-/// (swizzable Inst) vreg7, SwizzleMask : sub0, sub2, sub1, sub3
+/// %7<def> = INSERT_SUBREG %4, sub3
+/// (swizzable Inst) %7, SwizzleMask : sub0, sub2, sub1, sub3
///
/// This allows regalloc to reduce register pressure for vector registers and
/// to reduce MOV count.
diff --git a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
index 34b1f758f7b..e9b381ce89b 100644
--- a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
@@ -14,46 +14,46 @@
/// Register Class <vsrc> is the union of <vgpr> and <sgpr>
///
/// BB0:
-/// %vreg0 <sgpr> = SCALAR_INST
-/// %vreg1 <vsrc> = COPY %vreg0 <sgpr>
+/// %0 <sgpr> = SCALAR_INST
+/// %1 <vsrc> = COPY %0 <sgpr>
/// ...
/// BRANCH %cond BB1, BB2
/// BB1:
-/// %vreg2 <vgpr> = VECTOR_INST
-/// %vreg3 <vsrc> = COPY %vreg2 <vgpr>
+/// %2 <vgpr> = VECTOR_INST
+/// %3 <vsrc> = COPY %2 <vgpr>
/// BB2:
-/// %vreg4 <vsrc> = PHI %vreg1 <vsrc>, <BB#0>, %vreg3 <vrsc>, <BB#1>
-/// %vreg5 <vgpr> = VECTOR_INST %vreg4 <vsrc>
+/// %4 <vsrc> = PHI %1 <vsrc>, <BB#0>, %3 <vsrc>, <BB#1>
+/// %5 <vgpr> = VECTOR_INST %4 <vsrc>
///
///
/// The coalescer will begin at BB0 and eliminate its copy, then the resulting
/// code will look like this:
///
/// BB0:
-/// %vreg0 <sgpr> = SCALAR_INST
+/// %0 <sgpr> = SCALAR_INST
/// ...
/// BRANCH %cond BB1, BB2
/// BB1:
-/// %vreg2 <vgpr> = VECTOR_INST
-/// %vreg3 <vsrc> = COPY %vreg2 <vgpr>
+/// %2 <vgpr> = VECTOR_INST
+/// %3 <vsrc> = COPY %2 <vgpr>
/// BB2:
-/// %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <vsrc>, <BB#1>
-/// %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
+/// %4 <sgpr> = PHI %0 <sgpr>, <BB#0>, %3 <vsrc>, <BB#1>
+/// %5 <vgpr> = VECTOR_INST %4 <sgpr>
///
/// Now that the result of the PHI instruction is an SGPR, the register
-/// allocator is now forced to constrain the register class of %vreg3 to
+/// allocator is forced to constrain the register class of %3 to
/// <sgpr> so we end up with final code like this:
///
/// BB0:
-/// %vreg0 <sgpr> = SCALAR_INST
+/// %0 <sgpr> = SCALAR_INST
/// ...
/// BRANCH %cond BB1, BB2
/// BB1:
-/// %vreg2 <vgpr> = VECTOR_INST
-/// %vreg3 <sgpr> = COPY %vreg2 <vgpr>
+/// %2 <vgpr> = VECTOR_INST
+/// %3 <sgpr> = COPY %2 <vgpr>
/// BB2:
-/// %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <sgpr>, <BB#1>
-/// %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
+/// %4 <sgpr> = PHI %0 <sgpr>, <BB#0>, %3 <sgpr>, <BB#1>
+/// %5 <vgpr> = VECTOR_INST %4 <sgpr>
///
/// Now this code contains an illegal copy from a VGPR to an SGPR.
///
diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index 2c52e16892c..52157408b36 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -290,11 +290,11 @@ void SIFoldOperands::foldOperand(
// copy since a subregister use tied to a full register def doesn't really
// make sense. e.g. don't fold:
//
- // %vreg1 = COPY %vreg0:sub1
- // %vreg2<tied3> = V_MAC_{F16, F32} %vreg3, %vreg4, %vreg1<tied0>
+ // %1 = COPY %0:sub1
+ // %2<tied3> = V_MAC_{F16, F32} %3, %4, %1<tied0>
//
// into
- // %vreg2<tied3> = V_MAC_{F16, F32} %vreg3, %vreg4, %vreg0:sub1<tied0>
+ // %2<tied3> = V_MAC_{F16, F32} %3, %4, %0:sub1<tied0>
if (UseOp.isTied() && OpToFold.getSubReg() != AMDGPU::NoSubRegister)
return;
}
@@ -971,7 +971,7 @@ bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
// Prevent folding operands backwards in the function. For example,
// the COPY opcode must not be replaced by 1 in this example:
//
- // %vreg3<def> = COPY %vgpr0; VGPR_32:%vreg3
+ // %3<def> = COPY %vgpr0; VGPR_32:%3
// ...
// %vgpr0<def> = V_MOV_B32_e32 1, %exec<imp-use>
MachineOperand &Dst = MI.getOperand(0);
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 5738077f989..bb8fa2c89fb 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -10,12 +10,12 @@
/// \file This pass tries to apply several peephole SDWA patterns.
///
/// E.g. original:
-/// V_LSHRREV_B32_e32 %vreg0, 16, %vreg1
-/// V_ADD_I32_e32 %vreg2, %vreg0, %vreg3
-/// V_LSHLREV_B32_e32 %vreg4, 16, %vreg2
+/// V_LSHRREV_B32_e32 %0, 16, %1
+/// V_ADD_I32_e32 %2, %0, %3
+/// V_LSHLREV_B32_e32 %4, 16, %2
///
/// Replace:
-/// V_ADD_I32_sdwa %vreg4, %vreg1, %vreg3
+/// V_ADD_I32_sdwa %4, %1, %3
/// dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
///
//===----------------------------------------------------------------------===//
@@ -410,7 +410,7 @@ Optional<int64_t> SIPeepholeSDWA::foldToImm(const MachineOperand &Op) const {
}
// If this is not immediate then it can be copy of immediate value, e.g.:
- // %vreg1<def> = S_MOV_B32 255;
+ // %1<def> = S_MOV_B32 255;
if (Op.isReg()) {
for (const MachineOperand &Def : MRI->def_operands(Op.getReg())) {
if (!isSameReg(Op, Def))
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
index 152b24599e9..4407a9d0f37 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -1347,13 +1347,13 @@ bool SIRegisterInfo::shouldRewriteCopySrc(
// class.
//
// e.g. if we have something like
- // vreg0 = ...
- // vreg1 = ...
- // vreg2 = REG_SEQUENCE vreg0, sub0, vreg1, sub1, vreg2, sub2
- // vreg3 = COPY vreg2, sub0
+ // %0 = ...
+ // %1 = ...
+ // %2 = REG_SEQUENCE %0, sub0, %1, sub1, %2, sub2
+ // %3 = COPY %2, sub0
//
// We want to look through the COPY to find:
- // => vreg3 = COPY vreg0
+ // => %3 = COPY %0
// Plain copy.
return getCommonSubClass(DefRC, SrcRC) != nullptr;
diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
index 6268b9ef2a3..f9505beea20 100644
--- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -1650,7 +1650,7 @@ bool ARMBaseInstrInfo::produceSameValue(const MachineInstr &MI0,
}
for (unsigned i = 3, e = MI0.getNumOperands(); i != e; ++i) {
- // %vreg12<def> = PICLDR %vreg11, 0, pred:14, pred:%noreg
+ // %12<def> = PICLDR %11, 0, pred:14, pred:%noreg
const MachineOperand &MO0 = MI0.getOperand(i);
const MachineOperand &MO1 = MI1.getOperand(i);
if (!MO0.isIdenticalTo(MO1))
diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.h b/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
index 2ff4b1100ee..d375f40d6e1 100644
--- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
+++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
@@ -47,10 +47,10 @@ protected:
/// and \p DefIdx.
/// \p [out] InputRegs of the equivalent REG_SEQUENCE. Each element of
/// the list is modeled as <Reg:SubReg, SubIdx>.
- /// E.g., REG_SEQUENCE vreg1:sub1, sub0, vreg2, sub1 would produce
+ /// E.g., REG_SEQUENCE %1:sub1, sub0, %2, sub1 would produce
/// two elements:
- /// - vreg1:sub1, sub0
- /// - vreg2<:0>, sub1
+ /// - %1:sub1, sub0
+ /// - %2<:0>, sub1
///
/// \returns true if it is possible to build such an input sequence
/// with the pair \p MI, \p DefIdx. False otherwise.
@@ -63,8 +63,8 @@ protected:
/// Build the equivalent inputs of a EXTRACT_SUBREG for the given \p MI
/// and \p DefIdx.
/// \p [out] InputReg of the equivalent EXTRACT_SUBREG.
- /// E.g., EXTRACT_SUBREG vreg1:sub1, sub0, sub1 would produce:
- /// - vreg1:sub1, sub0
+ /// E.g., EXTRACT_SUBREG %1:sub1, sub0, sub1 would produce:
+ /// - %1:sub1, sub0
///
/// \returns true if it is possible to build such an input sequence
/// with the pair \p MI, \p DefIdx. False otherwise.
@@ -77,9 +77,9 @@ protected:
/// and \p DefIdx.
/// \p [out] BaseReg and \p [out] InsertedReg contain
/// the equivalent inputs of INSERT_SUBREG.
- /// E.g., INSERT_SUBREG vreg0:sub0, vreg1:sub1, sub3 would produce:
- /// - BaseReg: vreg0:sub0
- /// - InsertedReg: vreg1:sub1, sub3
+ /// E.g., INSERT_SUBREG %0:sub0, %1:sub1, sub3 would produce:
+ /// - BaseReg: %0:sub0
+ /// - InsertedReg: %1:sub1, sub3
///
/// \returns true if it is possible to build such an input sequence
/// with the pair \p MI, \p DefIdx. False otherwise.
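A toy decomposition matching the documented output format, with subregister indices modeled as small integers and -1 standing for "no source subregister" (an assumption made only for this illustration):

    #include <cstdio>
    #include <vector>

    // One input of a REG_SEQUENCE-like instruction: a source register with
    // an optional subregister, plus the destination subregister index.
    struct Input { unsigned Reg; int SrcSub; int DstSub; }; // SrcSub -1: none

    int main() {
      // REG_SEQUENCE %1:sub1, sub0, %2, sub1 (sub0/sub1 modeled as 0/1)
      std::vector<Input> Inputs = {{1, 1, 0}, {2, -1, 1}};
      for (const Input &In : Inputs) {
        if (In.SrcSub >= 0)
          std::printf("- %%%u:sub%d, sub%d\n", In.Reg, In.SrcSub, In.DstSub);
        else
          std::printf("- %%%u, sub%d\n", In.Reg, In.DstSub);
      }
      // prints the two elements listed in the doc comment above
    }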
diff --git a/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp b/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp
index 1c12c23c931..ef52bae3d76 100644
--- a/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp
+++ b/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp
@@ -546,7 +546,7 @@ void BPFDAGToDAGISel::PreprocessTrunc(SDNode *Node,
if (!RegN || !TargetRegisterInfo::isVirtualRegister(RegN->getReg()))
return;
unsigned AndOpReg = RegN->getReg();
- DEBUG(dbgs() << "Examine %vreg" << TargetRegisterInfo::virtReg2Index(AndOpReg)
+ DEBUG(dbgs() << "Examine %" << TargetRegisterInfo::virtReg2Index(AndOpReg)
<< '\n');
// Examine the PHI insns in the MachineBasicBlock to find out the
@@ -574,9 +574,9 @@ void BPFDAGToDAGISel::PreprocessTrunc(SDNode *Node,
return;
} else {
// The PHI node looks like:
- // %vreg2<def> = PHI %vreg0, <BB#1>, %vreg1, <BB#3>
- // Trace each incoming definition, e.g., (%vreg0, BB#1) and (%vreg1, BB#3)
- // The AND operation can be removed if both %vreg0 in BB#1 and %vreg1 in
+ // %2<def> = PHI %0, <BB#1>, %1, <BB#3>
+ // Trace each incoming definition, e.g., (%0, BB#1) and (%1, BB#3)
+ // The AND operation can be removed if both %0 in BB#1 and %1 in
// BB#3 are defined with a load matching the MaskN.
DEBUG(dbgs() << "Check PHI Insn: "; MII->dump(); dbgs() << '\n');
unsigned PrevReg = -1;
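The (incoming register, predecessor block) pairing described above can be sketched standalone (toy data, not the real MachineInstr operand API):

    #include <cstdio>
    #include <utility>
    #include <vector>

    int main() {
      // %2 = PHI %0, <BB#1>, %1, <BB#3>: a def followed by
      // (incoming register, predecessor block) pairs.
      unsigned PhiDef = 2;
      std::vector<std::pair<unsigned, unsigned>> Incoming = {{0, 1}, {1, 3}};
      for (const auto &In : Incoming)
        std::printf("trace def of %%%u in BB#%u feeding %%%u\n", In.first,
                    In.second, PhiDef);
    }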
diff --git a/llvm/lib/Target/Hexagon/BitTracker.cpp b/llvm/lib/Target/Hexagon/BitTracker.cpp
index 5e20d8ca0fd..4a10408d8c7 100644
--- a/llvm/lib/Target/Hexagon/BitTracker.cpp
+++ b/llvm/lib/Target/Hexagon/BitTracker.cpp
@@ -18,16 +18,16 @@
// A "ref" value is associated with a BitRef structure, which indicates
// which virtual register, and which bit in that register is the origin
// of the value. For example, given an instruction
-// vreg2 = ASL vreg1, 1
-// assuming that nothing is known about bits of vreg1, bit 1 of vreg2
-// will be a "ref" to (vreg1, 0). If there is a subsequent instruction
-// vreg3 = ASL vreg2, 2
-// then bit 3 of vreg3 will be a "ref" to (vreg1, 0) as well.
+// %2 = ASL %1, 1
+// assuming that nothing is known about bits of %1, bit 1 of %2
+// will be a "ref" to (%1, 0). If there is a subsequent instruction
+// %3 = ASL %2, 2
+// then bit 3 of %3 will be a "ref" to (%1, 0) as well.
// The "bottom" case means that the bit's value cannot be determined,
// and that this virtual register actually defines it. The "bottom" case
// is discussed in detail in BitTracker.h. In fact, "bottom" is a "ref
-// to self", so for the vreg1 above, the bit 0 of it will be a "ref" to
-// (vreg1, 0), bit 1 will be a "ref" to (vreg1, 1), etc.
+// to self", so for the %1 above, the bit 0 of it will be a "ref" to
+// (%1, 0), bit 1 will be a "ref" to (%1, 1), etc.
//
// The tracker implements the Wegman-Zadeck algorithm, originally developed
// for SSA-based constant propagation. Each register is represented as
@@ -75,7 +75,7 @@ using BT = BitTracker;
namespace {
- // Local trickery to pretty print a register (without the whole "%vreg"
+ // Local trickery to pretty print a register (without the whole "%number"
// business).
struct printv {
printv(unsigned r) : R(r) {}
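A minimal standalone rendition of the "ref" representation discussed in the header comment above (field and variable names are illustrative, not BitTracker's actual types):

    #include <cstdio>

    // A "ref" bit value: which register, and which bit of it, originates
    // the value. "Bottom" would be modeled as a reference to itself.
    struct BitRef {
      unsigned Reg;
      unsigned Pos;
    };

    int main() {
      // %2 = ASL %1, 1  ==> bit 1 of %2 is a ref to (%1, 0).
      BitRef Vreg2Bit1{1, 0};
      // %3 = ASL %2, 2  ==> bit 3 of %3 chases the chain to the same origin.
      BitRef Vreg3Bit3 = Vreg2Bit1;
      std::printf("bit 3 of %%3 is a ref to (%%%u, %u)\n", Vreg3Bit3.Reg,
                  Vreg3Bit3.Pos);
    }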
diff --git a/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp b/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp
index cbf1b0dc040..d3cb53e3594 100644
--- a/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp
@@ -895,7 +895,7 @@ bool HexagonBitSimplify::getUsedBits(unsigned Opc, unsigned OpN,
}
// Calculate the register class that matches Reg:Sub. For example, if
-// vreg1 is a double register, then vreg1:isub_hi would match the "int"
+// %1 is a double register, then %1:isub_hi would match the "int"
// register class.
const TargetRegisterClass *HexagonBitSimplify::getFinalVRegClass(
const BitTracker::RegisterRef &RR, MachineRegisterInfo &MRI) {
@@ -1246,11 +1246,11 @@ bool RedundantInstrElimination::computeUsedBits(unsigned Reg, BitVector &Bits) {
// holds the bits for the entire register. To keep track of that, the
// argument Begin indicates where in Bits is the lowest-significant bit
// of the register used in operand OpN. For example, in instruction:
-// vreg1 = S2_lsr_i_r vreg2:isub_hi, 10
+// %1 = S2_lsr_i_r %2:isub_hi, 10
// the operand 1 is a 32-bit register, which happens to be a subregister
-// of the 64-bit register vreg2, and that subregister starts at position 32.
+// of the 64-bit register %2, and that subregister starts at position 32.
// In this case Begin=32, since Bits[32] would be the lowest-significant bit
-// of vreg2:isub_hi.
+// of %2:isub_hi.
bool RedundantInstrElimination::computeUsedBits(const MachineInstr &MI,
unsigned OpN, BitVector &Bits, uint16_t Begin) {
unsigned Opc = MI.getOpcode();
@@ -1356,11 +1356,11 @@ bool RedundantInstrElimination::processBlock(MachineBasicBlock &B,
// This pass can create copies between registers that don't have the
// exact same values. Updating the tracker has to involve updating
// all dependent cells. Example:
- // vreg1 = inst vreg2 ; vreg1 != vreg2, but used bits are equal
+ // %1 = inst %2 ; %1 != %2, but used bits are equal
//
- // vreg3 = copy vreg2 ; <- inserted
- // ... = vreg3 ; <- replaced from vreg2
- // Indirectly, we can create a "copy" between vreg1 and vreg2 even
+ // %3 = copy %2 ; <- inserted
+ // ... = %3 ; <- replaced from %2
+ // Indirectly, we can create a "copy" between %1 and %2 even
// though their exact values do not match.
BT.visit(*CopyI);
Changed = true;
@@ -2313,10 +2313,10 @@ bool BitSimplification::genBitSplit(MachineInstr *MI,
// Check for tstbit simplification opportunity, where the bit being checked
// can be tracked back to another register. For example:
-// vreg2 = S2_lsr_i_r vreg1, 5
-// vreg3 = S2_tstbit_i vreg2, 0
+// %2 = S2_lsr_i_r %1, 5
+// %3 = S2_tstbit_i %2, 0
// =>
-// vreg3 = S2_tstbit_i vreg1, 5
+// %3 = S2_tstbit_i %1, 5
bool BitSimplification::simplifyTstbit(MachineInstr *MI,
BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
unsigned Opc = MI->getOpcode();
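The rewrite above is justified by a simple bit identity, bit 0 of (x >> 5) is bit 5 of x, which a standalone check demonstrates for an arbitrary value:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t R1 = 0xDEADBEEF;     // any value of %1
      uint32_t R2 = R1 >> 5;        // %2 = S2_lsr_i_r %1, 5
      bool Before = (R2 >> 0) & 1;  // %3 = S2_tstbit_i %2, 0
      bool After  = (R1 >> 5) & 1;  // %3 = S2_tstbit_i %1, 5
      std::printf("%d == %d\n", Before, After); // identical for every %1
    }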
diff --git a/llvm/lib/Target/Hexagon/HexagonBlockRanges.cpp b/llvm/lib/Target/Hexagon/HexagonBlockRanges.cpp
index 9ca7e5f0a3c..1953439fc3e 100644
--- a/llvm/lib/Target/Hexagon/HexagonBlockRanges.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonBlockRanges.cpp
@@ -368,7 +368,7 @@ void HexagonBlockRanges::computeInitialLiveRanges(InstrIndexMap &IndexMap,
}
}
// Defs and clobbers can overlap, e.g.
- // %d0<def,dead> = COPY %vreg5, %r0<imp-def>, %r1<imp-def>
+ // %d0<def,dead> = COPY %5, %r0<imp-def>, %r1<imp-def>
for (RegisterRef R : Defs)
Clobbers.erase(R);
diff --git a/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp b/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
index e7c3290d151..9a8762a48fd 100644
--- a/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
@@ -1974,7 +1974,7 @@ bool HexagonConstEvaluator::evaluate(const MachineInstr &MI,
{
const MachineOperand &VO = MI.getOperand(1);
// The operand of CONST32 can be a blockaddress, e.g.
- // %vreg0<def> = CONST32 <blockaddress(@eat, %l)>
+ // %0<def> = CONST32 <blockaddress(@eat, %l)>
// Do this check for all instructions for safety.
if (!VO.isImm())
return false;
diff --git a/llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp b/llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
index b2244107ac4..4a6100d02fc 100644
--- a/llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
@@ -25,37 +25,37 @@
//
// Example:
//
-// %vreg40<def> = L2_loadrub_io %vreg39<kill>, 1
-// %vreg41<def> = S2_tstbit_i %vreg40<kill>, 0
-// J2_jumpt %vreg41<kill>, <BB#5>, %pc<imp-def,dead>
+// %40<def> = L2_loadrub_io %39<kill>, 1
+// %41<def> = S2_tstbit_i %40<kill>, 0
+// J2_jumpt %41<kill>, <BB#5>, %pc<imp-def,dead>
// J2_jump <BB#4>, %pc<imp-def,dead>
// Successors according to CFG: BB#4(62) BB#5(62)
//
// BB#4: derived from LLVM BB %if.then
// Predecessors according to CFG: BB#3
-// %vreg11<def> = A2_addp %vreg6, %vreg10
-// S2_storerd_io %vreg32, 16, %vreg11
+// %11<def> = A2_addp %6, %10
+// S2_storerd_io %32, 16, %11
// Successors according to CFG: BB#5
//
// BB#5: derived from LLVM BB %if.end
// Predecessors according to CFG: BB#3 BB#4
-// %vreg12<def> = PHI %vreg6, <BB#3>, %vreg11, <BB#4>
-// %vreg13<def> = A2_addp %vreg7, %vreg12
-// %vreg42<def> = C2_cmpeqi %vreg9, 10
-// J2_jumpf %vreg42<kill>, <BB#3>, %pc<imp-def,dead>
+// %12<def> = PHI %6, <BB#3>, %11, <BB#4>
+// %13<def> = A2_addp %7, %12
+// %42<def> = C2_cmpeqi %9, 10
+// J2_jumpf %42<kill>, <BB#3>, %pc<imp-def,dead>
// J2_jump <BB#6>, %pc<imp-def,dead>
// Successors according to CFG: BB#6(4) BB#3(124)
//
// would become:
//
-// %vreg40<def> = L2_loadrub_io %vreg39<kill>, 1
-// %vreg41<def> = S2_tstbit_i %vreg40<kill>, 0
-// spec-> %vreg11<def> = A2_addp %vreg6, %vreg10
-// pred-> S2_pstorerdf_io %vreg41, %vreg32, 16, %vreg11
-// %vreg46<def> = PS_pselect %vreg41, %vreg6, %vreg11
-// %vreg13<def> = A2_addp %vreg7, %vreg46
-// %vreg42<def> = C2_cmpeqi %vreg9, 10
-// J2_jumpf %vreg42<kill>, <BB#3>, %pc<imp-def,dead>
+// %40<def> = L2_loadrub_io %39<kill>, 1
+// %41<def> = S2_tstbit_i %40<kill>, 0
+// spec-> %11<def> = A2_addp %6, %10
+// pred-> S2_pstorerdf_io %41, %32, 16, %11
+// %46<def> = PS_pselect %41, %6, %11
+// %13<def> = A2_addp %7, %46
+// %42<def> = C2_cmpeqi %9, 10
+// J2_jumpf %42<kill>, <BB#3>, %pc<imp-def,dead>
// J2_jump <BB#6>, %pc<imp-def,dead>
// Successors according to CFG: BB#6 BB#3
diff --git a/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp b/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp
index 51c3b784370..86645ddf913 100644
--- a/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp
@@ -17,33 +17,33 @@
//
// Liveness tracking aside, the main functionality of this pass is divided
// into two steps. The first step is to replace an instruction
-// vreg0 = C2_mux vreg1, vreg2, vreg3
+// %0 = C2_mux %1, %2, %3
// with a pair of conditional transfers
-// vreg0 = A2_tfrt vreg1, vreg2
-// vreg0 = A2_tfrf vreg1, vreg3
+// %0 = A2_tfrt %1, %2
+// %0 = A2_tfrf %1, %3
// It is the intention that the execution of this pass could be terminated
// after this step, and the code generated would be functionally correct.
//
-// If the uses of the source values vreg1 and vreg2 are kills, and their
+// If the uses of the source values %1 and %2 are kills, and their
// definitions are predicable, then in the second step, the conditional
// transfers will then be rewritten as predicated instructions. E.g.
-// vreg0 = A2_or vreg1, vreg2
-// vreg3 = A2_tfrt vreg99, vreg0<kill>
+// %0 = A2_or %1, %2
+// %3 = A2_tfrt %99, %0<kill>
// will be rewritten as
-// vreg3 = A2_port vreg99, vreg1, vreg2
+// %3 = A2_port %99, %1, %2
//
// This replacement has two variants: "up" and "down". Consider this case:
-// vreg0 = A2_or vreg1, vreg2
+// %0 = A2_or %1, %2
// ... [intervening instructions] ...
-// vreg3 = A2_tfrt vreg99, vreg0<kill>
+// %3 = A2_tfrt %99, %0<kill>
// variant "up":
-// vreg3 = A2_port vreg99, vreg1, vreg2
-// ... [intervening instructions, vreg0->vreg3] ...
+// %3 = A2_port %99, %1, %2
+// ... [intervening instructions, %0->%3] ...
// [deleted]
// variant "down":
// [deleted]
// ... [intervening instructions] ...
-// vreg3 = A2_port vreg99, vreg1, vreg2
+// %3 = A2_port %99, %1, %2
//
// Both, one or none of these variants may be valid, and checks are made
// to rule out inapplicable variants.
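The first step's correctness rests on the two conditional transfers together computing the same value as the mux; a toy standalone model, with C2_mux treated as a plain select:

    #include <cstdio>

    static int mux(bool P, int T, int F) { return P ? T : F; }

    static int twoTransfers(bool P, int T, int F) {
      int R = 0;
      if (P)  R = T;  // %0 = A2_tfrt %1, %2
      if (!P) R = F;  // %0 = A2_tfrf %1, %3
      return R;
    }

    int main() {
      for (int P = 0; P <= 1; ++P)
        std::printf("p=%d: mux=%d split=%d\n", P, mux(P, 10, 20),
                    twoTransfers(P, 10, 20)); // both columns agree
    }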
@@ -51,13 +51,13 @@
// As an additional optimization, before either of the two steps above is
// executed, the pass attempts to coalesce the target register with one of
// the source registers, e.g. given an instruction
-// vreg3 = C2_mux vreg0, vreg1, vreg2
-// vreg3 will be coalesced with either vreg1 or vreg2. If this succeeds,
+// %3 = C2_mux %0, %1, %2
+// %3 will be coalesced with either %1 or %2. If this succeeds,
// the instruction would then be (for example)
-// vreg3 = C2_mux vreg0, vreg3, vreg2
+// %3 = C2_mux %0, %3, %2
// and, under certain circumstances, this could result in only one predicated
// instruction:
-// vreg3 = A2_tfrf vreg0, vreg2
+// %3 = A2_tfrf %0, %2
//
// Splitting a definition of a register into two predicated transfers
@@ -65,18 +65,18 @@
// will see both instructions as actual definitions, and will mark the
// first one as dead. The definition is not actually dead, and this
// situation will need to be fixed. For example:
-// vreg1<def,dead> = A2_tfrt ... ; marked as dead
-// vreg1<def> = A2_tfrf ...
+// %1<def,dead> = A2_tfrt ... ; marked as dead
+// %1<def> = A2_tfrf ...
//
// Since any of the individual predicated transfers may end up getting
// removed (in case it is an identity copy), some pre-existing def may
// be marked as dead after live interval recomputation:
-// vreg1<def,dead> = ... ; marked as dead
+// %1<def,dead> = ... ; marked as dead
// ...
-// vreg1<def> = A2_tfrf ... ; if A2_tfrt is removed
-// This case happens if vreg1 was used as a source in A2_tfrt, which means
+// %1<def> = A2_tfrf ... ; if A2_tfrt is removed
+// This case happens if %1 was used as a source in A2_tfrt, which means
// that it is actually live at the A2_tfrf, and so the now dead definition
-// of vreg1 will need to be updated to non-dead at some point.
+// of %1 will need to be updated to non-dead at some point.
//
// This issue could be remedied by adding implicit uses to the predicated
// transfers, but this will create a problem with subsequent predication,
@@ -760,8 +760,8 @@ MachineInstr *HexagonExpandCondsets::getReachingDefForPred(RegisterRef RD,
if (RR.Reg != RD.Reg)
continue;
// If the "Reg" part agrees, there is still the subregister to check.
- // If we are looking for vreg1:loreg, we can skip vreg1:hireg, but
- // not vreg1 (w/o subregisters).
+ // If we are looking for %1:loreg, we can skip %1:hireg, but
+ // not %1 (w/o subregisters).
if (RR.Sub == RD.Sub)
return MI;
if (RR.Sub == 0 || RD.Sub == 0)
@@ -1071,7 +1071,7 @@ bool HexagonExpandCondsets::predicateInBlock(MachineBasicBlock &B,
bool Done = predicate(*I, (Opc == Hexagon::A2_tfrt), UpdRegs);
if (!Done) {
// If we didn't predicate I, we may need to remove it in case it is
- // an "identity" copy, e.g. vreg1 = A2_tfrt vreg2, vreg1.
+ // an "identity" copy, e.g. %1 = A2_tfrt %2, %1.
if (RegisterRef(I->getOperand(0)) == RegisterRef(I->getOperand(2))) {
for (auto &Op : I->operands())
if (Op.isReg())
@@ -1198,18 +1198,18 @@ bool HexagonExpandCondsets::coalesceSegments(
MachineOperand &S1 = CI->getOperand(2), &S2 = CI->getOperand(3);
bool Done = false;
// Consider this case:
- // vreg1 = instr1 ...
- // vreg2 = instr2 ...
- // vreg0 = C2_mux ..., vreg1, vreg2
- // If vreg0 was coalesced with vreg1, we could end up with the following
+ // %1 = instr1 ...
+ // %2 = instr2 ...
+ // %0 = C2_mux ..., %1, %2
+ // If %0 was coalesced with %1, we could end up with the following
// code:
- // vreg0 = instr1 ...
- // vreg2 = instr2 ...
- // vreg0 = A2_tfrf ..., vreg2
+ // %0 = instr1 ...
+ // %2 = instr2 ...
+ // %0 = A2_tfrf ..., %2
// which will later become:
- // vreg0 = instr1 ...
- // vreg0 = instr2_cNotPt ...
- // i.e. there will be an unconditional definition (instr1) of vreg0
+ // %0 = instr1 ...
+ // %0 = instr2_cNotPt ...
+ // i.e. there will be an unconditional definition (instr1) of %0
// followed by a conditional one. The output dependency was there before
// and it is unavoidable, but if instr1 is predicable, we will no longer be
// able to predicate it here.
diff --git a/llvm/lib/Target/Hexagon/HexagonGenInsert.cpp b/llvm/lib/Target/Hexagon/HexagonGenInsert.cpp
index 09d3e6d4a15..d1f63699292 100644
--- a/llvm/lib/Target/Hexagon/HexagonGenInsert.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonGenInsert.cpp
@@ -1106,10 +1106,10 @@ void HexagonGenInsert::pruneCoveredSets(unsigned VR) {
// Now, remove those whose sets of potentially removable registers are
// contained in another IF candidate for VR. For example, given these
- // candidates for vreg45,
- // %vreg45:
- // (%vreg44,%vreg41,#9,#8), { %vreg42 }
- // (%vreg43,%vreg41,#9,#8), { %vreg42 %vreg44 }
+ // candidates for %45,
+ // %45:
+ // (%44,%41,#9,#8), { %42 }
+ // (%43,%41,#9,#8), { %42 %44 }
// remove the first one, since it is contained in the second one.
for (unsigned i = 0, n = LL.size(); i < n; ) {
const RegisterSet &RMi = LL[i].second;
diff --git a/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp b/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp
index 56171f22148..5c18cc8732d 100644
--- a/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp
@@ -1622,8 +1622,8 @@ bool HexagonHardwareLoops::fixupInductionVariable(MachineLoop *L) {
RegisterInductionSet IndRegs;
// Look for induction patterns:
- // vreg1 = PHI ..., [ latch, vreg2 ]
- // vreg2 = ADD vreg1, imm
+ // %1 = PHI ..., [ latch, %2 ]
+ // %2 = ADD %1, imm
using instr_iterator = MachineBasicBlock::instr_iterator;
for (instr_iterator I = Header->instr_begin(), E = Header->instr_end();
@@ -1720,7 +1720,7 @@ bool HexagonHardwareLoops::fixupInductionVariable(MachineLoop *L) {
MachineOperand &MO = PredDef->getOperand(i);
if (MO.isReg()) {
// Skip all implicit references. In one case there was:
- // %vreg140<def> = FCMPUGT32_rr %vreg138, %vreg139, %usr<imp-use>
+ // %140<def> = FCMPUGT32_rr %138, %139, %usr<imp-use>
if (MO.isImplicit())
continue;
if (MO.isUse()) {
diff --git a/llvm/lib/Target/Hexagon/HexagonPeephole.cpp b/llvm/lib/Target/Hexagon/HexagonPeephole.cpp
index 0ef0e78c524..354bb95e448 100644
--- a/llvm/lib/Target/Hexagon/HexagonPeephole.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonPeephole.cpp
@@ -8,27 +8,27 @@
// This peephole pass optimizes in the following cases.
// 1. Optimizes redundant sign extends for the following case
// Transform the following pattern
-// %vreg170<def> = SXTW %vreg166
+// %170<def> = SXTW %166
// ...
-// %vreg176<def> = COPY %vreg170:isub_lo
+// %176<def> = COPY %170:isub_lo
//
// Into
-// %vreg176<def> = COPY vreg166
+// %176<def> = COPY %166
//
// 2. Optimizes redundant negation of predicates.
-// %vreg15<def> = CMPGTrr %vreg6, %vreg2
+// %15<def> = CMPGTrr %6, %2
// ...
-// %vreg16<def> = NOT_p %vreg15<kill>
+// %16<def> = NOT_p %15<kill>
// ...
-// JMP_c %vreg16<kill>, <BB#1>, %pc<imp-def,dead>
+// JMP_c %16<kill>, <BB#1>, %pc<imp-def,dead>
//
// Into
-// %vreg15<def> = CMPGTrr %vreg6, %vreg2;
+// %15<def> = CMPGTrr %6, %2;
// ...
-// JMP_cNot %vreg15<kill>, <BB#1>, %pc<imp-def,dead>;
+// JMP_cNot %15<kill>, <BB#1>, %pc<imp-def,dead>;
//
// Note: The peephole pass makes the instructions like
-// %vreg170<def> = SXTW %vreg166 or %vreg16<def> = NOT_p %vreg15<kill>
+// %170<def> = SXTW %166 or %16<def> = NOT_p %15<kill>
// redundant and relies on some form of dead-instruction removal, like
// DCE or DIE, to actually eliminate them.
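Case 1's bookkeeping, sketched standalone with a plain map in place of the pass's real structures (the register numbers are taken from the example above):

    #include <cstdio>
    #include <unordered_map>

    int main() {
      std::unordered_map<unsigned, unsigned> PeepholeMap; // SXTW dst -> src

      PeepholeMap[170] = 166;        // saw %170 = SXTW %166

      unsigned CopySrc = 170;        // later: %176 = COPY %170:isub_lo
      auto It = PeepholeMap.find(CopySrc);
      if (It != PeepholeMap.end())   // rewrite to copy straight from %166
        std::printf("%%176 = COPY %%%u\n", It->second);
    }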
@@ -133,7 +133,7 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
NextI = std::next(I);
MachineInstr &MI = *I;
// Look for sign extends:
- // %vreg170<def> = SXTW %vreg166
+ // %170<def> = SXTW %166
if (!DisableOptSZExt && MI.getOpcode() == Hexagon::A2_sxtw) {
assert(MI.getNumOperands() == 2);
MachineOperand &Dst = MI.getOperand(0);
@@ -144,14 +144,14 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
if (TargetRegisterInfo::isVirtualRegister(DstReg) &&
TargetRegisterInfo::isVirtualRegister(SrcReg)) {
// Map the following:
- // %vreg170<def> = SXTW %vreg166
- // PeepholeMap[170] = vreg166
+ // %170<def> = SXTW %166
+ // PeepholeMap[170] = %166
PeepholeMap[DstReg] = SrcReg;
}
}
- // Look for %vreg170<def> = COMBINE_ir_V4 (0, %vreg169)
- // %vreg170:DoublRegs, %vreg169:IntRegs
+ // Look for %170<def> = COMBINE_ir_V4 (0, %169)
+ // %170:DoublRegs, %169:IntRegs
if (!DisableOptExtTo64 && MI.getOpcode() == Hexagon::A4_combineir) {
assert(MI.getNumOperands() == 3);
MachineOperand &Dst = MI.getOperand(0);
@@ -165,10 +165,10 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
}
// Look for this sequence below
- // %vregDoubleReg1 = LSRd_ri %vregDoubleReg0, 32
- // %vregIntReg = COPY %vregDoubleReg1:isub_lo.
+ // %DoubleReg1 = LSRd_ri %DoubleReg0, 32
+ // %IntReg = COPY %DoubleReg1:isub_lo.
// and convert into
- // %vregIntReg = COPY %vregDoubleReg0:isub_hi.
+ // %IntReg = COPY %DoubleReg0:isub_hi.
if (MI.getOpcode() == Hexagon::S2_lsr_i_p) {
assert(MI.getNumOperands() == 3);
MachineOperand &Dst = MI.getOperand(0);
@@ -193,14 +193,14 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
if (TargetRegisterInfo::isVirtualRegister(DstReg) &&
TargetRegisterInfo::isVirtualRegister(SrcReg)) {
// Map the following:
- // %vreg170<def> = NOT_xx %vreg166
- // PeepholeMap[170] = vreg166
+ // %170<def> = NOT_xx %166
+ // PeepholeMap[170] = %166
PeepholeMap[DstReg] = SrcReg;
}
}
// Look for copy:
- // %vreg176<def> = COPY %vreg170:isub_lo
+ // %176<def> = COPY %170:isub_lo
if (!DisableOptSZExt && MI.isCopy()) {
assert(MI.getNumOperands() == 2);
MachineOperand &Dst = MI.getOperand(0);
diff --git a/llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp b/llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp
index d1816cbc752..fb3e6a0fb10 100644
--- a/llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp
@@ -9,10 +9,10 @@
// Replace sequences of "narrow" stores to adjacent memory locations with
// fewer "wide" stores that have the same effect.
// For example, replace:
-// S4_storeirb_io %vreg100, 0, 0 ; store-immediate-byte
-// S4_storeirb_io %vreg100, 1, 0 ; store-immediate-byte
+// S4_storeirb_io %100, 0, 0 ; store-immediate-byte
+// S4_storeirb_io %100, 1, 0 ; store-immediate-byte
// with
-// S4_storeirh_io %vreg100, 0, 0 ; store-immediate-halfword
+// S4_storeirh_io %100, 0, 0 ; store-immediate-halfword
// The above is the general idea. The actual cases handled by the code
// may be a bit more complex.
// The purpose of this pass is to reduce the number of outstanding stores,
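For the byte-store example above, the wide immediate is the little-endian combination of the narrow ones (Hexagon is little-endian); a standalone sketch of the arithmetic:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Two adjacent byte stores of immediates at offsets 0 and 1 ...
      uint8_t Byte0 = 0, Byte1 = 0;
      // ... become one halfword store at offset 0 whose immediate is the
      // little-endian combination of the two bytes.
      uint16_t Half = static_cast<uint16_t>(Byte0 | (Byte1 << 8));
      std::printf("S4_storeirh_io %%100, 0, %u\n", (unsigned)Half);
    }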
diff --git a/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp b/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
index 7eed2898f61..7596bb5a435 100644
--- a/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
@@ -223,8 +223,8 @@ void HexagonSubtarget::CallMutation::apply(ScheduleDAGInstrs *DAG) {
// both the return value and the argument for the next call being in %r0.
// Example:
// 1: <call1>
- // 2: %vregX = COPY %r0
- // 3: <use of %vregX>
+ // 2: %vreg = COPY %r0
+ // 3: <use of %vreg>
// 4: %r0 = ...
// 5: <call2>
// The scheduler would often swap 3 and 4, so an additional register is
@@ -234,12 +234,12 @@ void HexagonSubtarget::CallMutation::apply(ScheduleDAGInstrs *DAG) {
const MachineInstr *MI = DAG->SUnits[su].getInstr();
if (MI->isCopy() && (MI->readsRegister(Hexagon::R0, &TRI) ||
MI->readsRegister(Hexagon::V0, &TRI))) {
- // %vregX = COPY %r0
+ // %vreg = COPY %r0
VRegHoldingRet = MI->getOperand(0).getReg();
RetRegister = MI->getOperand(1).getReg();
LastUseOfRet = nullptr;
} else if (VRegHoldingRet && MI->readsVirtualRegister(VRegHoldingRet))
- // <use of %vregX>
+ // <use of %vreg>
LastUseOfRet = &DAG->SUnits[su];
else if (LastUseOfRet && MI->definesRegister(RetRegister, &TRI))
// %r0 = ...
diff --git a/llvm/lib/Target/NVPTX/NVPTXPeephole.cpp b/llvm/lib/Target/NVPTX/NVPTXPeephole.cpp
index 7258e818e72..f33655a16c2 100644
--- a/llvm/lib/Target/NVPTX/NVPTXPeephole.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXPeephole.cpp
@@ -22,11 +22,11 @@
// This peephole pass optimizes these cases, for example
//
// It will transform the following pattern
-// %vreg0<def> = LEA_ADDRi64 %VRFrame, 4
-// %vreg1<def> = cvta_to_local_yes_64 %vreg0
+// %0<def> = LEA_ADDRi64 %VRFrame, 4
+// %1<def> = cvta_to_local_yes_64 %0
//
// into
-// %vreg1<def> = LEA_ADDRi64 %VRFrameLocal, 4
+// %1<def> = LEA_ADDRi64 %VRFrameLocal, 4
//
// %VRFrameLocal is the virtual register name of %SPL
//
diff --git a/llvm/lib/Target/PowerPC/PPCBranchCoalescing.cpp b/llvm/lib/Target/PowerPC/PPCBranchCoalescing.cpp
index 2af1913db55..4c101f58601 100644
--- a/llvm/lib/Target/PowerPC/PPCBranchCoalescing.cpp
+++ b/llvm/lib/Target/PowerPC/PPCBranchCoalescing.cpp
@@ -62,11 +62,11 @@ namespace llvm {
/// BB#0: derived from LLVM BB %entry
/// Live Ins: %f1 %f3 %x6
/// <SNIP1>
-/// %vreg0<def> = COPY %f1; F8RC:%vreg0
-/// %vreg5<def> = CMPLWI %vreg4<kill>, 0; CRRC:%vreg5 GPRC:%vreg4
-/// %vreg8<def> = LXSDX %zero8, %vreg7<kill>, %rm<imp-use>;
-/// mem:LD8[ConstantPool] F8RC:%vreg8 G8RC:%vreg7
-/// BCC 76, %vreg5, <BB#2>; CRRC:%vreg5
+/// %0<def> = COPY %f1; F8RC:%0
+/// %5<def> = CMPLWI %4<kill>, 0; CRRC:%5 GPRC:%4
+/// %8<def> = LXSDX %zero8, %7<kill>, %rm<imp-use>;
+/// mem:LD8[ConstantPool] F8RC:%8 G8RC:%7
+/// BCC 76, %5, <BB#2>; CRRC:%5
/// Successors according to CFG: BB#1(?%) BB#2(?%)
///
/// BB#1: derived from LLVM BB %entry
@@ -75,10 +75,10 @@ namespace llvm {
///
/// BB#2: derived from LLVM BB %entry
/// Predecessors according to CFG: BB#0 BB#1
-/// %vreg9<def> = PHI %vreg8, <BB#1>, %vreg0, <BB#0>;
-/// F8RC:%vreg9,%vreg8,%vreg0
+/// %9<def> = PHI %8, <BB#1>, %0, <BB#0>;
+/// F8RC:%9,%8,%0
/// <SNIP2>
-/// BCC 76, %vreg5, <BB#4>; CRRC:%vreg5
+/// BCC 76, %5, <BB#4>; CRRC:%5
/// Successors according to CFG: BB#3(?%) BB#4(?%)
///
/// BB#3: derived from LLVM BB %entry
@@ -87,8 +87,8 @@ namespace llvm {
///
/// BB#4: derived from LLVM BB %entry
/// Predecessors according to CFG: BB#2 BB#3
-/// %vreg13<def> = PHI %vreg12, <BB#3>, %vreg2, <BB#2>;
-/// F8RC:%vreg13,%vreg12,%vreg2
+/// %13<def> = PHI %12, <BB#3>, %2, <BB#2>;
+/// F8RC:%13,%12,%2
/// <SNIP3>
/// BLR8 %lr8<imp-use>, %rm<imp-use>, %f1<imp-use>
///
@@ -100,12 +100,12 @@ namespace llvm {
/// BB#0: derived from LLVM BB %entry
/// Live Ins: %f1 %f3 %x6
/// <SNIP1>
-/// %vreg0<def> = COPY %f1; F8RC:%vreg0
-/// %vreg5<def> = CMPLWI %vreg4<kill>, 0; CRRC:%vreg5 GPRC:%vreg4
-/// %vreg8<def> = LXSDX %zero8, %vreg7<kill>, %rm<imp-use>;
-/// mem:LD8[ConstantPool] F8RC:%vreg8 G8RC:%vreg7
+/// %0<def> = COPY %f1; F8RC:%0
+/// %5<def> = CMPLWI %4<kill>, 0; CRRC:%5 GPRC:%4
+/// %8<def> = LXSDX %zero8, %7<kill>, %rm<imp-use>;
+/// mem:LD8[ConstantPool] F8RC:%8 G8RC:%7
/// <SNIP2>
-/// BCC 76, %vreg5, <BB#4>; CRRC:%vreg5
+/// BCC 76, %5, <BB#4>; CRRC:%5
/// Successors according to CFG: BB#1(0x2aaaaaaa / 0x80000000 = 33.33%)
/// BB#4(0x55555554 / 0x80000000 = 66.67%)
///
@@ -115,10 +115,10 @@ namespace llvm {
///
/// BB#4: derived from LLVM BB %entry
/// Predecessors according to CFG: BB#0 BB#1
-/// %vreg9<def> = PHI %vreg8, <BB#1>, %vreg0, <BB#0>;
-/// F8RC:%vreg9,%vreg8,%vreg0
-/// %vreg13<def> = PHI %vreg12, <BB#1>, %vreg2, <BB#0>;
-/// F8RC:%vreg13,%vreg12,%vreg2
+/// %9<def> = PHI %8, <BB#1>, %0, <BB#0>;
+/// F8RC:%9,%8,%0
+/// %13<def> = PHI %12, <BB#1>, %2, <BB#0>;
+/// F8RC:%13,%12,%2
/// <SNIP3>
/// BLR8 %lr8<imp-use>, %rm<imp-use>, %f1<imp-use>
///
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
index fd566634760..15cc1c76760 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -2318,7 +2318,7 @@ PPCInstrInfo::isSignOrZeroExtended(const MachineInstr &MI, bool SignExt,
// ADJCALLSTACKDOWN 32, %r1<imp-def,dead>, %r1<imp-use>
// BL8_NOP <ga:@func>,...
// ADJCALLSTACKUP 32, 0, %r1<imp-def,dead>, %r1<imp-use>
- // %vreg5<def> = COPY %x3; G8RC:%vreg5
+ // %5<def> = COPY %x3; G8RC:%5
if (SrcReg == PPC::X3) {
const MachineBasicBlock *MBB = MI.getParent();
MachineBasicBlock::const_instr_iterator II =
diff --git a/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp b/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp
index a8d98133afc..1ac7afe2cdc 100644
--- a/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp
+++ b/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp
@@ -585,9 +585,9 @@ bool PPCMIPeephole::simplifyCode(void) {
// We can eliminate RLDICL (e.g. for zero-extension)
// if all bits to clear are already zero in the input.
// This code assumes the following code sequence for zero-extension.
- // %vreg6<def> = COPY %vreg5:sub_32; (optional)
- // %vreg8<def> = IMPLICIT_DEF;
- // %vreg7<def,tied1> = INSERT_SUBREG %vreg8<tied0>, %vreg6, sub_32;
+ // %6<def> = COPY %5:sub_32; (optional)
+ // %8<def> = IMPLICIT_DEF;
+ // %7<def,tied1> = INSERT_SUBREG %8<tied0>, %6, sub_32;
if (!EnableZExtElimination) break;
if (MI.getOperand(2).getImm() != 0)
@@ -685,8 +685,8 @@ bool PPCMIPeephole::simplifyCode(void) {
DEBUG(dbgs() << "Optimizing LI to ADDI: ");
DEBUG(LiMI->dump());
- // There could be repeated registers in the PHI, e.g: %vreg1<def> =
- // PHI %vreg6, <BB#2>, %vreg8, <BB#3>, %vreg8, <BB#6>; So if we've
+ // There could be repeated registers in the PHI, e.g: %1<def> =
+ // PHI %6, <BB#2>, %8, <BB#3>, %8, <BB#6>; So if we've
// already replaced the def instruction, skip.
if (LiMI->getOpcode() == PPC::ADDI || LiMI->getOpcode() == PPC::ADDI8)
continue;
diff --git a/llvm/lib/Target/PowerPC/PPCVSXFMAMutate.cpp b/llvm/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
index 80b63b1c9df..4d001c0210d 100644
--- a/llvm/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
+++ b/llvm/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
@@ -90,21 +90,21 @@ protected:
// This pass is run after register coalescing, and so we're looking for
// a situation like this:
// ...
- // %vreg5<def> = COPY %vreg9; VSLRC:%vreg5,%vreg9
- // %vreg5<def,tied1> = XSMADDADP %vreg5<tied0>, %vreg17, %vreg16,
- // %rm<imp-use>; VSLRC:%vreg5,%vreg17,%vreg16
+ // %5<def> = COPY %9; VSLRC:%5,%9
+ // %5<def,tied1> = XSMADDADP %5<tied0>, %17, %16,
+ // %rm<imp-use>; VSLRC:%5,%17,%16
// ...
- // %vreg9<def,tied1> = XSMADDADP %vreg9<tied0>, %vreg17, %vreg19,
- // %rm<imp-use>; VSLRC:%vreg9,%vreg17,%vreg19
+ // %9<def,tied1> = XSMADDADP %9<tied0>, %17, %19,
+ // %rm<imp-use>; VSLRC:%9,%17,%19
// ...
// Where we can eliminate the copy by changing from the A-type to the
// M-type instruction. Specifically, for this example, this means:
- // %vreg5<def,tied1> = XSMADDADP %vreg5<tied0>, %vreg17, %vreg16,
- // %rm<imp-use>; VSLRC:%vreg5,%vreg17,%vreg16
+ // %5<def,tied1> = XSMADDADP %5<tied0>, %17, %16,
+ // %rm<imp-use>; VSLRC:%5,%17,%16
// is replaced by:
- // %vreg16<def,tied1> = XSMADDMDP %vreg16<tied0>, %vreg18, %vreg9,
- // %rm<imp-use>; VSLRC:%vreg16,%vreg18,%vreg9
- // and we remove: %vreg5<def> = COPY %vreg9; VSLRC:%vreg5,%vreg9
+ // %16<def,tied1> = XSMADDMDP %16<tied0>, %18, %9,
+ // %rm<imp-use>; VSLRC:%16,%18,%9
+ // and we remove: %5<def> = COPY %9; VSLRC:%5,%9
SlotIndex FMAIdx = LIS->getInstructionIndex(MI);
@@ -150,13 +150,13 @@ protected:
// walking the MIs we may as well test liveness here.
//
// FIXME: There is a case that occurs in practice, like this:
- // %vreg9<def> = COPY %f1; VSSRC:%vreg9
+ // %9<def> = COPY %f1; VSSRC:%9
// ...
- // %vreg6<def> = COPY %vreg9; VSSRC:%vreg6,%vreg9
- // %vreg7<def> = COPY %vreg9; VSSRC:%vreg7,%vreg9
- // %vreg9<def,tied1> = XSMADDASP %vreg9<tied0>, %vreg1, %vreg4; VSSRC:
- // %vreg6<def,tied1> = XSMADDASP %vreg6<tied0>, %vreg1, %vreg2; VSSRC:
- // %vreg7<def,tied1> = XSMADDASP %vreg7<tied0>, %vreg1, %vreg3; VSSRC:
+ // %6<def> = COPY %9; VSSRC:%6,%9
+ // %7<def> = COPY %9; VSSRC:%7,%9
+ // %9<def,tied1> = XSMADDASP %9<tied0>, %1, %4; VSSRC:
+ // %6<def,tied1> = XSMADDASP %6<tied0>, %1, %2; VSSRC:
+ // %7<def,tied1> = XSMADDASP %7<tied0>, %1, %3; VSSRC:
// which prevents an otherwise-profitable transformation.
bool OtherUsers = false, KillsAddendSrc = false;
for (auto J = std::prev(I), JE = MachineBasicBlock::iterator(AddendMI);
@@ -177,11 +177,11 @@ protected:
// The transformation doesn't work well with things like:
- // %vreg5 = A-form-op %vreg5, %vreg11, %vreg5;
- // unless vreg11 is also a kill, so skip when it is not,
+ // %5 = A-form-op %5, %11, %5;
+ // unless %11 is also a kill, so skip when it is not,
// and check operand 3 to see it is also a kill to handle the case:
- // %vreg5 = A-form-op %vreg5, %vreg5, %vreg11;
- // where vreg5 and vreg11 are both kills. This case would be skipped
+ // %5 = A-form-op %5, %5, %11;
+ // where %5 and %11 are both kills. This case would be skipped
// otherwise.
unsigned OldFMAReg = MI.getOperand(0).getReg();
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index a21145f0755..54523d7233e 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -6948,10 +6948,10 @@ static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
// For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
// lowered this:
- // (extract_vector_elt (v8f32 %vreg1), Constant<6>)
+ // (extract_vector_elt (v8f32 %1), Constant<6>)
// to:
// (extract_vector_elt (vector_shuffle<2,u,u,u>
- // (extract_subvector (v8f32 %vreg0), Constant<4>),
+ // (extract_subvector (v8f32 %0), Constant<4>),
// undef)
// Constant<0>)
// In this case the vector is the extract_subvector expression and the index