| author | Francis Visoiu Mistrih <francisvm@yahoo.com> | 2017-11-28 17:15:09 +0000 |
|---|---|---|
| committer | Francis Visoiu Mistrih <francisvm@yahoo.com> | 2017-11-28 17:15:09 +0000 |
| commit | 9d7bb0cb408e993181fc1b28986c7eb3495f28b9 (patch) | |
| tree | 117b65c086189d16940bde7b400f0fd4312d98de /llvm/lib/Target | |
| parent | 2803bfaf001241a98608c263a824a5f5ec542511 (diff) | |
| download | bcm5719-llvm-9d7bb0cb408e993181fc1b28986c7eb3495f28b9.tar.gz, bcm5719-llvm-9d7bb0cb408e993181fc1b28986c7eb3495f28b9.zip | |
[CodeGen] Print register names in lowercase in both MIR and debug output
As part of the unification of the debug format and the MIR format,
always print registers as lowercase.
* Only debug printing is affected; it now follows the MIR format.
Differential Revision: https://reviews.llvm.org/D40417
llvm-svn: 319187
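The visible effect is purely textual: a physical register that used to print as `%SP` or `%VGPR0` in debug dumps now prints as `%sp` and `%vgpr0`, exactly as MIR already did. A minimal standalone sketch of the formatting rule follows; the helper `printRegLower` is hypothetical and for illustration only, not the code this patch touches:

```cpp
#include <algorithm>
#include <cctype>
#include <iostream>
#include <string>

// Hypothetical helper (not part of the patch): render a physical register
// the way MIR -- and, after this commit, debug output -- prints it:
// '%' followed by the register name in lowercase.
std::string printRegLower(std::string Name) {
  std::transform(Name.begin(), Name.end(), Name.begin(),
                 [](unsigned char C) { return std::tolower(C); });
  return "%" + Name;
}

int main() {
  std::cout << printRegLower("SP") << '\n';    // %sp
  std::cout << printRegLower("XZR") << '\n';   // %xzr
  std::cout << printRegLower("VGPR0") << '\n'; // %vgpr0
}
```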
Diffstat (limited to 'llvm/lib/Target')
40 files changed, 213 insertions, 213 deletions
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index c4aa6bf139d..bf5f0f624af 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -2801,11 +2801,11 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
     LiveIntervals *LIS) const {
   // This is a bit of a hack. Consider this instruction:
   //
-  // %vreg0<def> = COPY %SP; GPR64all:%vreg0
+  // %vreg0<def> = COPY %sp; GPR64all:%vreg0
   //
   // We explicitly chose GPR64all for the virtual register so such a copy might
   // be eliminated by RegisterCoalescer. However, that may not be possible, and
-  // %vreg0 may even spill. We can't spill %SP, and since it is in the GPR64all
+  // %vreg0 may even spill. We can't spill %sp, and since it is in the GPR64all
   // register class, TargetInstrInfo::foldMemoryOperand() is going to try.
   //
   // To prevent that, we are going to constrain the %vreg0 register class here.
@@ -2830,12 +2830,12 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
   // Handle the case where a copy is being spilled or filled but the source
   // and destination register class don't match. For example:
   //
-  // %vreg0<def> = COPY %XZR; GPR64common:%vreg0
+  // %vreg0<def> = COPY %xzr; GPR64common:%vreg0
   //
   // In this case we can still safely fold away the COPY and generate the
   // following spill code:
   //
-  // STRXui %XZR, <fi#0>
+  // STRXui %xzr, <fi#0>
   //
   // This also eliminates spilled cross register class COPYs (e.g. between x and
   // d regs) of the same size. For example:
@@ -2886,12 +2886,12 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
   // Handle cases like spilling def of:
   //
-  // %vreg0:sub_32<def,read-undef> = COPY %WZR; GPR64common:%vreg0
+  // %vreg0:sub_32<def,read-undef> = COPY %wzr; GPR64common:%vreg0
   //
   // where the physical register source can be widened and stored to the full
   // virtual reg destination stack slot, in this case producing:
   //
-  // STRXui %XZR, <fi#0>
+  // STRXui %xzr, <fi#0>
   //
   if (IsSpill && DstMO.isUndef() &&
       TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
diff --git a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index c32b0dbca9b..de912244eeb 100644
--- a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -830,8 +830,8 @@ AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
   if (SExtIdx != -1) {
     // Generate the sign extension for the proper result of the ldp.
     // I.e., with X1, that would be:
-    // %W1<def> = KILL %W1, %X1<imp-def>
-    // %X1<def> = SBFMXri %X1<kill>, 0, 31
+    // %w1<def> = KILL %w1, %x1<imp-def>
+    // %x1<def> = SBFMXri %x1<kill>, 0, 31
     MachineOperand &DstMO = MIB->getOperand(SExtIdx);
     // Right now, DstMO has the extended register, since it comes from an
     // extended opcode.
diff --git a/llvm/lib/Target/AMDGPU/CaymanInstructions.td b/llvm/lib/Target/AMDGPU/CaymanInstructions.td
index 0ba5acad680..429d28e753c 100644
--- a/llvm/lib/Target/AMDGPU/CaymanInstructions.td
+++ b/llvm/lib/Target/AMDGPU/CaymanInstructions.td
@@ -144,8 +144,8 @@ def VTX_READ_32_cm
   // to be caused by ALU instructions in the next instruction group that wrote
   // to the $src_gpr registers of the VTX_READ.
   // e.g.
-  // %T3_X<def> = VTX_READ_PARAM_32_eg %T2_X<kill>, 24
-  // %T2_X<def> = MOV %ZERO
+  // %t3_x<def> = VTX_READ_PARAM_32_eg %t2_x<kill>, 24
+  // %t2_x<def> = MOV %zero
   //Adding this constraint prevents this from happening.
   let Constraints = "$src_gpr.ptr = $dst_gpr";
 }
diff --git a/llvm/lib/Target/AMDGPU/EvergreenInstructions.td b/llvm/lib/Target/AMDGPU/EvergreenInstructions.td
index bccad826d18..c25980eef85 100644
--- a/llvm/lib/Target/AMDGPU/EvergreenInstructions.td
+++ b/llvm/lib/Target/AMDGPU/EvergreenInstructions.td
@@ -212,8 +212,8 @@ def VTX_READ_32_eg
   // to be caused by ALU instructions in the next instruction group that wrote
   // to the $src_gpr registers of the VTX_READ.
   // e.g.
-  // %T3_X<def> = VTX_READ_PARAM_32_eg %T2_X<kill>, 24
-  // %T2_X<def> = MOV %ZERO
+  // %t3_x<def> = VTX_READ_PARAM_32_eg %t2_x<kill>, 24
+  // %t2_x<def> = MOV %zero
   //Adding this constraint prevents this from happening.
   let Constraints = "$src_gpr.ptr = $dst_gpr";
 }
diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index 0fa6712527f..2c52e16892c 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -971,9 +971,9 @@ bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
       // Prevent folding operands backwards in the function. For example,
       // the COPY opcode must not be replaced by 1 in this example:
       //
-      // %vreg3<def> = COPY %VGPR0; VGPR_32:%vreg3
+      // %vreg3<def> = COPY %vgpr0; VGPR_32:%vreg3
       // ...
-      // %VGPR0<def> = V_MOV_B32_e32 1, %EXEC<imp-use>
+      // %vgpr0<def> = V_MOV_B32_e32 1, %exec<imp-use>
       MachineOperand &Dst = MI.getOperand(0);
       if (Dst.isReg() &&
           !TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 2561f7f09fe..2c7ef096d9c 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -6600,7 +6600,7 @@ void SITargetLowering::adjustWritemask(MachineSDNode *&Node,
         I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
       return;
-    // Lane means which subreg of %VGPRa_VGPRb_VGPRc_VGPRd is used.
+    // Lane means which subreg of %vgpra_vgprb_vgprc_vgprd is used.
     // Note that subregs are packed, i.e. Lane==0 is the first bit set
     // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit
     // set, etc.
diff --git a/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp b/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
index 15210d2a31c..027974311da 100644
--- a/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
+++ b/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
@@ -21,31 +21,31 @@
 /// EXEC to update the predicates.
 ///
 /// For example:
-/// %VCC = V_CMP_GT_F32 %VGPR1, %VGPR2
-/// %SGPR0 = SI_IF %VCC
-/// %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0
-/// %SGPR0 = SI_ELSE %SGPR0
-/// %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0
-/// SI_END_CF %SGPR0
+/// %vcc = V_CMP_GT_F32 %vgpr1, %vgpr2
+/// %sgpr0 = SI_IF %vcc
+/// %vgpr0 = V_ADD_F32 %vgpr0, %vgpr0
+/// %sgpr0 = SI_ELSE %sgpr0
+/// %vgpr0 = V_SUB_F32 %vgpr0, %vgpr0
+/// SI_END_CF %sgpr0
 ///
 /// becomes:
 ///
-/// %SGPR0 = S_AND_SAVEEXEC_B64 %VCC // Save and update the exec mask
-/// %SGPR0 = S_XOR_B64 %SGPR0, %EXEC // Clear live bits from saved exec mask
+/// %sgpr0 = S_AND_SAVEEXEC_B64 %vcc // Save and update the exec mask
+/// %sgpr0 = S_XOR_B64 %sgpr0, %exec // Clear live bits from saved exec mask
 /// S_CBRANCH_EXECZ label0            // This instruction is an optional
 ///                                   // optimization which allows us to
 ///                                   // branch if all the bits of
 ///                                   // EXEC are zero.
-/// %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0 // Do the IF block of the branch
+/// %vgpr0 = V_ADD_F32 %vgpr0, %vgpr0 // Do the IF block of the branch
 ///
 /// label0:
-/// %SGPR0 = S_OR_SAVEEXEC_B64 %EXEC // Restore the exec mask for the Then block
-/// %EXEC = S_XOR_B64 %SGPR0, %EXEC // Clear live bits from saved exec mask
+/// %sgpr0 = S_OR_SAVEEXEC_B64 %exec // Restore the exec mask for the Then block
+/// %exec = S_XOR_B64 %sgpr0, %exec // Clear live bits from saved exec mask
 /// S_BRANCH_EXECZ label1 // Use our branch optimization
 ///                       // instruction again.
-/// %VGPR0 = V_SUB_F32 %VGPR0, %VGPR // Do the THEN block
+/// %vgpr0 = V_SUB_F32 %vgpr0, %vgpr // Do the THEN block
 /// label1:
-/// %EXEC = S_OR_B64 %EXEC, %SGPR0 // Re-enable saved exec mask bits
+/// %exec = S_OR_B64 %exec, %sgpr0 // Re-enable saved exec mask bits
 //===----------------------------------------------------------------------===//

 #include "AMDGPU.h"
diff --git a/llvm/lib/Target/ARM/ARMFrameLowering.cpp b/llvm/lib/Target/ARM/ARMFrameLowering.cpp
index d60734ab144..e9a13b9802b 100644
--- a/llvm/lib/Target/ARM/ARMFrameLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMFrameLowering.cpp
@@ -1832,12 +1832,12 @@ void ARMFrameLowering::determineCalleeSaves(MachineFunction &MF,
   if (!HasFP) {
     if (SavedRegs.test(ARM::R7)) {
       --RegDeficit;
-      DEBUG(dbgs() << "%R7 is saved low register, RegDeficit = "
+      DEBUG(dbgs() << "%r7 is saved low register, RegDeficit = "
                    << RegDeficit << "\n");
     } else {
       AvailableRegs.push_back(ARM::R7);
       DEBUG(dbgs()
-            << "%R7 is non-saved low register, adding to AvailableRegs\n");
+            << "%r7 is non-saved low register, adding to AvailableRegs\n");
     }
   }
@@ -1859,11 +1859,11 @@ void ARMFrameLowering::determineCalleeSaves(MachineFunction &MF,
       MF.getFrameInfo().isReturnAddressTaken())) {
     if (SavedRegs.test(ARM::LR)) {
       --RegDeficit;
-      DEBUG(dbgs() << "%LR is saved register, RegDeficit = " << RegDeficit
+      DEBUG(dbgs() << "%lr is saved register, RegDeficit = " << RegDeficit
                    << "\n");
     } else {
       AvailableRegs.push_back(ARM::LR);
-      DEBUG(dbgs() << "%LR is not saved, adding to AvailableRegs\n");
+      DEBUG(dbgs() << "%lr is not saved, adding to AvailableRegs\n");
     }
   }
diff --git a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index e989c2fce5d..2b63e0c842f 100644
--- a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -1697,7 +1697,7 @@ bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB,
   if (OddReg == EvenReg && EvenDeadKill) {
     // If the two source operands are the same, the kill marker is
     // probably on the first one. e.g.
-    // t2STRDi8 %R5<kill>, %R5, %R9<kill>, 0, 14, %reg0
+    // t2STRDi8 %r5<kill>, %r5, %r9<kill>, 0, 14, %reg0
     EvenDeadKill = false;
     OddDeadKill = true;
   }
diff --git a/llvm/lib/Target/Hexagon/HexagonBlockRanges.cpp b/llvm/lib/Target/Hexagon/HexagonBlockRanges.cpp
index 00db408b8ed..9ca7e5f0a3c 100644
--- a/llvm/lib/Target/Hexagon/HexagonBlockRanges.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonBlockRanges.cpp
@@ -368,7 +368,7 @@ void HexagonBlockRanges::computeInitialLiveRanges(InstrIndexMap &IndexMap,
     }
   }
   // Defs and clobbers can overlap, e.g.
-  // %D0<def,dead> = COPY %vreg5, %R0<imp-def>, %R1<imp-def>
+  // %d0<def,dead> = COPY %vreg5, %r0<imp-def>, %r1<imp-def>
   for (RegisterRef R : Defs)
     Clobbers.erase(R);
diff --git a/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp b/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
index ed6c40deeba..e7c3290d151 100644
--- a/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
@@ -1974,7 +1974,7 @@ bool HexagonConstEvaluator::evaluate(const MachineInstr &MI,
     {
       const MachineOperand &VO = MI.getOperand(1);
       // The operand of CONST32 can be a blockaddress, e.g.
-      // %vreg0<def> = CONST32 <blockaddress(@eat, %L)>
+      // %vreg0<def> = CONST32 <blockaddress(@eat, %l)>
       // Do this check for all instructions for safety.
       if (!VO.isImm())
         return false;
@@ -3144,7 +3144,7 @@ bool HexagonConstEvaluator::rewriteHexBranch(MachineInstr &BrI,
   BrI.setDesc(JD);
   while (BrI.getNumOperands() > 0)
     BrI.RemoveOperand(0);
-  // This ensures that all implicit operands (e.g. %R31<imp-def>, etc)
+  // This ensures that all implicit operands (e.g. %r31<imp-def>, etc)
   // are present in the rewritten branch.
   for (auto &Op : NI->operands())
     BrI.addOperand(Op);
diff --git a/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp b/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp
index a27993116d8..2dfd7b7f9c8 100644
--- a/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp
@@ -351,11 +351,11 @@ bool HexagonCopyToCombine::isSafeToMoveTogether(MachineInstr &I1,
       // kill flag for a register (a removeRegisterKilled() analogous to
       // addRegisterKilled) that handles aliased register correctly.
       //   * or has a killed aliased register use of I1's use reg
-      //     %D4<def> = A2_tfrpi 16
-      //     %R6<def> = A2_tfr %R9
-      //     %R8<def> = KILL %R8, %D4<imp-use,kill>
+      //     %d4<def> = A2_tfrpi 16
+      //     %r6<def> = A2_tfr %r9
+      //     %r8<def> = KILL %r8, %d4<imp-use,kill>
      //      If we want to move R6 = across the KILL instruction we would have
-      //      to remove the %D4<imp-use,kill> operand. For now, we are
+      //      to remove the %d4<imp-use,kill> operand. For now, we are
       //      conservative and disallow the move.
       // we can't move I1 across it.
       if (MI.isDebugValue()) {
diff --git a/llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp b/llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
index bec759a826d..b2244107ac4 100644
--- a/llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
@@ -27,8 +27,8 @@
 //
 //         %vreg40<def> = L2_loadrub_io %vreg39<kill>, 1
 //         %vreg41<def> = S2_tstbit_i %vreg40<kill>, 0
-//         J2_jumpt %vreg41<kill>, <BB#5>, %PC<imp-def,dead>
-//         J2_jump <BB#4>, %PC<imp-def,dead>
+//         J2_jumpt %vreg41<kill>, <BB#5>, %pc<imp-def,dead>
+//         J2_jump <BB#4>, %pc<imp-def,dead>
 //     Successors according to CFG: BB#4(62) BB#5(62)
 //
 //     BB#4: derived from LLVM BB %if.then
@@ -42,8 +42,8 @@
 //         %vreg12<def> = PHI %vreg6, <BB#3>, %vreg11, <BB#4>
 //         %vreg13<def> = A2_addp %vreg7, %vreg12
 //         %vreg42<def> = C2_cmpeqi %vreg9, 10
-//         J2_jumpf %vreg42<kill>, <BB#3>, %PC<imp-def,dead>
-//         J2_jump <BB#6>, %PC<imp-def,dead>
+//         J2_jumpf %vreg42<kill>, <BB#3>, %pc<imp-def,dead>
+//         J2_jump <BB#6>, %pc<imp-def,dead>
 //     Successors according to CFG: BB#6(4) BB#3(124)
 //
 // would become:
@@ -55,8 +55,8 @@
 //         %vreg46<def> = PS_pselect %vreg41, %vreg6, %vreg11
 //         %vreg13<def> = A2_addp %vreg7, %vreg46
 //         %vreg42<def> = C2_cmpeqi %vreg9, 10
-//         J2_jumpf %vreg42<kill>, <BB#3>, %PC<imp-def,dead>
-//         J2_jump <BB#6>, %PC<imp-def,dead>
+//         J2_jumpf %vreg42<kill>, <BB#3>, %pc<imp-def,dead>
+//         J2_jump <BB#6>, %pc<imp-def,dead>
 //     Successors according to CFG: BB#6 BB#3

 #include "Hexagon.h"
diff --git a/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp b/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp
index 5ca8b0f30e0..56171f22148 100644
--- a/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp
@@ -1720,7 +1720,7 @@ bool HexagonHardwareLoops::fixupInductionVariable(MachineLoop *L) {
       MachineOperand &MO = PredDef->getOperand(i);
       if (MO.isReg()) {
         // Skip all implicit references. In one case there was:
-        // %vreg140<def> = FCMPUGT32_rr %vreg138, %vreg139, %USR<imp-use>
+        // %vreg140<def> = FCMPUGT32_rr %vreg138, %vreg139, %usr<imp-use>
         if (MO.isImplicit())
           continue;
         if (MO.isUse()) {
diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
index 3c0b3061688..4cdfd09c095 100644
--- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -1616,8 +1616,8 @@ DFAPacketizer *HexagonInstrInfo::CreateTargetScheduleState(
 }

 // Inspired by this pair:
-//   %R13<def> = L2_loadri_io %R29, 136; mem:LD4[FixedStack0]
-//   S2_storeri_io %R29, 132, %R1<kill>; flags:  mem:ST4[FixedStack1]
+//   %r13<def> = L2_loadri_io %r29, 136; mem:LD4[FixedStack0]
+//   S2_storeri_io %r29, 132, %r1<kill>; flags:  mem:ST4[FixedStack1]
 // Currently AA considers the addresses in these instructions to be aliasing.
 bool HexagonInstrInfo::areMemAccessesTriviallyDisjoint(
     MachineInstr &MIa, MachineInstr &MIb, AliasAnalysis *AA) const {
@@ -3516,7 +3516,7 @@ HexagonII::SubInstructionGroup HexagonInstrInfo::getDuplexCandidateGroup(
   case Hexagon::EH_RETURN_JMPR:
   case Hexagon::PS_jmpret:
     // jumpr r31
-    // Actual form JMPR %PC<imp-def>, %R31<imp-use>, %R0<imp-use,internal>.
+    // Actual form JMPR %pc<imp-def>, %r31<imp-use>, %r0<imp-use,internal>.
     DstReg = MI.getOperand(0).getReg();
     if (Hexagon::IntRegsRegClass.contains(DstReg) &&
         (Hexagon::R31 == DstReg))
       return HexagonII::HSIG_L2;
@@ -3706,7 +3706,7 @@ HexagonII::SubInstructionGroup HexagonInstrInfo::getDuplexCandidateGroup(
   case Hexagon::C2_cmovenewif:
     // if ([!]P0[.new]) Rd = #0
     // Actual form:
-    // %R16<def> = C2_cmovenewit %P0<internal>, 0, %R16<imp-use,undef>;
+    // %r16<def> = C2_cmovenewit %p0<internal>, 0, %r16<imp-use,undef>;
     DstReg = MI.getOperand(0).getReg();
     SrcReg = MI.getOperand(1).getReg();
     if (isIntRegForSubInst(DstReg) &&
diff --git a/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp b/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp
index cf7a5fff149..51cf6bbd8a2 100644
--- a/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp
@@ -129,9 +129,9 @@ static bool canBeFeederToNewValueJump(const HexagonInstrInfo *QII,
   // using -- if (QRI->isSubRegister(feederReg, cmpReg1) logic
   // before the callsite of this function
   // But we can not as it comes in the following fashion.
-  //   %D0<def> = Hexagon_S2_lsr_r_p %D0<kill>, %R2<kill>
-  //   %R0<def> = KILL %R0, %D0<imp-use,kill>
-  //   %P0<def> = CMPEQri %R0<kill>, 0
+  //   %d0<def> = Hexagon_S2_lsr_r_p %d0<kill>, %r2<kill>
+  //   %r0<def> = KILL %r0, %d0<imp-use,kill>
+  //   %p0<def> = CMPEQri %r0<kill>, 0
   // Hence, we need to check if it's a KILL instruction.
   if (II->getOpcode() == TargetOpcode::KILL)
     return false;
@@ -193,9 +193,9 @@ static bool commonChecksToProhibitNewValueJump(bool afterRA,
     // to new value jump. If they are in the path, bail out.
     // KILL sets kill flag on the opcode. It also sets up a
     // single register, out of pair.
-    //    %D0<def> = S2_lsr_r_p %D0<kill>, %R2<kill>
-    //    %R0<def> = KILL %R0, %D0<imp-use,kill>
-    //    %P0<def> = C2_cmpeqi %R0<kill>, 0
+    //    %d0<def> = S2_lsr_r_p %d0<kill>, %r2<kill>
+    //    %r0<def> = KILL %r0, %d0<imp-use,kill>
+    //    %p0<def> = C2_cmpeqi %r0<kill>, 0
     // PHI can be anything after RA.
     // COPY can remateriaze things in between feeder, compare and nvj.
     if (MII->getOpcode() == TargetOpcode::KILL ||
diff --git a/llvm/lib/Target/Hexagon/HexagonPeephole.cpp b/llvm/lib/Target/Hexagon/HexagonPeephole.cpp
index d794f83aaa4..0ef0e78c524 100644
--- a/llvm/lib/Target/Hexagon/HexagonPeephole.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonPeephole.cpp
@@ -20,12 +20,12 @@
 //     ...
 //     %vreg16<def> = NOT_p %vreg15<kill>
 //     ...
-//     JMP_c %vreg16<kill>, <BB#1>, %PC<imp-def,dead>
+//     JMP_c %vreg16<kill>, <BB#1>, %pc<imp-def,dead>
 //
 //  Into
 //     %vreg15<def> = CMPGTrr %vreg6, %vreg2;
 //     ...
-//     JMP_cNot %vreg15<kill>, <BB#1>, %PC<imp-def,dead>;
+//     JMP_cNot %vreg15<kill>, <BB#1>, %pc<imp-def,dead>;
 //
 // Note: The peephole pass makes the instrucstions like
 // %vreg170<def> = SXTW %vreg166 or %vreg16<def> = NOT_p %vreg15<kill>
diff --git a/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp b/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
index 7ec4c34504b..7eed2898f61 100644
--- a/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
@@ -220,21 +220,21 @@ void HexagonSubtarget::CallMutation::apply(ScheduleDAGInstrs *DAG) {
         shouldTFRICallBind(HII, DAG->SUnits[su], DAG->SUnits[su+1]))
       DAG->SUnits[su].addPred(SDep(&DAG->SUnits[su-1], SDep::Barrier));
     // Prevent redundant register copies between two calls, which are caused by
-    // both the return value and the argument for the next call being in %R0.
+    // both the return value and the argument for the next call being in %r0.
     // Example:
    //     1: <call1>
-    //     2: %VregX = COPY %R0
-    //     3: <use of %VregX>
-    //     4: %R0 = ...
+    //     2: %vregX = COPY %r0
+    //     3: <use of %vregX>
+    //     4: %r0 = ...
     //     5: <call2>
     // The scheduler would often swap 3 and 4, so an additional register is
     // needed. This code inserts a Barrier dependence between 3 & 4 to prevent
-    // this. The same applies for %D0 and %V0/%W0, which are also handled.
+    // this. The same applies for %d0 and %v0/%w0, which are also handled.
     else if (SchedRetvalOptimization) {
       const MachineInstr *MI = DAG->SUnits[su].getInstr();
       if (MI->isCopy() && (MI->readsRegister(Hexagon::R0, &TRI) ||
                            MI->readsRegister(Hexagon::V0, &TRI)))  {
-        // %vregX = COPY %R0
+        // %vregX = COPY %r0
         VRegHoldingRet = MI->getOperand(0).getReg();
         RetRegister = MI->getOperand(1).getReg();
         LastUseOfRet = nullptr;
@@ -242,7 +242,7 @@ void HexagonSubtarget::CallMutation::apply(ScheduleDAGInstrs *DAG) {
         // <use of %vregX>
         LastUseOfRet = &DAG->SUnits[su];
       else if (LastUseOfRet && MI->definesRegister(RetRegister, &TRI))
-        // %R0 = ...
+        // %r0 = ...
         DAG->SUnits[su].addPred(SDep(LastUseOfRet, SDep::Barrier));
     }
   }
diff --git a/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
index deb46f01c28..a8c5dea0d9e 100644
--- a/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
@@ -772,8 +772,8 @@ bool HexagonPacketizerList::canPromoteToNewValueStore(const MachineInstr &MI,
     // If data definition is because of implicit definition of the register,
     // do not newify the store. Eg.
-    // %R9<def> = ZXTH %R12, %D6<imp-use>, %R12<imp-def>
-    // S2_storerh_io %R8, 2, %R12<kill>; mem:ST2[%scevgep343]
+    // %r9<def> = ZXTH %r12, %d6<imp-use>, %r12<imp-def>
+    // S2_storerh_io %r8, 2, %r12<kill>; mem:ST2[%scevgep343]
     for (auto &MO : PacketMI.operands()) {
       if (MO.isRegMask() && MO.clobbersPhysReg(DepReg))
         return false;
@@ -787,8 +787,8 @@ bool HexagonPacketizerList::canPromoteToNewValueStore(const MachineInstr &MI,
     // Handle imp-use of super reg case. There is a target independent side
     // change that should prevent this situation but I am handling it for
     // just-in-case. For example, we cannot newify R2 in the following case:
-    // %R3<def> = A2_tfrsi 0;
-    // S2_storeri_io %R0<kill>, 0, %R2<kill>, %D1<imp-use,kill>;
+    // %r3<def> = A2_tfrsi 0;
+    // S2_storeri_io %r0<kill>, 0, %r2<kill>, %d1<imp-use,kill>;
     for (auto &MO : MI.operands()) {
       if (MO.isReg() && MO.isUse() && MO.isImplicit() && MO.getReg() == DepReg)
         return false;
@@ -892,12 +892,12 @@ bool HexagonPacketizerList::canPromoteToDotNew(const MachineInstr &MI,
 // Go through the packet instructions and search for an anti dependency between
 // them and DepReg from MI. Consider this case:
 // Trying to add
-// a) %R1<def> = TFRI_cdNotPt %P3, 2
+// a) %r1<def> = TFRI_cdNotPt %p3, 2
 // to this packet:
 // {
-//   b) %P0<def> = C2_or %P3<kill>, %P0<kill>
-//   c) %P3<def> = C2_tfrrp %R23
-//   d) %R1<def> = C2_cmovenewit %P3, 4
+//   b) %p0<def> = C2_or %p3<kill>, %p0<kill>
+//   c) %p3<def> = C2_tfrrp %r23
+//   d) %r1<def> = C2_cmovenewit %p3, 4
 // }
 // The P3 from a) and d) will be complements after
 // a)'s P3 is converted to .new form
@@ -962,11 +962,11 @@ bool HexagonPacketizerList::arePredicatesComplements(MachineInstr &MI1,
   // One corner case deals with the following scenario:
   // Trying to add
-  // a) %R24<def> = A2_tfrt %P0, %R25
+  // a) %r24<def> = A2_tfrt %p0, %r25
   // to this packet:
   // {
-  //   b) %R25<def> = A2_tfrf %P0, %R24
-  //   c) %P0<def> = C2_cmpeqi %R26, 1
+  //   b) %r25<def> = A2_tfrf %p0, %r24
+  //   c) %p0<def> = C2_cmpeqi %r26, 1
   // }
   //
   // On general check a) and b) are complements, but presence of c) will
@@ -1543,7 +1543,7 @@ bool HexagonPacketizerList::isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
     // There are certain anti-dependencies that cannot be ignored.
     // Specifically:
-    //   J2_call ... %R0<imp-def>    ; SUJ
+    //   J2_call ... %r0<imp-def>    ; SUJ
     //   R0 = ...                    ; SUI
     // Those cannot be packetized together, since the call will observe
     // the effect of the assignment to R0.
diff --git a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCDuplexInfo.cpp b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCDuplexInfo.cpp
index a39b178805e..7dd89c6eb8e 100644
--- a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCDuplexInfo.cpp
+++ b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCDuplexInfo.cpp
@@ -272,7 +272,7 @@ unsigned HexagonMCInstrInfo::getDuplexCandidateGroup(MCInst const &MCI) {
   case Hexagon::J2_jumpr:
   case Hexagon::PS_jmpret:
     // jumpr r31
    // Actual form JMPR %PC<imp-def>, %R31<imp-use>, %R0<imp-use,internal>.
-    // Actual form JMPR %PC<imp-def>, %R31<imp-use>, %R0<imp-use,internal>.
+    // Actual form JMPR %pc<imp-def>, %r31<imp-use>, %r0<imp-use,internal>.
    DstReg = MCI.getOperand(0).getReg();
     if (Hexagon::R31 == DstReg)
       return HexagonII::HSIG_L2;
@@ -471,7 +471,7 @@ unsigned HexagonMCInstrInfo::getDuplexCandidateGroup(MCInst const &MCI) {
   case Hexagon::C2_cmovenewif:
     // if ([!]P0[.new]) Rd = #0
     // Actual form:
-    // %R16<def> = C2_cmovenewit %P0<internal>, 0, %R16<imp-use,undef>;
+    // %r16<def> = C2_cmovenewit %p0<internal>, 0, %r16<imp-use,undef>;
     DstReg = MCI.getOperand(0).getReg();  // Rd
     PredReg = MCI.getOperand(1).getReg(); // P0
     if (HexagonMCInstrInfo::isIntRegForSubInst(DstReg) &&
diff --git a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.cpp b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.cpp
index ea589c7a82a..3a4a41ccb40 100644
--- a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.cpp
+++ b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.cpp
@@ -113,9 +113,9 @@ bool llvm::HexagonMCShuffle(MCContext &Context, bool Fatal,
   if (!HexagonMCInstrInfo::bundleSize(MCB)) {
     // There once was a bundle:
-    //    BUNDLE %D2<imp-def>, %R4<imp-def>, %R5<imp-def>, %D7<imp-def>, ...
-    //      * %D2<def> = IMPLICIT_DEF; flags:
-    //      * %D7<def> = IMPLICIT_DEF; flags:
+    //    BUNDLE %d2<imp-def>, %r4<imp-def>, %r5<imp-def>, %d7<imp-def>, ...
+    //      * %d2<def> = IMPLICIT_DEF; flags:
+    //      * %d7<def> = IMPLICIT_DEF; flags:
     // After the IMPLICIT_DEFs were removed by the asm printer, the bundle
     // became empty.
     DEBUG(dbgs() << "Skipping empty bundle");
@@ -137,9 +137,9 @@ llvm::HexagonMCShuffle(MCContext &Context, MCInstrInfo const &MCII,
   if (!HexagonMCInstrInfo::bundleSize(MCB)) {
     // There once was a bundle:
-    //    BUNDLE %D2<imp-def>, %R4<imp-def>, %R5<imp-def>, %D7<imp-def>, ...
-    //      * %D2<def> = IMPLICIT_DEF; flags:
-    //      * %D7<def> = IMPLICIT_DEF; flags:
+    //    BUNDLE %d2<imp-def>, %r4<imp-def>, %r5<imp-def>, %d7<imp-def>, ...
+    //      * %d2<def> = IMPLICIT_DEF; flags:
+    //      * %d7<def> = IMPLICIT_DEF; flags:
     // After the IMPLICIT_DEFs were removed by the asm printer, the bundle
     // became empty.
     DEBUG(dbgs() << "Skipping empty bundle");
diff --git a/llvm/lib/Target/Mips/MipsInstrInfo.cpp b/llvm/lib/Target/Mips/MipsInstrInfo.cpp
index 878497ca76f..74394d0e84c 100644
--- a/llvm/lib/Target/Mips/MipsInstrInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsInstrInfo.cpp
@@ -480,7 +480,7 @@ MipsInstrInfo::genInstrWithNewOpc(unsigned NewOpc,
   MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), get(NewOpc));

   // For MIPSR6 JI*C requires an immediate 0 as an operand, JIALC(64) an
-  // immediate 0 as an operand and requires the removal of it's %RA<imp-def>
+  // immediate 0 as an operand and requires the removal of it's %ra<imp-def>
   // implicit operand as copying the implicit operations of the instructio we're
   // looking at will give us the correct flags.
   if (NewOpc == Mips::JIC || NewOpc == Mips::JIALC || NewOpc == Mips::JIC64 ||
diff --git a/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp b/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
index 7fee5ff1bf8..855406330b9 100644
--- a/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
+++ b/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
@@ -521,7 +521,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
     return LowerPATCHPOINT(SM, *MI);

   case PPC::MoveGOTtoLR: {
-    // Transform %LR = MoveGOTtoLR
+    // Transform %lr = MoveGOTtoLR
     // Into this: bl _GLOBAL_OFFSET_TABLE_@local-4
     // _GLOBAL_OFFSET_TABLE_@local-4 (instruction preceding
     // _GLOBAL_OFFSET_TABLE_) has exactly one instruction:
@@ -542,7 +542,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
   }
   case PPC::MovePCtoLR:
   case PPC::MovePCtoLR8: {
-    // Transform %LR = MovePCtoLR
+    // Transform %lr = MovePCtoLR
     // Into this, where the label is the PIC base:
     //     bl L1$pb
     // L1$pb:
@@ -560,9 +560,9 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
     return;
   }
   case PPC::UpdateGBR: {
-    // Transform %Rd = UpdateGBR(%Rt, %Ri)
-    // Into: lwz %Rt, .L0$poff - .L0$pb(%Ri)
-    //       add %Rd, %Rt, %Ri
+    // Transform %rd = UpdateGBR(%rt, %ri)
+    // Into: lwz %rt, .L0$poff - .L0$pb(%ri)
+    //       add %rd, %rt, %ri
     // Get the offset from the GOT Base Register to the GOT
     LowerPPCMachineInstrToMCInst(MI, TmpInst, *this, isDarwin);
     MCSymbol *PICOffset =
@@ -577,7 +577,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
     const MCOperand TR = TmpInst.getOperand(1);
     const MCOperand PICR = TmpInst.getOperand(0);

-    // Step 1: lwz %Rt, .L$poff - .L$pb(%Ri)
+    // Step 1: lwz %rt, .L$poff - .L$pb(%ri)
     TmpInst.getOperand(1) =
         MCOperand::createExpr(MCBinaryExpr::createSub(Exp, PB, OutContext));
     TmpInst.getOperand(0) = TR;
@@ -592,7 +592,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
     return;
   }
   case PPC::LWZtoc: {
-    // Transform %R3 = LWZtoc <ga:@min1>, %R2
+    // Transform %r3 = LWZtoc <ga:@min1>, %r2
     LowerPPCMachineInstrToMCInst(MI, TmpInst, *this, isDarwin);

     // Change the opcode to LWZ, and the global address operand to be a
@@ -636,7 +636,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
   case PPC::LDtocCPT:
   case PPC::LDtocBA:
   case PPC::LDtoc: {
-    // Transform %X3 = LDtoc <ga:@min1>, %X2
+    // Transform %x3 = LDtoc <ga:@min1>, %x2
     LowerPPCMachineInstrToMCInst(MI, TmpInst, *this, isDarwin);

     // Change the opcode to LD, and the global address operand to be a
@@ -667,7 +667,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
   }
   case PPC::ADDIStocHA: {
-    // Transform %Xd = ADDIStocHA %X2, <ga:@sym>
+    // Transform %xd = ADDIStocHA %x2, <ga:@sym>
     LowerPPCMachineInstrToMCInst(MI, TmpInst, *this, isDarwin);

     // Change the opcode to ADDIS8. If the global address is external, has
@@ -714,7 +714,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
     return;
   }
   case PPC::LDtocL: {
-    // Transform %Xd = LDtocL <ga:@sym>, %Xs
+    // Transform %xd = LDtocL <ga:@sym>, %xs
     LowerPPCMachineInstrToMCInst(MI, TmpInst, *this, isDarwin);

     // Change the opcode to LD. If the global address is external, has
@@ -757,7 +757,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
     return;
   }
   case PPC::ADDItocL: {
-    // Transform %Xd = ADDItocL %Xs, <ga:@sym>
+    // Transform %xd = ADDItocL %xs, <ga:@sym>
     LowerPPCMachineInstrToMCInst(MI, TmpInst, *this, isDarwin);

     // Change the opcode to ADDI8. If the global address is external, then
@@ -788,8 +788,8 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
     return;
   }
   case PPC::ADDISgotTprelHA: {
-    // Transform: %Xd = ADDISgotTprelHA %X2, <ga:@sym>
-    // Into:      %Xd = ADDIS8 %X2, sym@got@tlsgd@ha
+    // Transform: %xd = ADDISgotTprelHA %x2, <ga:@sym>
+    // Into:      %xd = ADDIS8 %x2, sym@got@tlsgd@ha
     assert(Subtarget->isPPC64() && "Not supported for 32-bit PowerPC");
     const MachineOperand &MO = MI->getOperand(2);
     const GlobalValue *GValue = MO.getGlobal();
@@ -805,7 +805,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
   }
   case PPC::LDgotTprelL:
   case PPC::LDgotTprelL32: {
-    // Transform %Xd = LDgotTprelL <ga:@sym>, %Xs
+    // Transform %xd = LDgotTprelL <ga:@sym>, %xs
     LowerPPCMachineInstrToMCInst(MI, TmpInst, *this, isDarwin);

     // Change the opcode to LD.
@@ -866,8 +866,8 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
     return;
   }
   case PPC::ADDIStlsgdHA: {
-    // Transform: %Xd = ADDIStlsgdHA %X2, <ga:@sym>
-    // Into:      %Xd = ADDIS8 %X2, sym@got@tlsgd@ha
+    // Transform: %xd = ADDIStlsgdHA %x2, <ga:@sym>
+    // Into:      %xd = ADDIS8 %x2, sym@got@tlsgd@ha
     assert(Subtarget->isPPC64() && "Not supported for 32-bit PowerPC");
     const MachineOperand &MO = MI->getOperand(2);
     const GlobalValue *GValue = MO.getGlobal();
@@ -882,11 +882,11 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
     return;
   }
   case PPC::ADDItlsgdL:
-    // Transform: %Xd = ADDItlsgdL %Xs, <ga:@sym>
-    // Into:      %Xd = ADDI8 %Xs, sym@got@tlsgd@l
+    // Transform: %xd = ADDItlsgdL %xs, <ga:@sym>
+    // Into:      %xd = ADDI8 %xs, sym@got@tlsgd@l
   case PPC::ADDItlsgdL32: {
-    // Transform: %Rd = ADDItlsgdL32 %Rs, <ga:@sym>
-    // Into:      %Rd = ADDI %Rs, sym@got@tlsgd
+    // Transform: %rd = ADDItlsgdL32 %rs, <ga:@sym>
+    // Into:      %rd = ADDI %rs, sym@got@tlsgd
     const MachineOperand &MO = MI->getOperand(2);
     const GlobalValue *GValue = MO.getGlobal();
     MCSymbol *MOSymbol = getSymbol(GValue);
@@ -902,17 +902,17 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
     return;
   }
   case PPC::GETtlsADDR:
-    // Transform: %X3 = GETtlsADDR %X3, <ga:@sym>
+    // Transform: %x3 = GETtlsADDR %x3, <ga:@sym>
     // Into: BL8_NOP_TLS __tls_get_addr(sym at tlsgd)
   case PPC::GETtlsADDR32: {
-    // Transform: %R3 = GETtlsADDR32 %R3, <ga:@sym>
+    // Transform: %r3 = GETtlsADDR32 %r3, <ga:@sym>
     // Into: BL_TLS __tls_get_addr(sym at tlsgd)@PLT
     EmitTlsCall(MI, MCSymbolRefExpr::VK_PPC_TLSGD);
     return;
   }
   case PPC::ADDIStlsldHA: {
-    // Transform: %Xd = ADDIStlsldHA %X2, <ga:@sym>
-    // Into:      %Xd = ADDIS8 %X2, sym@got@tlsld@ha
+    // Transform: %xd = ADDIStlsldHA %x2, <ga:@sym>
+    // Into:      %xd = ADDIS8 %x2, sym@got@tlsld@ha
     assert(Subtarget->isPPC64() && "Not supported for 32-bit PowerPC");
     const MachineOperand &MO = MI->getOperand(2);
     const GlobalValue *GValue = MO.getGlobal();
@@ -927,11 +927,11 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
     return;
   }
   case PPC::ADDItlsldL:
-    // Transform: %Xd = ADDItlsldL %Xs, <ga:@sym>
-    // Into:      %Xd = ADDI8 %Xs, sym@got@tlsld@l
+    // Transform: %xd = ADDItlsldL %xs, <ga:@sym>
+    // Into:      %xd = ADDI8 %xs, sym@got@tlsld@l
   case PPC::ADDItlsldL32: {
-    // Transform: %Rd = ADDItlsldL32 %Rs, <ga:@sym>
-    // Into:      %Rd = ADDI %Rs, sym@got@tlsld
+    // Transform: %rd = ADDItlsldL32 %rs, <ga:@sym>
+    // Into:      %rd = ADDI %rs, sym@got@tlsld
     const MachineOperand &MO = MI->getOperand(2);
     const GlobalValue *GValue = MO.getGlobal();
     MCSymbol *MOSymbol = getSymbol(GValue);
@@ -947,20 +947,20 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
     return;
   }
   case PPC::GETtlsldADDR:
-    // Transform: %X3 = GETtlsldADDR %X3, <ga:@sym>
+    // Transform: %x3 = GETtlsldADDR %x3, <ga:@sym>
     // Into: BL8_NOP_TLS __tls_get_addr(sym at tlsld)
   case PPC::GETtlsldADDR32: {
-    // Transform: %R3 = GETtlsldADDR32 %R3, <ga:@sym>
+    // Transform: %r3 = GETtlsldADDR32 %r3, <ga:@sym>
     // Into: BL_TLS __tls_get_addr(sym at tlsld)@PLT
     EmitTlsCall(MI, MCSymbolRefExpr::VK_PPC_TLSLD);
     return;
   }
   case PPC::ADDISdtprelHA:
-    // Transform: %Xd = ADDISdtprelHA %Xs, <ga:@sym>
-    // Into:      %Xd = ADDIS8 %Xs, sym@dtprel@ha
+    // Transform: %xd = ADDISdtprelHA %xs, <ga:@sym>
+    // Into:      %xd = ADDIS8 %xs, sym@dtprel@ha
   case PPC::ADDISdtprelHA32: {
-    // Transform: %Rd = ADDISdtprelHA32 %Rs, <ga:@sym>
-    // Into:      %Rd = ADDIS %Rs, sym@dtprel@ha
+    // Transform: %rd = ADDISdtprelHA32 %rs, <ga:@sym>
+    // Into:      %rd = ADDIS %rs, sym@dtprel@ha
     const MachineOperand &MO = MI->getOperand(2);
     const GlobalValue *GValue = MO.getGlobal();
     MCSymbol *MOSymbol = getSymbol(GValue);
@@ -976,11 +976,11 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
     return;
   }
   case PPC::ADDIdtprelL:
-    // Transform: %Xd = ADDIdtprelL %Xs, <ga:@sym>
-    // Into:      %Xd = ADDI8 %Xs, sym@dtprel@l
+    // Transform: %xd = ADDIdtprelL %xs, <ga:@sym>
+    // Into:      %xd = ADDI8 %xs, sym@dtprel@l
   case PPC::ADDIdtprelL32: {
-    // Transform: %Rd = ADDIdtprelL32 %Rs, <ga:@sym>
-    // Into:      %Rd = ADDI %Rs, sym@dtprel@l
+    // Transform: %rd = ADDIdtprelL32 %rs, <ga:@sym>
+    // Into:      %rd = ADDI %rs, sym@dtprel@l
     const MachineOperand &MO = MI->getOperand(2);
     const GlobalValue *GValue = MO.getGlobal();
     MCSymbol *MOSymbol = getSymbol(GValue);
@@ -997,8 +997,8 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
   case PPC::MFOCRF:
   case PPC::MFOCRF8:
     if (!Subtarget->hasMFOCRF()) {
-      // Transform: %R3 = MFOCRF %CR7
-      // Into:      %R3 = MFCR   ;; cr7
+      // Transform: %r3 = MFOCRF %cr7
+      // Into:      %r3 = MFCR   ;; cr7
       unsigned NewOpcode =
         MI->getOpcode() == PPC::MFOCRF ? PPC::MFCR : PPC::MFCR8;
       OutStreamer->AddComment(PPCInstPrinter::
@@ -1011,8 +1011,8 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
   case PPC::MTOCRF:
   case PPC::MTOCRF8:
     if (!Subtarget->hasMFOCRF()) {
-      // Transform: %CR7 = MTOCRF %R3
-      // Into:      MTCRF mask, %R3 ;; cr7
+      // Transform: %cr7 = MTOCRF %r3
+      // Into:      MTCRF mask, %r3 ;; cr7
       unsigned NewOpcode =
         MI->getOpcode() == PPC::MTOCRF ? PPC::MTCRF : PPC::MTCRF8;
       unsigned Mask = 0x80 >> OutContext.getRegisterInfo()
diff --git a/llvm/lib/Target/PowerPC/PPCBranchCoalescing.cpp b/llvm/lib/Target/PowerPC/PPCBranchCoalescing.cpp
index 6e1cd1323e6..2af1913db55 100644
--- a/llvm/lib/Target/PowerPC/PPCBranchCoalescing.cpp
+++ b/llvm/lib/Target/PowerPC/PPCBranchCoalescing.cpp
@@ -60,11 +60,11 @@ namespace llvm {
 /// expands to the following machine code:
 ///
 /// BB#0: derived from LLVM BB %entry
-///    Live Ins: %F1 %F3 %X6
+///    Live Ins: %f1 %f3 %x6
 ///    <SNIP1>
-///    %vreg0<def> = COPY %F1; F8RC:%vreg0
+///    %vreg0<def> = COPY %f1; F8RC:%vreg0
 ///    %vreg5<def> = CMPLWI %vreg4<kill>, 0; CRRC:%vreg5 GPRC:%vreg4
-///    %vreg8<def> = LXSDX %ZERO8, %vreg7<kill>, %RM<imp-use>;
+///    %vreg8<def> = LXSDX %zero8, %vreg7<kill>, %rm<imp-use>;
 ///    mem:LD8[ConstantPool] F8RC:%vreg8 G8RC:%vreg7
 ///    BCC 76, %vreg5, <BB#2>; CRRC:%vreg5
 ///    Successors according to CFG: BB#1(?%) BB#2(?%)
@@ -90,7 +90,7 @@ namespace llvm {
 ///    %vreg13<def> = PHI %vreg12, <BB#3>, %vreg2, <BB#2>;
 ///    F8RC:%vreg13,%vreg12,%vreg2
 ///    <SNIP3>
-///    BLR8 %LR8<imp-use>, %RM<imp-use>, %F1<imp-use>
+///    BLR8 %lr8<imp-use>, %rm<imp-use>, %f1<imp-use>
 ///
 /// When this pattern is detected, branch coalescing will try to collapse
 /// it by moving code in BB#2 to BB#0 and/or BB#4 and removing BB#3.
@@ -98,11 +98,11 @@ namespace llvm {
 /// If all conditions are meet, IR should collapse to:
 ///
 /// BB#0: derived from LLVM BB %entry
-///    Live Ins: %F1 %F3 %X6
+///    Live Ins: %f1 %f3 %x6
 ///    <SNIP1>
-///    %vreg0<def> = COPY %F1; F8RC:%vreg0
+///    %vreg0<def> = COPY %f1; F8RC:%vreg0
 ///    %vreg5<def> = CMPLWI %vreg4<kill>, 0; CRRC:%vreg5 GPRC:%vreg4
-///    %vreg8<def> = LXSDX %ZERO8, %vreg7<kill>, %RM<imp-use>;
+///    %vreg8<def> = LXSDX %zero8, %vreg7<kill>, %rm<imp-use>;
 ///    mem:LD8[ConstantPool] F8RC:%vreg8 G8RC:%vreg7
 ///    <SNIP2>
 ///    BCC 76, %vreg5, <BB#4>; CRRC:%vreg5
@@ -120,7 +120,7 @@ namespace llvm {
 ///    %vreg13<def> = PHI %vreg12, <BB#1>, %vreg2, <BB#0>;
 ///    F8RC:%vreg13,%vreg12,%vreg2
 ///    <SNIP3>
-///    BLR8 %LR8<imp-use>, %RM<imp-use>, %F1<imp-use>
+///    BLR8 %lr8<imp-use>, %rm<imp-use>, %f1<imp-use>
 ///
 /// Branch Coalescing does not split blocks, it moves everything in the same
 /// direction ensuring it does not break use/definition semantics.
diff --git a/llvm/lib/Target/PowerPC/PPCFastISel.cpp b/llvm/lib/Target/PowerPC/PPCFastISel.cpp
index f34c2cd4285..402e29cdff7 100644
--- a/llvm/lib/Target/PowerPC/PPCFastISel.cpp
+++ b/llvm/lib/Target/PowerPC/PPCFastISel.cpp
@@ -1991,9 +1991,9 @@ unsigned PPCFastISel::PPCMaterializeGV(const GlobalValue *GV, MVT VT) {
   // or externally available linkage, a non-local function address, or a
   // jump table address (not yet needed), or if we are generating code
   // for large code model, we generate:
-  //       LDtocL(GV, ADDIStocHA(%X2, GV))
+  //       LDtocL(GV, ADDIStocHA(%x2, GV))
   // Otherwise we generate:
-  //       ADDItocL(ADDIStocHA(%X2, GV), GV)
+  //       ADDItocL(ADDIStocHA(%x2, GV), GV)
   // Either way, start with the ADDIStocHA:
   unsigned HighPartReg = createResultReg(RC);
   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ADDIStocHA),
diff --git a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index 531b95a662e..6289765c6b8 100644
--- a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -3218,9 +3218,9 @@ void PPCDAGToDAGISel::Select(SDNode *N) {
     // The first source operand is a TargetGlobalAddress or a TargetJumpTable.
     // If it must be toc-referenced according to PPCSubTarget, we generate:
-    //   LDtocL(<ga:@sym>, ADDIStocHA(%X2, <ga:@sym>))
+    //   LDtocL(<ga:@sym>, ADDIStocHA(%x2, <ga:@sym>))
     // Otherwise we generate:
-    //   ADDItocL(ADDIStocHA(%X2, <ga:@sym>), <ga:@sym>)
+    //   ADDItocL(ADDIStocHA(%x2, <ga:@sym>), <ga:@sym>)
     SDValue GA = N->getOperand(0);
     SDValue TOCbase = N->getOperand(1);
     SDNode *Tmp = CurDAG->getMachineNode(PPC::ADDIStocHA, dl, MVT::i64,
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h
index 22dd56b3338..adf77fa0646 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -262,7 +262,7 @@ namespace llvm {
       /// local dynamic TLS on PPC32.
       PPC32_PICGOT,

-      /// G8RC = ADDIS_GOT_TPREL_HA %X2, Symbol - Used by the initial-exec
+      /// G8RC = ADDIS_GOT_TPREL_HA %x2, Symbol - Used by the initial-exec
       /// TLS model, produces an ADDIS8 instruction that adds the GOT
       /// base to sym\@got\@tprel\@ha.
       ADDIS_GOT_TPREL_HA,
@@ -281,18 +281,18 @@ namespace llvm {
       /// TLS sequence.
       ADD_TLS,

-      /// G8RC = ADDIS_TLSGD_HA %X2, Symbol - For the general-dynamic TLS
+      /// G8RC = ADDIS_TLSGD_HA %x2, Symbol - For the general-dynamic TLS
       /// model, produces an ADDIS8 instruction that adds the GOT base
       /// register to sym\@got\@tlsgd\@ha.
       ADDIS_TLSGD_HA,

-      /// %X3 = ADDI_TLSGD_L G8RReg, Symbol - For the general-dynamic TLS
+      /// %x3 = ADDI_TLSGD_L G8RReg, Symbol - For the general-dynamic TLS
       /// model, produces an ADDI8 instruction that adds G8RReg to
       /// sym\@got\@tlsgd\@l and stores the result in X3.  Hidden by
       /// ADDIS_TLSGD_L_ADDR until after register assignment.
       ADDI_TLSGD_L,

-      /// %X3 = GET_TLS_ADDR %X3, Symbol - For the general-dynamic TLS
+      /// %x3 = GET_TLS_ADDR %x3, Symbol - For the general-dynamic TLS
       /// model, produces a call to __tls_get_addr(sym\@tlsgd).  Hidden by
       /// ADDIS_TLSGD_L_ADDR until after register assignment.
       GET_TLS_ADDR,
@@ -302,18 +302,18 @@ namespace llvm {
       /// register assignment.
       ADDI_TLSGD_L_ADDR,

-      /// G8RC = ADDIS_TLSLD_HA %X2, Symbol - For the local-dynamic TLS
+      /// G8RC = ADDIS_TLSLD_HA %x2, Symbol - For the local-dynamic TLS
       /// model, produces an ADDIS8 instruction that adds the GOT base
       /// register to sym\@got\@tlsld\@ha.
       ADDIS_TLSLD_HA,

-      /// %X3 = ADDI_TLSLD_L G8RReg, Symbol - For the local-dynamic TLS
+      /// %x3 = ADDI_TLSLD_L G8RReg, Symbol - For the local-dynamic TLS
       /// model, produces an ADDI8 instruction that adds G8RReg to
       /// sym\@got\@tlsld\@l and stores the result in X3.  Hidden by
       /// ADDIS_TLSLD_L_ADDR until after register assignment.
       ADDI_TLSLD_L,

-      /// %X3 = GET_TLSLD_ADDR %X3, Symbol - For the local-dynamic TLS
+      /// %x3 = GET_TLSLD_ADDR %x3, Symbol - For the local-dynamic TLS
       /// model, produces a call to __tls_get_addr(sym\@tlsld).  Hidden by
       /// ADDIS_TLSLD_L_ADDR until after register assignment.
       GET_TLSLD_ADDR,
@@ -323,7 +323,7 @@ namespace llvm {
       /// following register assignment.
       ADDI_TLSLD_L_ADDR,

-      /// G8RC = ADDIS_DTPREL_HA %X3, Symbol - For the local-dynamic TLS
+      /// G8RC = ADDIS_DTPREL_HA %x3, Symbol - For the local-dynamic TLS
       /// model, produces an ADDIS8 instruction that adds X3 to
       /// sym\@dtprel\@ha.
       ADDIS_DTPREL_HA,
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
index f25b929c808..a035ec621b6 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -2315,10 +2315,10 @@ PPCInstrInfo::isSignOrZeroExtended(const MachineInstr &MI, bool SignExt,

     // For a method return value, we check the ZExt/SExt flags in attribute.
     // We assume the following code sequence for method call.
-    //   ADJCALLSTACKDOWN 32, %R1<imp-def,dead>, %R1<imp-use>
+    //   ADJCALLSTACKDOWN 32, %r1<imp-def,dead>, %r1<imp-use>
     //   BL8_NOP <ga:@func>,...
-    //   ADJCALLSTACKUP 32, 0, %R1<imp-def,dead>, %R1<imp-use>
-    //   %vreg5<def> = COPY %X3; G8RC:%vreg5
+    //   ADJCALLSTACKUP 32, 0, %r1<imp-def,dead>, %r1<imp-use>
+    //   %vreg5<def> = COPY %x3; G8RC:%vreg5
     if (SrcReg == PPC::X3) {
       const MachineBasicBlock *MBB = MI.getParent();
       MachineBasicBlock::const_instr_iterator II =
diff --git a/llvm/lib/Target/PowerPC/PPCQPXLoadSplat.cpp b/llvm/lib/Target/PowerPC/PPCQPXLoadSplat.cpp
index bc8652393f4..10394166ddf 100644
--- a/llvm/lib/Target/PowerPC/PPCQPXLoadSplat.cpp
+++ b/llvm/lib/Target/PowerPC/PPCQPXLoadSplat.cpp
@@ -79,8 +79,8 @@ bool PPCQPXLoadSplat::runOnMachineFunction(MachineFunction &MF) {
       }

       // We're looking for a sequence like this:
-      // %F0<def> = LFD 0, %X3<kill>, %QF0<imp-def>; mem:LD8[%a](tbaa=!2)
-      // %QF1<def> = QVESPLATI %QF0<kill>, 0, %RM<imp-use>
+      // %f0<def> = LFD 0, %x3<kill>, %qf0<imp-def>; mem:LD8[%a](tbaa=!2)
+      // %qf1<def> = QVESPLATI %qf0<kill>, 0, %rm<imp-use>
       for (auto SI = Splats.begin(); SI != Splats.end();) {
         MachineInstr *SMI = *SI;
diff --git a/llvm/lib/Target/PowerPC/PPCVSXFMAMutate.cpp b/llvm/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
index a57484e5abd..80b63b1c9df 100644
--- a/llvm/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
+++ b/llvm/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
@@ -92,18 +92,18 @@ protected:
       //   ...
       //   %vreg5<def> = COPY %vreg9; VSLRC:%vreg5,%vreg9
       //   %vreg5<def,tied1> = XSMADDADP %vreg5<tied0>, %vreg17, %vreg16,
-      //                         %RM<imp-use>; VSLRC:%vreg5,%vreg17,%vreg16
+      //                         %rm<imp-use>; VSLRC:%vreg5,%vreg17,%vreg16
       //   ...
       //   %vreg9<def,tied1> = XSMADDADP %vreg9<tied0>, %vreg17, %vreg19,
-      //                         %RM<imp-use>; VSLRC:%vreg9,%vreg17,%vreg19
+      //                         %rm<imp-use>; VSLRC:%vreg9,%vreg17,%vreg19
       //   ...
       // Where we can eliminate the copy by changing from the A-type to the
       // M-type instruction. Specifically, for this example, this means:
       //   %vreg5<def,tied1> = XSMADDADP %vreg5<tied0>, %vreg17, %vreg16,
-      //                         %RM<imp-use>; VSLRC:%vreg5,%vreg17,%vreg16
+      //                         %rm<imp-use>; VSLRC:%vreg5,%vreg17,%vreg16
       // is replaced by:
       //   %vreg16<def,tied1> = XSMADDMDP %vreg16<tied0>, %vreg18, %vreg9,
-      //                         %RM<imp-use>; VSLRC:%vreg16,%vreg18,%vreg9
+      //                         %rm<imp-use>; VSLRC:%vreg16,%vreg18,%vreg9
       // and we remove: %vreg5<def> = COPY %vreg9; VSLRC:%vreg5,%vreg9

       SlotIndex FMAIdx = LIS->getInstructionIndex(MI);
@@ -150,7 +150,7 @@ protected:
       // walking the MIs we may as well test liveness here.
       //
       // FIXME: There is a case that occurs in practice, like this:
-      //   %vreg9<def> = COPY %F1; VSSRC:%vreg9
+      //   %vreg9<def> = COPY %f1; VSSRC:%vreg9
       //   ...
       //   %vreg6<def> = COPY %vreg9; VSSRC:%vreg6,%vreg9
       //   %vreg7<def> = COPY %vreg9; VSSRC:%vreg7,%vreg9
diff --git a/llvm/lib/Target/Sparc/SparcFrameLowering.cpp b/llvm/lib/Target/Sparc/SparcFrameLowering.cpp
index c07cc213c3e..9864aa37235 100644
--- a/llvm/lib/Target/Sparc/SparcFrameLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcFrameLowering.cpp
@@ -306,8 +306,8 @@ bool SparcFrameLowering::isLeafProc(MachineFunction &MF) const
   return !(MFI.hasCalls()                  // has calls
            || MRI.isPhysRegUsed(SP::L0)    // Too many registers needed
-           || MRI.isPhysRegUsed(SP::O6)    // %SP is used
-           || hasFP(MF));                  // need %FP
+           || MRI.isPhysRegUsed(SP::O6)    // %sp is used
+           || hasFP(MF));                  // need %fp
 }

 void SparcFrameLowering::remapRegsForLeafProc(MachineFunction &MF) const {
diff --git a/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp b/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp
index 449c2f8cb78..8009341eab7 100644
--- a/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp
@@ -436,8 +436,8 @@ bool SystemZElimCompare::optimizeCompareZero(
   // Also do a forward search to handle cases where an instruction after the
   // compare can be converted like
   //
-  // LTEBRCompare %F0S, %F0S, %CC<imp-def> LTEBRCompare %F0S, %F0S, %CC<imp-def>
-  // %F2S<def> = LER %F0S
+  // LTEBRCompare %f0s, %f0s, %cc<imp-def> LTEBRCompare %f0s, %f0s, %cc<imp-def>
+  // %f2s<def> = LER %f0s
   //
   MBBI = Compare, MBBE = MBB.end();
   while (++MBBI != MBBE) {
diff --git a/llvm/lib/Target/X86/README-SSE.txt b/llvm/lib/Target/X86/README-SSE.txt
index e6896e80556..ffc404d5e33 100644
--- a/llvm/lib/Target/X86/README-SSE.txt
+++ b/llvm/lib/Target/X86/README-SSE.txt
@@ -145,15 +145,15 @@ This is the llvm code after instruction scheduling:

 cond_next140 (0xa910740, LLVM BB @0xa90beb0):
 	%reg1078 = MOV32ri -3
-	%reg1079 = ADD32rm %reg1078, %reg1068, 1, %NOREG, 0
-	%reg1037 = MOV32rm %reg1024, 1, %NOREG, 40
+	%reg1079 = ADD32rm %reg1078, %reg1068, 1, %noreg, 0
+	%reg1037 = MOV32rm %reg1024, 1, %noreg, 40
 	%reg1080 = IMUL32rr %reg1079, %reg1037
-	%reg1081 = MOV32rm %reg1058, 1, %NOREG, 0
+	%reg1081 = MOV32rm %reg1058, 1, %noreg, 0
 	%reg1038 = LEA32r %reg1081, 1, %reg1080, -3
-	%reg1036 = MOV32rm %reg1024, 1, %NOREG, 32
+	%reg1036 = MOV32rm %reg1024, 1, %noreg, 32
 	%reg1082 = SHL32ri %reg1038, 4
 	%reg1039 = ADD32rr %reg1036, %reg1082
-	%reg1083 = MOVAPSrm %reg1059, 1, %NOREG, 0
+	%reg1083 = MOVAPSrm %reg1059, 1, %noreg, 0
 	%reg1034 = SHUFPSrr %reg1083, %reg1083, 170
 	%reg1032 = SHUFPSrr %reg1083, %reg1083, 0
 	%reg1035 = SHUFPSrr %reg1083, %reg1083, 255
@@ -166,32 +166,32 @@ cond_next140 (0xa910740, LLVM BB @0xa90beb0):

 Still ok.
 After register allocation:

 cond_next140 (0xa910740, LLVM BB @0xa90beb0):
-	%EAX = MOV32ri -3
-	%EDX = MOV32rm <fi#3>, 1, %NOREG, 0
-	ADD32rm %EAX<def&use>, %EDX, 1, %NOREG, 0
-	%EDX = MOV32rm <fi#7>, 1, %NOREG, 0
-	%EDX = MOV32rm %EDX, 1, %NOREG, 40
-	IMUL32rr %EAX<def&use>, %EDX
-	%ESI = MOV32rm <fi#5>, 1, %NOREG, 0
-	%ESI = MOV32rm %ESI, 1, %NOREG, 0
-	MOV32mr <fi#4>, 1, %NOREG, 0, %ESI
-	%EAX = LEA32r %ESI, 1, %EAX, -3
-	%ESI = MOV32rm <fi#7>, 1, %NOREG, 0
-	%ESI = MOV32rm %ESI, 1, %NOREG, 32
-	%EDI = MOV32rr %EAX
-	SHL32ri %EDI<def&use>, 4
-	ADD32rr %EDI<def&use>, %ESI
-	%XMM0 = MOVAPSrm %ECX, 1, %NOREG, 0
-	%XMM1 = MOVAPSrr %XMM0
-	SHUFPSrr %XMM1<def&use>, %XMM1, 170
-	%XMM2 = MOVAPSrr %XMM0
-	SHUFPSrr %XMM2<def&use>, %XMM2, 0
-	%XMM3 = MOVAPSrr %XMM0
-	SHUFPSrr %XMM3<def&use>, %XMM3, 255
-	SHUFPSrr %XMM0<def&use>, %XMM0, 85
-	%EBX = MOV32rr %EDI
-	AND32ri8 %EBX<def&use>, 15
-	CMP32ri8 %EBX, 0
+	%eax = MOV32ri -3
+	%edx = MOV32rm <fi#3>, 1, %noreg, 0
+	ADD32rm %eax<def&use>, %edx, 1, %noreg, 0
+	%edx = MOV32rm <fi#7>, 1, %noreg, 0
+	%edx = MOV32rm %edx, 1, %noreg, 40
+	IMUL32rr %eax<def&use>, %edx
+	%esi = MOV32rm <fi#5>, 1, %noreg, 0
+	%esi = MOV32rm %esi, 1, %noreg, 0
+	MOV32mr <fi#4>, 1, %noreg, 0, %esi
+	%eax = LEA32r %esi, 1, %eax, -3
+	%esi = MOV32rm <fi#7>, 1, %noreg, 0
+	%esi = MOV32rm %esi, 1, %noreg, 32
+	%edi = MOV32rr %eax
+	SHL32ri %edi<def&use>, 4
+	ADD32rr %edi<def&use>, %esi
+	%xmm0 = MOVAPSrm %ecx, 1, %noreg, 0
+	%xmm1 = MOVAPSrr %xmm0
+	SHUFPSrr %xmm1<def&use>, %xmm1, 170
+	%xmm2 = MOVAPSrr %xmm0
+	SHUFPSrr %xmm2<def&use>, %xmm2, 0
+	%xmm3 = MOVAPSrr %xmm0
+	SHUFPSrr %xmm3<def&use>, %xmm3, 255
+	SHUFPSrr %xmm0<def&use>, %xmm0, 85
+	%ebx = MOV32rr %edi
+	AND32ri8 %ebx<def&use>, 15
+	CMP32ri8 %ebx, 0
 	JE mbb<cond_next204,0xa914d30>

 This looks really bad. The problem is shufps is a destructive opcode. Since it
diff --git a/llvm/lib/Target/X86/README-X86-64.txt b/llvm/lib/Target/X86/README-X86-64.txt
index 09626e13849..13856486b14 100644
--- a/llvm/lib/Target/X86/README-X86-64.txt
+++ b/llvm/lib/Target/X86/README-X86-64.txt
@@ -103,13 +103,13 @@ LBB1_3:	## bb
 Before regalloc, we have:

-	%reg1025<def> = IMUL32rri8 %reg1024, 45, %EFLAGS<imp-def>
+	%reg1025<def> = IMUL32rri8 %reg1024, 45, %eflags<imp-def>
 	JMP mbb<bb2,0x203afb0>
 	Successors according to CFG: 0x203afb0 (#3)

 bb1: 0x203af60, LLVM BB @0x1e02310, ID#2:
 	Predecessors according to CFG: 0x203aec0 (#0)
-	%reg1026<def> = IMUL32rri8 %reg1024, 78, %EFLAGS<imp-def>
+	%reg1026<def> = IMUL32rri8 %reg1024, 78, %eflags<imp-def>
 	Successors according to CFG: 0x203afb0 (#3)

 bb2: 0x203afb0, LLVM BB @0x1e02340, ID#3:
diff --git a/llvm/lib/Target/X86/X86CallingConv.td b/llvm/lib/Target/X86/X86CallingConv.td
index 2de9a5fbfe9..5d806fe60b8 100644
--- a/llvm/lib/Target/X86/X86CallingConv.td
+++ b/llvm/lib/Target/X86/X86CallingConv.td
@@ -500,7 +500,7 @@ def CC_X86_64_C : CallingConv<[
   // A SwiftError is passed in R12.
   CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[R12]>>>,

-  // For Swift Calling Convention, pass sret in %RAX.
+  // For Swift Calling Convention, pass sret in %rax.
   CCIfCC<"CallingConv::Swift",
     CCIfSRet<CCIfType<[i64], CCAssignToReg<[RAX]>>>>,
diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
index 9ea7590ce3a..03be87e467a 100644
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -1976,9 +1976,9 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) {
   // Generate the DIV/IDIV instruction.
   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
           TII.get(OpEntry.OpDivRem)).addReg(Op1Reg);
-  // For i8 remainder, we can't reference AH directly, as we'll end
-  // up with bogus copies like %R9B = COPY %AH. Reference AX
-  // instead to prevent AH references in a REX instruction.
+  // For i8 remainder, we can't reference ah directly, as we'll end
+  // up with bogus copies like %r9b = COPY %ah. Reference ax
+  // instead to prevent ah references in a rex instruction.
   //
   // The current assumption of the fast register allocator is that isel
   // won't generate explicit references to the GR8_NOREX registers. If
diff --git a/llvm/lib/Target/X86/X86FixupBWInsts.cpp b/llvm/lib/Target/X86/X86FixupBWInsts.cpp
index 9664c931c35..ce559323efc 100644
--- a/llvm/lib/Target/X86/X86FixupBWInsts.cpp
+++ b/llvm/lib/Target/X86/X86FixupBWInsts.cpp
@@ -189,17 +189,17 @@ bool FixupBWInstPass::runOnMachineFunction(MachineFunction &MF) {
 /// So, it handles pattern like this:
 ///
 ///   BB#2: derived from LLVM BB %if.then
-///     Live Ins: %RDI
+///     Live Ins: %rdi
 ///     Predecessors according to CFG: BB#0
-///     %AX<def> = MOV16rm %RDI<kill>, 1, %noreg, 0, %noreg, %EAX<imp-def>; mem:LD2[%p]
-///     No %EAX<imp-use>
+///     %ax<def> = MOV16rm %rdi<kill>, 1, %noreg, 0, %noreg, %eax<imp-def>; mem:LD2[%p]
+///     No %eax<imp-use>
 ///     Successors according to CFG: BB#3(?%)
 ///
 ///   BB#3: derived from LLVM BB %if.end
-///     Live Ins: %EAX  Only %AX is actually live
+///     Live Ins: %eax  Only %ax is actually live
 ///     Predecessors according to CFG: BB#2 BB#1
-///     %AX<def> = KILL %AX, %EAX<imp-use,kill>
-///     RET 0, %AX
+///     %ax<def> = KILL %ax, %eax<imp-use,kill>
+///     RET 0, %ax
 static bool isLive(const MachineInstr &MI,
                    const LivePhysRegs &LiveRegs,
                    const TargetRegisterInfo *TRI,
diff --git a/llvm/lib/Target/X86/X86FloatingPoint.cpp b/llvm/lib/Target/X86/X86FloatingPoint.cpp
index d43f7a15409..6db02f0bd05 100644
--- a/llvm/lib/Target/X86/X86FloatingPoint.cpp
+++ b/llvm/lib/Target/X86/X86FloatingPoint.cpp
@@ -516,7 +516,7 @@ void FPS::setupBlockStack() {
   // Push the fixed live-in registers.
   for (unsigned i = Bundle.FixCount; i > 0; --i) {
-    DEBUG(dbgs() << "Live-in st(" << (i-1) << "): %FP"
+    DEBUG(dbgs() << "Live-in st(" << (i-1) << "): %fp"
                  << unsigned(Bundle.FixStack[i-1]) << '\n');
     pushReg(Bundle.FixStack[i-1]);
   }
@@ -893,7 +893,7 @@ void FPS::adjustLiveRegs(unsigned Mask, MachineBasicBlock::iterator I) {
   while (Kills && Defs) {
     unsigned KReg = countTrailingZeros(Kills);
     unsigned DReg = countTrailingZeros(Defs);
-    DEBUG(dbgs() << "Renaming %FP" << KReg << " as imp %FP" << DReg << "\n");
+    DEBUG(dbgs() << "Renaming %fp" << KReg << " as imp %fp" << DReg << "\n");
     std::swap(Stack[getSlot(KReg)], Stack[getSlot(DReg)]);
     std::swap(RegMap[KReg], RegMap[DReg]);
     Kills &= ~(1 << KReg);
@@ -907,7 +907,7 @@ void FPS::adjustLiveRegs(unsigned Mask, MachineBasicBlock::iterator I) {
       unsigned KReg = getStackEntry(0);
       if (!(Kills & (1 << KReg)))
         break;
-      DEBUG(dbgs() << "Popping %FP" << KReg << "\n");
+      DEBUG(dbgs() << "Popping %fp" << KReg << "\n");
       popStackAfter(I2);
       Kills &= ~(1 << KReg);
     }
@@ -916,7 +916,7 @@ void FPS::adjustLiveRegs(unsigned Mask, MachineBasicBlock::iterator I) {
   // Manually kill the rest.
   while (Kills) {
     unsigned KReg = countTrailingZeros(Kills);
-    DEBUG(dbgs() << "Killing %FP" << KReg << "\n");
+    DEBUG(dbgs() << "Killing %fp" << KReg << "\n");
     freeStackSlotBefore(I, KReg);
     Kills &= ~(1 << KReg);
   }
@@ -924,7 +924,7 @@ void FPS::adjustLiveRegs(unsigned Mask, MachineBasicBlock::iterator I) {
   // Load zeros for all the imp-defs.
   while(Defs) {
     unsigned DReg = countTrailingZeros(Defs);
-    DEBUG(dbgs() << "Defining %FP" << DReg << " as 0\n");
+    DEBUG(dbgs() << "Defining %fp" << DReg << " as 0\n");
     BuildMI(*MBB, I, DebugLoc(), TII->get(X86::LD_F0));
     pushReg(DReg);
     Defs &= ~(1 << DReg);
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index bd8d447fb88..c1414a1baa5 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -10879,7 +10879,7 @@ X86InstrInfo::getOutliningType(MachineInstr &MI) const {
   // FIXME: There are instructions which are being manually built without
   // explicit uses/defs so we also have to check the MCInstrDesc. We should be
   // able to remove the extra checks once those are fixed up. For example,
-  // sometimes we might get something like %RAX<def> = POP64r 1. This won't be
+  // sometimes we might get something like %rax<def> = POP64r 1. This won't be
   // caught by modifiesRegister or readsRegister even though the instruction
   // really ought to be formed so that modifiesRegister/readsRegister would
   // catch it.
diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp
index a0a34056bf5..f537dc18909 100644
--- a/llvm/lib/Target/X86/X86MCInstLower.cpp
+++ b/llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -961,7 +961,7 @@ void X86AsmPrinter::LowerPATCHABLE_OP(const MachineInstr &MI,
     // This is an optimization that lets us get away without emitting a nop in
     // many cases.
     //
-    // NB! In some cases the encoding for PUSH64r (e.g. PUSH64r %R9) takes two
+    // NB! In some cases the encoding for PUSH64r (e.g. PUSH64r %r9) takes two
     // bytes too, so the check on MinSize is important.
     MCI.setOpcode(X86::PUSH64rmr);
   } else {

