diff options
Diffstat (limited to 'llvm/lib/Target')
| -rw-r--r-- | llvm/lib/Target/AArch64/AArch64InstrInfo.cpp | 6 |
| -rw-r--r-- | llvm/lib/Target/X86/README-SSE.txt | 10 |
2 files changed, 8 insertions, 8 deletions
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp index 9e12b1a0e70..74aee126d45 100644 --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp @@ -2835,7 +2835,7 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl( // In this case we can still safely fold away the COPY and generate the // following spill code: // - // STRXui %xzr, <fi#0> + // STRXui %xzr, %stack.0 // // This also eliminates spilled cross register class COPYs (e.g. between x and // d regs) of the same size. For example: @@ -2891,7 +2891,7 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl( // where the physical register source can be widened and stored to the full // virtual reg destination stack slot, in this case producing: // - // STRXui %xzr, <fi#0> + // STRXui %xzr, %stack.0 // if (IsSpill && DstMO.isUndef() && TargetRegisterInfo::isPhysicalRegister(SrcReg)) { @@ -2939,7 +2939,7 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl( // where we can load the full virtual reg source stack slot, into the subreg // destination, in this case producing: // - // LDRWui %0:sub_32<def,read-undef>, <fi#0> + // LDRWui %0:sub_32<def,read-undef>, %stack.0 // if (IsFill && SrcMO.getSubReg() == 0 && DstMO.isUndef()) { const TargetRegisterClass *FillRC; diff --git a/llvm/lib/Target/X86/README-SSE.txt b/llvm/lib/Target/X86/README-SSE.txt index ffc404d5e33..73cf2769244 100644 --- a/llvm/lib/Target/X86/README-SSE.txt +++ b/llvm/lib/Target/X86/README-SSE.txt @@ -167,16 +167,16 @@ Still ok. 
After register allocation: cond_next140 (0xa910740, LLVM BB @0xa90beb0): %eax = MOV32ri -3 - %edx = MOV32rm <fi#3>, 1, %noreg, 0 + %edx = MOV32rm %stack.3, 1, %noreg, 0 ADD32rm %eax<def&use>, %edx, 1, %noreg, 0 - %edx = MOV32rm <fi#7>, 1, %noreg, 0 + %edx = MOV32rm %stack.7, 1, %noreg, 0 %edx = MOV32rm %edx, 1, %noreg, 40 IMUL32rr %eax<def&use>, %edx - %esi = MOV32rm <fi#5>, 1, %noreg, 0 + %esi = MOV32rm %stack.5, 1, %noreg, 0 %esi = MOV32rm %esi, 1, %noreg, 0 - MOV32mr <fi#4>, 1, %noreg, 0, %esi + MOV32mr %stack.4, 1, %noreg, 0, %esi %eax = LEA32r %esi, 1, %eax, -3 - %esi = MOV32rm <fi#7>, 1, %noreg, 0 + %esi = MOV32rm %stack.7, 1, %noreg, 0 %esi = MOV32rm %esi, 1, %noreg, 32 %edi = MOV32rr %eax SHL32ri %edi<def&use>, 4 |

