Diffstat (limited to 'llvm/lib/Target/X86')
-rw-r--r--  llvm/lib/Target/X86/X86CallFrameOptimization.cpp |  9
-rw-r--r--  llvm/lib/Target/X86/X86ExpandPseudo.cpp          |  4
-rw-r--r--  llvm/lib/Target/X86/X86FixupBWInsts.cpp          |  4
-rw-r--r--  llvm/lib/Target/X86/X86FixupLEAs.cpp             | 18
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp          | 82
-rw-r--r--  llvm/lib/Target/X86/X86InstrBuilder.h            |  2
-rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.cpp             | 90
7 files changed, 100 insertions, 109 deletions
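Every hunk below applies the same mechanical rename: MachineInstrBuilder::addOperand(MO) becomes MachineInstrBuilder::add(MO), with the surrounding BuildMI() chains only reflowed, not changed in behaviour. A minimal sketch of the pattern, assuming an LLVM tree from this transition period where the add(const MachineOperand &) spelling exists; copyOperandsTo is a hypothetical helper used only for illustration, not part of this commit:

// Illustrative only: copy every operand of MI onto a freshly built instruction.
// Before this commit the loop body would have been written MIB.addOperand(MO).
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Target/TargetInstrInfo.h"

using namespace llvm;

static void copyOperandsTo(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator InsertPt,
                           MachineInstr &MI, const TargetInstrInfo *TII,
                           unsigned NewOpcode) {
  MachineInstrBuilder MIB =
      BuildMI(MBB, InsertPt, MI.getDebugLoc(), TII->get(NewOpcode));
  for (MachineOperand &MO : MI.operands())
    MIB.add(MO); // new spelling; previously MIB.addOperand(MO)
}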
diff --git a/llvm/lib/Target/X86/X86CallFrameOptimization.cpp b/llvm/lib/Target/X86/X86CallFrameOptimization.cpp
index 78bd2add8c3..23606a39d1a 100644
--- a/llvm/lib/Target/X86/X86CallFrameOptimization.cpp
+++ b/llvm/lib/Target/X86/X86CallFrameOptimization.cpp
@@ -482,8 +482,7 @@ void X86CallFrameOptimization::adjustCallSequence(MachineFunction &MF,
       if (isInt<8>(Val))
         PushOpcode = Is64Bit ? X86::PUSH64i8 : X86::PUSH32i8;
     }
-    Push = BuildMI(MBB, Context.Call, DL, TII->get(PushOpcode))
-               .addOperand(PushOp);
+    Push = BuildMI(MBB, Context.Call, DL, TII->get(PushOpcode)).add(PushOp);
     break;
   case X86::MOV32mr:
   case X86::MOV64mr:
@@ -496,9 +495,9 @@ void X86CallFrameOptimization::adjustCallSequence(MachineFunction &MF,
       Reg = MRI->createVirtualRegister(&X86::GR64RegClass);
       BuildMI(MBB, Context.Call, DL, TII->get(X86::IMPLICIT_DEF), UndefReg);
       BuildMI(MBB, Context.Call, DL, TII->get(X86::INSERT_SUBREG), Reg)
-          .addReg(UndefReg)
-          .addOperand(PushOp)
-          .addImm(X86::sub_32bit);
+          .addReg(UndefReg)
+          .add(PushOp)
+          .addImm(X86::sub_32bit);
     }
 
     // If PUSHrmm is not slow on this target, try to fold the source of the
diff --git a/llvm/lib/Target/X86/X86ExpandPseudo.cpp b/llvm/lib/Target/X86/X86ExpandPseudo.cpp
index 192b942490e..c4bc29e963e 100644
--- a/llvm/lib/Target/X86/X86ExpandPseudo.cpp
+++ b/llvm/lib/Target/X86/X86ExpandPseudo.cpp
@@ -151,7 +151,7 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
                : (IsWin64 ? X86::TAILJMPm64_REX : X86::TAILJMPm64);
       MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(Op));
       for (unsigned i = 0; i != 5; ++i)
-        MIB.addOperand(MBBI->getOperand(i));
+        MIB.add(MBBI->getOperand(i));
     } else if (Opcode == X86::TCRETURNri64) {
       BuildMI(MBB, MBBI, DL,
               TII->get(IsWin64 ? X86::TAILJMPr64_REX : X86::TAILJMPr64))
@@ -214,7 +214,7 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
       MIB = BuildMI(MBB, MBBI, DL, TII->get(X86::RETL));
     }
     for (unsigned I = 1, E = MBBI->getNumOperands(); I != E; ++I)
-      MIB.addOperand(MBBI->getOperand(I));
+      MIB.add(MBBI->getOperand(I));
     MBB.erase(MBBI);
     return true;
   }
diff --git a/llvm/lib/Target/X86/X86FixupBWInsts.cpp b/llvm/lib/Target/X86/X86FixupBWInsts.cpp
index 8bde4bf98d6..3583980ff4f 100644
--- a/llvm/lib/Target/X86/X86FixupBWInsts.cpp
+++ b/llvm/lib/Target/X86/X86FixupBWInsts.cpp
@@ -226,7 +226,7 @@ MachineInstr *FixupBWInstPass::tryReplaceLoad(unsigned New32BitOpcode,
 
   unsigned NumArgs = MI->getNumOperands();
   for (unsigned i = 1; i < NumArgs; ++i)
-    MIB.addOperand(MI->getOperand(i));
+    MIB.add(MI->getOperand(i));
 
   MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
 
@@ -264,7 +264,7 @@ MachineInstr *FixupBWInstPass::tryReplaceCopy(MachineInstr *MI) const {
   // Drop imp-defs/uses that would be redundant with the new def/use.
   for (auto &Op : MI->implicit_operands())
     if (Op.getReg() != (Op.isDef() ? NewDestReg : NewSrcReg))
-      MIB.addOperand(Op);
+      MIB.add(Op);
 
   return MIB;
 }
diff --git a/llvm/lib/Target/X86/X86FixupLEAs.cpp b/llvm/lib/Target/X86/X86FixupLEAs.cpp
index 12095917ca3..2cd4c1a3e7b 100644
--- a/llvm/lib/Target/X86/X86FixupLEAs.cpp
+++ b/llvm/lib/Target/X86/X86FixupLEAs.cpp
@@ -120,8 +120,8 @@ FixupLEAPass::postRAConvertToLEA(MachineFunction::iterator &MFI,
     BuildMI(*MF, MI.getDebugLoc(),
             TII->get(MI.getOpcode() == X86::MOV32rr ?
                      X86::LEA32r : X86::LEA64r))
-        .addOperand(Dest)
-        .addOperand(Src)
+        .add(Dest)
+        .add(Src)
         .addImm(1)
         .addReg(0)
         .addImm(0)
@@ -287,8 +287,8 @@ bool FixupLEAPass::fixupIncDec(MachineBasicBlock::iterator &I,
 
     MachineInstr *NewMI =
         BuildMI(*MFI, I, MI.getDebugLoc(), TII->get(NewOpcode))
-            .addOperand(MI.getOperand(0))
-            .addOperand(MI.getOperand(1));
+            .add(MI.getOperand(0))
+            .add(MI.getOperand(1));
     MFI->erase(I);
     I = static_cast<MachineBasicBlock::iterator>(NewMI);
     return true;
@@ -377,9 +377,9 @@ void FixupLEAPass::processInstructionForSLM(MachineBasicBlock::iterator &I,
       const MachineOperand &Src1 = MI.getOperand(SrcR1 == DstR ? 1 : 3);
       const MachineOperand &Src2 = MI.getOperand(SrcR1 == DstR ? 3 : 1);
       NewMI = BuildMI(*MF, MI.getDebugLoc(), TII->get(addrr_opcode))
-                  .addOperand(Dst)
-                  .addOperand(Src1)
-                  .addOperand(Src2);
+                  .add(Dst)
+                  .add(Src1)
+                  .add(Src2);
       MFI->insert(I, NewMI);
       DEBUG(NewMI->dump(););
     }
@@ -387,8 +387,8 @@ void FixupLEAPass::processInstructionForSLM(MachineBasicBlock::iterator &I,
     if (MI.getOperand(4).getImm() != 0) {
       const MachineOperand &SrcR = MI.getOperand(SrcR1 == DstR ? 1 : 3);
       NewMI = BuildMI(*MF, MI.getDebugLoc(), TII->get(addri_opcode))
-                  .addOperand(Dst)
-                  .addOperand(SrcR)
+                  .add(Dst)
+                  .add(SrcR)
                   .addImm(MI.getOperand(4).getImm());
       MFI->insert(I, NewMI);
       DEBUG(NewMI->dump(););
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index e07e148ee95..8a054462e5f 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -24347,7 +24347,7 @@ static MachineBasicBlock *emitPCMPSTRM(MachineInstr &MI, MachineBasicBlock *BB,
   for (unsigned i = 1; i < NumArgs; ++i) {
     MachineOperand &Op = MI.getOperand(i);
     if (!(Op.isReg() && Op.isImplicit()))
-      MIB.addOperand(Op);
+      MIB.add(Op);
   }
   if (MI.hasOneMemOperand())
     MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
@@ -24383,7 +24383,7 @@ static MachineBasicBlock *emitPCMPSTRI(MachineInstr &MI, MachineBasicBlock *BB,
   for (unsigned i = 1; i < NumArgs; ++i) {
     MachineOperand &Op = MI.getOperand(i);
     if (!(Op.isReg() && Op.isImplicit()))
-      MIB.addOperand(Op);
+      MIB.add(Op);
   }
   if (MI.hasOneMemOperand())
     MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
@@ -24443,7 +24443,7 @@ static MachineBasicBlock *emitMonitor(MachineInstr &MI, MachineBasicBlock *BB,
   unsigned MemReg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
   MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg);
   for (int i = 0; i < X86::AddrNumOperands; ++i)
-    MIB.addOperand(MI.getOperand(i));
+    MIB.add(MI.getOperand(i));
 
   unsigned ValOps = X86::AddrNumOperands;
   BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
@@ -24581,12 +24581,12 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
     // Load the offset value into a register
     OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
     BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
-        .addOperand(Base)
-        .addOperand(Scale)
-        .addOperand(Index)
-        .addDisp(Disp, UseFPOffset ? 4 : 0)
-        .addOperand(Segment)
-        .setMemRefs(MMOBegin, MMOEnd);
+        .add(Base)
+        .add(Scale)
+        .add(Index)
+        .addDisp(Disp, UseFPOffset ? 4 : 0)
+        .add(Segment)
+        .setMemRefs(MMOBegin, MMOEnd);
 
     // Check if there is enough room left to pull this argument.
     BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
@@ -24606,12 +24606,12 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
     // Read the reg_save_area address.
     unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
     BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
-        .addOperand(Base)
-        .addOperand(Scale)
-        .addOperand(Index)
-        .addDisp(Disp, 16)
-        .addOperand(Segment)
-        .setMemRefs(MMOBegin, MMOEnd);
+        .add(Base)
+        .add(Scale)
+        .add(Index)
+        .addDisp(Disp, 16)
+        .add(Segment)
+        .setMemRefs(MMOBegin, MMOEnd);
 
     // Zero-extend the offset
     unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
@@ -24633,13 +24633,13 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
 
     // Store it back into the va_list.
     BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
-        .addOperand(Base)
-        .addOperand(Scale)
-        .addOperand(Index)
-        .addDisp(Disp, UseFPOffset ? 4 : 0)
-        .addOperand(Segment)
-        .addReg(NextOffsetReg)
-        .setMemRefs(MMOBegin, MMOEnd);
+        .add(Base)
+        .add(Scale)
+        .add(Index)
+        .addDisp(Disp, UseFPOffset ? 4 : 0)
+        .add(Segment)
+        .addReg(NextOffsetReg)
+        .setMemRefs(MMOBegin, MMOEnd);
 
     // Jump to endMBB
     BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
@@ -24653,12 +24653,12 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
     // Load the overflow_area address into a register.
     unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
     BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
-        .addOperand(Base)
-        .addOperand(Scale)
-        .addOperand(Index)
-        .addDisp(Disp, 8)
-        .addOperand(Segment)
-        .setMemRefs(MMOBegin, MMOEnd);
+        .add(Base)
+        .add(Scale)
+        .add(Index)
+        .addDisp(Disp, 8)
+        .add(Segment)
+        .setMemRefs(MMOBegin, MMOEnd);
 
     // If we need to align it, do so. Otherwise, just copy the address
     // to OverflowDestReg.
@@ -24689,13 +24689,13 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
 
     // Store the new overflow address.
     BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
-        .addOperand(Base)
-        .addOperand(Scale)
-        .addOperand(Index)
-        .addDisp(Disp, 8)
-        .addOperand(Segment)
-        .addReg(NextAddrReg)
-        .setMemRefs(MMOBegin, MMOEnd);
+        .add(Base)
+        .add(Scale)
+        .add(Index)
+        .addDisp(Disp, 8)
+        .add(Segment)
+        .addReg(NextAddrReg)
+        .setMemRefs(MMOBegin, MMOEnd);
 
   // If we branched, emit the PHI to the front of endMBB.
   if (offsetMBB) {
@@ -25168,12 +25168,12 @@ X86TargetLowering::EmitLoweredAtomicFP(MachineInstr &MI,
     // instruction using the same address operands.
     if (Operand.isReg())
       Operand.setIsKill(false);
-    MIB.addOperand(Operand);
+    MIB.add(Operand);
   }
   MachineInstr *FOpMI = MIB;
   MIB = BuildMI(*BB, MI, DL, TII->get(MOp));
   for (int i = 0; i < X86::AddrNumOperands; ++i)
-    MIB.addOperand(MI.getOperand(i));
+    MIB.add(MI.getOperand(i));
   MIB.addReg(FOpMI->getOperand(0).getReg(), RegState::Kill);
   MI.eraseFromParent(); // The pseudo instruction is gone now.
   return BB;
@@ -25553,7 +25553,7 @@ X86TargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
     if (i == X86::AddrDisp)
       MIB.addDisp(MI.getOperand(MemOpndSlot + i), LabelOffset);
     else
-      MIB.addOperand(MI.getOperand(MemOpndSlot + i));
+      MIB.add(MI.getOperand(MemOpndSlot + i));
   }
   if (!UseImmLabel)
     MIB.addReg(LabelReg);
@@ -25636,7 +25636,7 @@ X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
   // Reload FP
   MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), FP);
   for (unsigned i = 0; i < X86::AddrNumOperands; ++i)
-    MIB.addOperand(MI.getOperand(i));
+    MIB.add(MI.getOperand(i));
   MIB.setMemRefs(MMOBegin, MMOEnd);
   // Reload IP
   MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
@@ -25644,7 +25644,7 @@
     if (i == X86::AddrDisp)
       MIB.addDisp(MI.getOperand(i), LabelOffset);
     else
-      MIB.addOperand(MI.getOperand(i));
+      MIB.add(MI.getOperand(i));
   }
   MIB.setMemRefs(MMOBegin, MMOEnd);
   // Reload SP
@@ -25653,7 +25653,7 @@
     if (i == X86::AddrDisp)
      MIB.addDisp(MI.getOperand(i), SPOffset);
    else
-      MIB.addOperand(MI.getOperand(i));
+      MIB.add(MI.getOperand(i));
   }
   MIB.setMemRefs(MMOBegin, MMOEnd);
   // Jump
diff --git a/llvm/lib/Target/X86/X86InstrBuilder.h b/llvm/lib/Target/X86/X86InstrBuilder.h
index ba970bc2048..dcce7b9951f 100644
--- a/llvm/lib/Target/X86/X86InstrBuilder.h
+++ b/llvm/lib/Target/X86/X86InstrBuilder.h
@@ -147,7 +147,7 @@ addOffset(const MachineInstrBuilder &MIB, int Offset) {
 
 static inline const MachineInstrBuilder &
 addOffset(const MachineInstrBuilder &MIB, const MachineOperand& Offset) {
-  return MIB.addImm(1).addReg(0).addOperand(Offset).addReg(0);
+  return MIB.addImm(1).addReg(0).add(Offset).addReg(0);
 }
 
 /// addRegOffset - This function is used to add a memory reference of the form
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 44343f53074..8c5dbd51866 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -3565,7 +3565,7 @@ void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
 
     const DebugLoc &DL = Orig.getDebugLoc();
     BuildMI(MBB, I, DL, get(X86::MOV32ri))
-        .addOperand(Orig.getOperand(0))
+        .add(Orig.getOperand(0))
         .addImm(Value);
   } else {
     MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
@@ -3650,10 +3650,10 @@ bool X86InstrInfo::classifyLEAReg(MachineInstr &MI, const MachineOperand &Src,
     // Virtual register of the wrong class, we have to create a temporary 64-bit
     // vreg to feed into the LEA.
     NewSrc = MF.getRegInfo().createVirtualRegister(RC);
-    MachineInstr *Copy = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
-                                 get(TargetOpcode::COPY))
-        .addReg(NewSrc, RegState::Define | RegState::Undef, X86::sub_32bit)
-        .addOperand(Src);
+    MachineInstr *Copy =
+        BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(TargetOpcode::COPY))
+            .addReg(NewSrc, RegState::Define | RegState::Undef, X86::sub_32bit)
+            .add(Src);
 
     // Which is obviously going to be dead after we're done with it.
     isKill = true;
@@ -3819,10 +3819,10 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
       return nullptr;
 
     NewMI = BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r))
-                .addOperand(Dest)
+                .add(Dest)
                 .addReg(0)
                 .addImm(1ULL << ShAmt)
-                .addOperand(Src)
+                .add(Src)
                 .addImm(0)
                 .addReg(0);
     break;
@@ -3844,14 +3844,14 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
 
     MachineInstrBuilder MIB =
        BuildMI(MF, MI.getDebugLoc(), get(Opc))
-            .addOperand(Dest)
+            .add(Dest)
            .addReg(0)
            .addImm(1ULL << ShAmt)
            .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
            .addImm(0)
            .addReg(0);
     if (ImplicitOp.getReg() != 0)
-      MIB.addOperand(ImplicitOp);
+      MIB.add(ImplicitOp);
 
     NewMI = MIB;
     break;
@@ -3865,10 +3865,10 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
       return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV)
                      : nullptr;
     NewMI = BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r))
-                .addOperand(Dest)
+                .add(Dest)
                 .addReg(0)
                 .addImm(1ULL << ShAmt)
-                .addOperand(Src)
+                .add(Src)
                 .addImm(0)
                 .addReg(0);
     break;
@@ -3887,11 +3887,11 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
 
     MachineInstrBuilder MIB =
         BuildMI(MF, MI.getDebugLoc(), get(Opc))
-            .addOperand(Dest)
+            .add(Dest)
             .addReg(SrcReg, getKillRegState(isKill) |
                                 getUndefRegState(isUndef));
     if (ImplicitOp.getReg() != 0)
-      MIB.addOperand(ImplicitOp);
+      MIB.add(ImplicitOp);
 
     NewMI = addOffset(MIB, 1);
     break;
@@ -3901,10 +3901,8 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
      return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV)
                     : nullptr;
    assert(MI.getNumOperands() >= 2 && "Unknown inc instruction!");
-    NewMI = addOffset(BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r))
-                          .addOperand(Dest)
-                          .addOperand(Src),
-                      1);
+    NewMI = addOffset(
+        BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r)).add(Dest).add(Src), 1);
     break;
   case X86::DEC64r:
   case X86::DEC32r: {
@@ -3920,11 +3918,11 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
       return nullptr;
 
     MachineInstrBuilder MIB =
        BuildMI(MF, MI.getDebugLoc(), get(Opc))
-            .addOperand(Dest)
+            .add(Dest)
            .addReg(SrcReg, getUndefRegState(isUndef) |
                                getKillRegState(isKill));
     if (ImplicitOp.getReg() != 0)
-      MIB.addOperand(ImplicitOp);
+      MIB.add(ImplicitOp);
 
     NewMI = addOffset(MIB, -1);
@@ -3935,10 +3933,8 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
      return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV)
                     : nullptr;
    assert(MI.getNumOperands() >= 2 && "Unknown dec instruction!");
-    NewMI = addOffset(BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r))
-                          .addOperand(Dest)
-                          .addOperand(Src),
-                      -1);
+    NewMI = addOffset(
+        BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r)).add(Dest).add(Src), -1);
     break;
   case X86::ADD64rr:
   case X86::ADD64rr_DB:
@@ -3966,12 +3962,11 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                         SrcReg2, isKill2, isUndef2, ImplicitOp2, LV))
       return nullptr;
 
-    MachineInstrBuilder MIB =
-        BuildMI(MF, MI.getDebugLoc(), get(Opc)).addOperand(Dest);
+    MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc)).add(Dest);
     if (ImplicitOp.getReg() != 0)
-      MIB.addOperand(ImplicitOp);
+      MIB.add(ImplicitOp);
     if (ImplicitOp2.getReg() != 0)
-      MIB.addOperand(ImplicitOp2);
+      MIB.add(ImplicitOp2);
 
     NewMI = addRegReg(MIB, SrcReg, isKill, SrcReg2, isKill2);
 
@@ -3991,9 +3986,8 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
     assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
     unsigned Src2 = MI.getOperand(2).getReg();
     bool isKill2 = MI.getOperand(2).isKill();
-    NewMI = addRegReg(
-        BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r)).addOperand(Dest),
-        Src.getReg(), Src.isKill(), Src2, isKill2);
+    NewMI = addRegReg(BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r)).add(Dest),
+                      Src.getReg(), Src.isKill(), Src2, isKill2);
 
     // Preserve undefness of the operands.
     bool isUndef = MI.getOperand(1).isUndef();
@@ -4010,10 +4004,9 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
   case X86::ADD64ri32_DB:
   case X86::ADD64ri8_DB:
     assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
-    NewMI = addOffset(BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r))
-                          .addOperand(Dest)
-                          .addOperand(Src),
-                      MI.getOperand(2));
+    NewMI = addOffset(
+        BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r)).add(Dest).add(Src),
+        MI.getOperand(2));
     break;
   case X86::ADD32ri:
   case X86::ADD32ri8:
@@ -4030,11 +4023,11 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
       return nullptr;
 
     MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
-                                  .addOperand(Dest)
+                                  .add(Dest)
                                   .addReg(SrcReg, getUndefRegState(isUndef) |
                                                       getKillRegState(isKill));
     if (ImplicitOp.getReg() != 0)
-      MIB.addOperand(ImplicitOp);
+      MIB.add(ImplicitOp);
 
     NewMI = addOffset(MIB, MI.getOperand(2));
     break;
@@ -4047,10 +4040,9 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
      return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV)
                     : nullptr;
    assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
-    NewMI = addOffset(BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r))
-                          .addOperand(Dest)
-                          .addOperand(Src),
-                      MI.getOperand(2));
+    NewMI = addOffset(
+        BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r)).add(Dest).add(Src),
+        MI.getOperand(2));
     break;
   }
 
@@ -6040,7 +6032,7 @@ void X86InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
   DebugLoc DL;
   MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc));
   for (unsigned i = 0, e = Addr.size(); i != e; ++i)
-    MIB.addOperand(Addr[i]);
+    MIB.add(Addr[i]);
   MIB.addReg(SrcReg, getKillRegState(isKill));
   (*MIB).setMemRefs(MMOBegin, MMOEnd);
   NewMIs.push_back(MIB);
@@ -6075,7 +6067,7 @@ void X86InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
   DebugLoc DL;
   MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg);
   for (unsigned i = 0, e = Addr.size(); i != e; ++i)
-    MIB.addOperand(Addr[i]);
+    MIB.add(Addr[i]);
   (*MIB).setMemRefs(MMOBegin, MMOEnd);
   NewMIs.push_back(MIB);
 }
@@ -6935,7 +6927,7 @@ static void addOperands(MachineInstrBuilder &MIB, ArrayRef<MachineOperand> MOs,
   if (NumAddrOps < 4) {
     // FrameIndex only - add an immediate offset (whether its zero or not).
     for (unsigned i = 0; i != NumAddrOps; ++i)
-      MIB.addOperand(MOs[i]);
+      MIB.add(MOs[i]);
     addOffset(MIB, PtrOffset);
   } else {
     // General Memory Addressing - we need to add any offset to an existing
@@ -6946,7 +6938,7 @@ static void addOperands(MachineInstrBuilder &MIB, ArrayRef<MachineOperand> MOs,
       if (i == 3 && PtrOffset != 0) {
         MIB.addDisp(MO, PtrOffset);
       } else {
-        MIB.addOperand(MO);
+        MIB.add(MO);
       }
     }
   }
@@ -6968,11 +6960,11 @@ static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
   unsigned NumOps = MI.getDesc().getNumOperands() - 2;
   for (unsigned i = 0; i != NumOps; ++i) {
     MachineOperand &MO = MI.getOperand(i + 2);
-    MIB.addOperand(MO);
+    MIB.add(MO);
   }
   for (unsigned i = NumOps + 2, e = MI.getNumOperands(); i != e; ++i) {
     MachineOperand &MO = MI.getOperand(i);
-    MIB.addOperand(MO);
+    MIB.add(MO);
   }
 
   MachineBasicBlock *MBB = InsertPt->getParent();
@@ -6997,7 +6989,7 @@ static MachineInstr *FuseInst(MachineFunction &MF, unsigned Opcode,
       assert(MO.isReg() && "Expected to fold into reg operand!");
       addOperands(MIB, MOs, PtrOffset);
     } else {
-      MIB.addOperand(MO);
+      MIB.add(MO);
     }
   }
 
@@ -7875,11 +7867,11 @@ bool X86InstrInfo::unfoldMemoryOperand(
   if (FoldedStore)
     MIB.addReg(Reg, RegState::Define);
   for (MachineOperand &BeforeOp : BeforeOps)
-    MIB.addOperand(BeforeOp);
+    MIB.add(BeforeOp);
   if (FoldedLoad)
     MIB.addReg(Reg);
   for (MachineOperand &AfterOp : AfterOps)
-    MIB.addOperand(AfterOp);
+    MIB.add(AfterOp);
   for (MachineOperand &ImpOp : ImpOps) {
     MIB.addReg(ImpOp.getReg(), getDefRegState(ImpOp.isDef()) |