Diffstat (limited to 'llvm/lib/Target/X86/X86FastISel.cpp')
-rw-r--r-- | llvm/lib/Target/X86/X86FastISel.cpp | 246
1 file changed, 59 insertions, 187 deletions
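Nearly every hunk below makes the same mechanical change: instructions that were built against an explicit insertion point (*FuncInfo.MBB, FuncInfo.InsertPt) are built against the selector's current block MBB again, which simply appends at the end of that block. The sketch below contrasts the two BuildMI overloads involved. It is illustrative only and not part of the patch: emitExample and its parameters are hypothetical placeholders, and exact header paths and descriptor types depend on the LLVM revision in this tree.

// Illustrative sketch, not part of the patch: the two BuildMI forms that the
// hunks below switch between. emitExample and its arguments are placeholders.
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Target/TargetInstrInfo.h"

using namespace llvm;

static void emitExample(MachineBasicBlock *MBB,               // current block
                        MachineBasicBlock::iterator InsertPt, // explicit point
                        const TargetInstrInfo &TII, DebugLoc DL,
                        unsigned Opc, unsigned DstReg, unsigned SrcReg) {
  // Form the patch goes back to: append the new instruction at the end of MBB.
  BuildMI(MBB, DL, TII.get(Opc), DstReg).addReg(SrcReg);

  // Form the patch removes: insert before an explicit iterator within *MBB.
  BuildMI(*MBB, InsertPt, DL, TII.get(Opc), DstReg).addReg(SrcReg);
}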
diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
index 8300715c97a..f60cbcdf9b4 100644
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -23,7 +23,6 @@
 #include "llvm/GlobalVariable.h"
 #include "llvm/Instructions.h"
 #include "llvm/IntrinsicInst.h"
-#include "llvm/CodeGen/Analysis.h"
 #include "llvm/CodeGen/FastISel.h"
 #include "llvm/CodeGen/FunctionLoweringInfo.h"
 #include "llvm/CodeGen/MachineConstantPool.h"
@@ -85,8 +84,6 @@ private:
 
   bool X86SelectStore(const Instruction *I);
 
-  bool X86SelectRet(const Instruction *I);
-
   bool X86SelectCmp(const Instruction *I);
 
   bool X86SelectZExt(const Instruction *I);
@@ -108,7 +105,6 @@ private:
   bool X86SelectCall(const Instruction *I);
 
   CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool isTailCall = false);
-  CCAssignFn *CCAssignFnForRet(CallingConv::ID CC, bool isTailCall = false);
 
   const X86InstrInfo *getInstrInfo() const {
     return getTargetMachine()->getInstrInfo();
@@ -182,20 +178,6 @@ CCAssignFn *X86FastISel::CCAssignFnForCall(CallingConv::ID CC,
   return CC_X86_32_C;
 }
 
-/// CCAssignFnForRet - Selects the correct CCAssignFn for a given calling
-/// convention.
-CCAssignFn *X86FastISel::CCAssignFnForRet(CallingConv::ID CC,
-                                          bool isTaillCall) {
-  if (Subtarget->is64Bit()) {
-    if (Subtarget->isTargetWin64())
-      return RetCC_X86_Win64_C;
-    else
-      return RetCC_X86_64_C;
-  }
-
-  return RetCC_X86_32_C;
-}
-
 /// X86FastEmitLoad - Emit a machine instruction to load a value of type VT.
 /// The address is either pre-computed, i.e. Ptr, or a GlobalAddress, i.e. GV.
 /// Return true and the result register by reference if it is possible.
@@ -248,8 +230,7 @@ bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM,
   }
 
   ResultReg = createResultReg(RC);
-  addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
-                         DL, TII.get(Opc), ResultReg), AM);
+  addFullAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
   return true;
 }
 
@@ -268,7 +249,7 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned Val,
   case MVT::i1: {
     // Mask out all but lowest bit.
     unsigned AndResult = createResultReg(X86::GR8RegisterClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+    BuildMI(MBB, DL,
             TII.get(X86::AND8ri), AndResult).addReg(Val).addImm(1);
     Val = AndResult;
   }
@@ -285,8 +266,7 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned Val,
     break;
   }
 
-  addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
-                         DL, TII.get(Opc)), AM).addReg(Val);
+  addFullAddress(BuildMI(MBB, DL, TII.get(Opc)), AM).addReg(Val);
   return true;
 }
 
@@ -314,8 +294,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
   }
 
   if (Opc) {
-    addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
-                           DL, TII.get(Opc)), AM)
+    addFullAddress(BuildMI(MBB, DL, TII.get(Opc)), AM)
       .addImm(Signed ? (uint64_t) CI->getSExtValue() :
                        CI->getZExtValue());
     return true;
@@ -354,7 +333,7 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
     // Don't walk into other basic blocks; it's possible we haven't
     // visited them yet, so the instructions may not yet be assigned
    // virtual registers.
-    if (FuncInfo.MBBMap[I->getParent()] != FuncInfo.MBB)
+    if (FuncInfo.MBBMap[I->getParent()] != MBB)
      return false;
 
    Opcode = I->getOpcode();
@@ -551,8 +530,7 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
      }
 
      LoadReg = createResultReg(RC);
-      addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
-                             DL, TII.get(Opc), LoadReg), StubAM);
+      addFullAddress(BuildMI(MBB, DL, TII.get(Opc), LoadReg), StubAM);
 
      // Prevent loading GV stub multiple times in same MBB.
      LocalValueMap[V] = LoadReg;
@@ -678,72 +656,6 @@ bool X86FastISel::X86SelectStore(const Instruction *I) {
   return X86FastEmitStore(VT, I->getOperand(0), AM);
 }
 
-/// X86SelectRet - Select and emit code to implement ret instructions.
-bool X86FastISel::X86SelectRet(const Instruction *I) {
-  const ReturnInst *Ret = cast<ReturnInst>(I);
-  const Function &F = *I->getParent()->getParent();
-
-  if (!FuncInfo.CanLowerReturn)
-    return false;
-
-  CallingConv::ID CC = F.getCallingConv();
-  if (CC != CallingConv::C &&
-      CC != CallingConv::Fast &&
-      CC != CallingConv::X86_FastCall)
-    return false;
-
-  if (Subtarget->isTargetWin64())
-    return false;
-
-  // fastcc with -tailcallopt is intended to provide a guaranteed
-  // tail call optimization. Fastisel doesn't know how to do that.
-  if (CC == CallingConv::Fast && GuaranteedTailCallOpt)
-    return false;
-
-  // Let SDISel handle vararg functions.
-  if (F.isVarArg())
-    return false;
-
-  if (Ret->getNumOperands() > 0) {
-    SmallVector<ISD::OutputArg, 4> Outs;
-    GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
-                  Outs, TLI);
-
-    // Analyze operands of the call, assigning locations to each operand.
-    SmallVector<CCValAssign, 16> ValLocs;
-    CCState CCInfo(CC, F.isVarArg(), TM, ValLocs, I->getContext());
-    CCInfo.AnalyzeReturn(Outs, CCAssignFnForRet(CC));
-
-    const Value *RV = Ret->getOperand(0);
-    unsigned Reg = getRegForValue(RV);
-    if (Reg == 0)
-      return false;
-
-    // Copy the return value into registers.
-    for (unsigned i = 0, e = ValLocs.size(); i != e; ++i) {
-      CCValAssign &VA = ValLocs[i];
-
-      // Don't bother handling odd stuff for now.
-      if (VA.getLocInfo() != CCValAssign::Full)
-        return false;
-      if (!VA.isRegLoc())
-        return false;
-
-      TargetRegisterClass* RC = TLI.getRegClassFor(VA.getValVT());
-      bool Emitted = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                      VA.getLocReg(), Reg + VA.getValNo(),
-                                      RC, RC, DL);
-      assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted;
-
-      MRI.addLiveOut(VA.getLocReg());
-    }
-  }
-
-  // Now emit the RET.
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::RET));
-  return true;
-}
-
 /// X86SelectLoad - Select and emit code to implement load instructions.
 ///
 bool X86FastISel::X86SelectLoad(const Instruction *I) {
@@ -808,9 +720,8 @@ bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1,
   // CMPri, otherwise use CMPrr.
   if (const ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
     if (unsigned CompareImmOpc = X86ChooseCmpImmediateOpcode(VT, Op1C)) {
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CompareImmOpc))
-        .addReg(Op0Reg)
-        .addImm(Op1C->getSExtValue());
+      BuildMI(MBB, DL, TII.get(CompareImmOpc)).addReg(Op0Reg)
+                                              .addImm(Op1C->getSExtValue());
       return true;
     }
   }
@@ -820,9 +731,7 @@ bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1,
 
   unsigned Op1Reg = getRegForValue(Op1);
   if (Op1Reg == 0) return false;
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CompareOpc))
-    .addReg(Op0Reg)
-    .addReg(Op1Reg);
+  BuildMI(MBB, DL, TII.get(CompareOpc)).addReg(Op0Reg).addReg(Op1Reg);
   return true;
 }
 
@@ -844,10 +753,9 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
 
     unsigned EReg = createResultReg(&X86::GR8RegClass);
     unsigned NPReg = createResultReg(&X86::GR8RegClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::SETEr), EReg);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-            TII.get(X86::SETNPr), NPReg);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+    BuildMI(MBB, DL, TII.get(X86::SETEr), EReg);
+    BuildMI(MBB, DL, TII.get(X86::SETNPr), NPReg);
+    BuildMI(MBB, DL,
             TII.get(X86::AND8rr), ResultReg).addReg(NPReg).addReg(EReg);
     UpdateValueMap(I, ResultReg);
     return true;
@@ -858,13 +766,9 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
 
    unsigned NEReg = createResultReg(&X86::GR8RegClass);
    unsigned PReg = createResultReg(&X86::GR8RegClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-            TII.get(X86::SETNEr), NEReg);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-            TII.get(X86::SETPr), PReg);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-            TII.get(X86::OR8rr), ResultReg)
-      .addReg(PReg).addReg(NEReg);
+    BuildMI(MBB, DL, TII.get(X86::SETNEr), NEReg);
+    BuildMI(MBB, DL, TII.get(X86::SETPr), PReg);
+    BuildMI(MBB, DL, TII.get(X86::OR8rr), ResultReg).addReg(PReg).addReg(NEReg);
    UpdateValueMap(I, ResultReg);
    return true;
   }
@@ -903,7 +807,7 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
   if (!X86FastEmitCompare(Op0, Op1, VT))
     return false;
 
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(SetCCOpc), ResultReg);
+  BuildMI(MBB, DL, TII.get(SetCCOpc), ResultReg);
   UpdateValueMap(I, ResultReg);
   return true;
 }
@@ -939,7 +843,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
 
       // Try to take advantage of fallthrough opportunities.
       CmpInst::Predicate Predicate = CI->getPredicate();
-      if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {
+      if (MBB->isLayoutSuccessor(TrueMBB)) {
        std::swap(TrueMBB, FalseMBB);
        Predicate = CmpInst::getInversePredicate(Predicate);
      }
@@ -988,18 +892,16 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
      if (!X86FastEmitCompare(Op0, Op1, VT))
        return false;
 
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BranchOpc))
-        .addMBB(TrueMBB);
+      BuildMI(MBB, DL, TII.get(BranchOpc)).addMBB(TrueMBB);
 
      if (Predicate == CmpInst::FCMP_UNE) {
        // X86 requires a second branch to handle UNE (and OEQ,
        // which is mapped to UNE above).
-        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::JP_4))
-          .addMBB(TrueMBB);
+        BuildMI(MBB, DL, TII.get(X86::JP_4)).addMBB(TrueMBB);
      }
 
      FastEmitBranch(FalseMBB, DL);
-      FuncInfo.MBB->addSuccessor(TrueMBB);
+      MBB->addSuccessor(TrueMBB);
      return true;
    }
   } else if (ExtractValueInst *EI =
@@ -1025,8 +927,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
 
      unsigned Reg = getRegForValue(EI);
      for (MachineBasicBlock::const_reverse_iterator
-             RI = FuncInfo.MBB->rbegin(), RE = FuncInfo.MBB->rend();
-           RI != RE; ++RI) {
+             RI = MBB->rbegin(), RE = MBB->rend(); RI != RE; ++RI) {
        const MachineInstr &MI = *RI;
 
        if (MI.definesRegister(Reg)) {
@@ -1051,11 +952,11 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
          unsigned OpCode = SetMI->getOpcode();
 
          if (OpCode == X86::SETOr || OpCode == X86::SETBr) {
-            BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-                    TII.get(OpCode == X86::SETOr ? X86::JO_4 : X86::JB_4))
+            BuildMI(MBB, DL, TII.get(OpCode == X86::SETOr ?
+                                     X86::JO_4 : X86::JB_4))
              .addMBB(TrueMBB);
            FastEmitBranch(FalseMBB, DL);
-            FuncInfo.MBB->addSuccessor(TrueMBB);
+            MBB->addSuccessor(TrueMBB);
            return true;
          }
        }
@@ -1067,12 +968,10 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
 
   unsigned OpReg = getRegForValue(BI->getCondition());
   if (OpReg == 0) return false;
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TEST8rr))
-    .addReg(OpReg).addReg(OpReg);
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::JNE_4))
-    .addMBB(TrueMBB);
+  BuildMI(MBB, DL, TII.get(X86::TEST8rr)).addReg(OpReg).addReg(OpReg);
+  BuildMI(MBB, DL, TII.get(X86::JNE_4)).addMBB(TrueMBB);
   FastEmitBranch(FalseMBB, DL);
-  FuncInfo.MBB->addSuccessor(TrueMBB);
+  MBB->addSuccessor(TrueMBB);
   return true;
 }
 
@@ -1129,7 +1028,7 @@ bool X86FastISel::X86SelectShift(const Instruction *I) {
   // Fold immediate in shl(x,3).
   if (const ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
     unsigned ResultReg = createResultReg(RC);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpImm),
+    BuildMI(MBB, DL, TII.get(OpImm),
             ResultReg).addReg(Op0Reg).addImm(CI->getZExtValue() & 0xff);
     UpdateValueMap(I, ResultReg);
     return true;
@@ -1137,20 +1036,17 @@ bool X86FastISel::X86SelectShift(const Instruction *I) {
 
   unsigned Op1Reg = getRegForValue(I->getOperand(1));
   if (Op1Reg == 0) return false;
-  TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                   CReg, Op1Reg, RC, RC, DL);
+  TII.copyRegToReg(*MBB, MBB->end(), CReg, Op1Reg, RC, RC, DL);
 
   // The shift instruction uses X86::CL. If we defined a super-register
   // of X86::CL, emit an EXTRACT_SUBREG to precisely describe what
   // we're doing here.
   if (CReg != X86::CL)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-            TII.get(TargetOpcode::EXTRACT_SUBREG), X86::CL)
+    BuildMI(MBB, DL, TII.get(TargetOpcode::EXTRACT_SUBREG), X86::CL)
      .addReg(CReg).addImm(X86::sub_8bit);
 
   unsigned ResultReg = createResultReg(RC);
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpReg), ResultReg)
-    .addReg(Op0Reg);
+  BuildMI(MBB, DL, TII.get(OpReg), ResultReg).addReg(Op0Reg);
   UpdateValueMap(I, ResultReg);
   return true;
 }
@@ -1182,11 +1078,9 @@ bool X86FastISel::X86SelectSelect(const Instruction *I) {
   unsigned Op2Reg = getRegForValue(I->getOperand(2));
   if (Op2Reg == 0) return false;
 
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TEST8rr))
-    .addReg(Op0Reg).addReg(Op0Reg);
+  BuildMI(MBB, DL, TII.get(X86::TEST8rr)).addReg(Op0Reg).addReg(Op0Reg);
   unsigned ResultReg = createResultReg(RC);
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
-    .addReg(Op1Reg).addReg(Op2Reg);
+  BuildMI(MBB, DL, TII.get(Opc), ResultReg).addReg(Op1Reg).addReg(Op2Reg);
   UpdateValueMap(I, ResultReg);
   return true;
 }
@@ -1200,9 +1094,7 @@ bool X86FastISel::X86SelectFPExt(const Instruction *I) {
      unsigned OpReg = getRegForValue(V);
      if (OpReg == 0) return false;
      unsigned ResultReg = createResultReg(X86::FR64RegisterClass);
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-              TII.get(X86::CVTSS2SDrr), ResultReg)
-        .addReg(OpReg);
+      BuildMI(MBB, DL, TII.get(X86::CVTSS2SDrr), ResultReg).addReg(OpReg);
      UpdateValueMap(I, ResultReg);
      return true;
    }
@@ -1219,9 +1111,7 @@ bool X86FastISel::X86SelectFPTrunc(const Instruction *I) {
        unsigned OpReg = getRegForValue(V);
        if (OpReg == 0) return false;
        unsigned ResultReg = createResultReg(X86::FR32RegisterClass);
-        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-                TII.get(X86::CVTSD2SSrr), ResultReg)
-          .addReg(OpReg);
+        BuildMI(MBB, DL, TII.get(X86::CVTSD2SSrr), ResultReg).addReg(OpReg);
        UpdateValueMap(I, ResultReg);
        return true;
      }
@@ -1256,8 +1146,7 @@ bool X86FastISel::X86SelectTrunc(const Instruction *I) {
   const TargetRegisterClass *CopyRC = (SrcVT == MVT::i16) ?
     X86::GR16_ABCDRegisterClass : X86::GR32_ABCDRegisterClass;
   unsigned CopyReg = createResultReg(CopyRC);
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CopyOpc), CopyReg)
-    .addReg(InputReg);
+  BuildMI(MBB, DL, TII.get(CopyOpc), CopyReg).addReg(InputReg);
 
   // Then issue an extract_subreg.
   unsigned ResultReg = FastEmitInst_extractsubreg(MVT::i8,
@@ -1278,18 +1167,14 @@ bool X86FastISel::X86SelectExtractValue(const Instruction *I) {
    switch (CI->getIntrinsicID()) {
    default: break;
    case Intrinsic::sadd_with_overflow:
-    case Intrinsic::uadd_with_overflow: {
+    case Intrinsic::uadd_with_overflow:
      // Cheat a little. We know that the registers for "add" and "seto" are
      // allocated sequentially. However, we only keep track of the register
      // for "add" in the value map. Use extractvalue's index to get the
      // correct register for "seto".
-      unsigned OpReg = getRegForValue(Agg);
-      if (OpReg == 0)
-        return false;
-      UpdateValueMap(I, OpReg + *EI->idx_begin());
+      UpdateValueMap(I, lookUpRegForValue(Agg) + *EI->idx_begin());
      return true;
    }
-    }
   }
 
   return false;
@@ -1333,7 +1218,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
      return false;
 
    unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpC), ResultReg).
+    BuildMI(MBB, DL, TII.get(OpC), ResultReg).
                                  addImm(CI->isZero() ? -1ULL : 0);
    UpdateValueMap(&I, ResultReg);
    return true;
@@ -1347,12 +1232,12 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
    const TargetInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    // FIXME may need to add RegState::Debug to any registers produced,
    // although ESP/EBP should be the only ones at the moment.
-    addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II), AM).
-      addImm(0).addMetadata(DI->getVariable());
+    addFullAddress(BuildMI(MBB, DL, II), AM).addImm(0).
+                                        addMetadata(DI->getVariable());
    return true;
   }
   case Intrinsic::trap: {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TRAP));
+    BuildMI(MBB, DL, TII.get(X86::TRAP));
    return true;
   }
   case Intrinsic::sadd_with_overflow:
@@ -1388,8 +1273,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
      return false;
 
    unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpC), ResultReg)
-      .addReg(Reg1).addReg(Reg2);
+    BuildMI(MBB, DL, TII.get(OpC), ResultReg).addReg(Reg1).addReg(Reg2);
    unsigned DestReg1 = UpdateValueMap(&I, ResultReg);
 
    // If the add with overflow is an intra-block value then we just want to
@@ -1407,7 +1291,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
    unsigned Opc = X86::SETBr;
    if (I.getIntrinsicID() == Intrinsic::sadd_with_overflow)
      Opc = X86::SETOr;
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg);
+    BuildMI(MBB, DL, TII.get(Opc), ResultReg);
    return true;
   }
   }
@@ -1534,8 +1418,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
 
   // Issue CALLSEQ_START
   unsigned AdjStackDown = TM.getRegisterInfo()->getCallFrameSetupOpcode();
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(AdjStackDown))
-    .addImm(NumBytes);
+  BuildMI(MBB, DL, TII.get(AdjStackDown)).addImm(NumBytes);
 
   // Process argument: walk the register/memloc assignments, inserting
   // copies / loads.
@@ -1591,8 +1474,8 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
 
    if (VA.isRegLoc()) {
      TargetRegisterClass* RC = TLI.getRegClassFor(ArgVT);
-      bool Emitted = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                      VA.getLocReg(), Arg, RC, RC, DL);
+      bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), VA.getLocReg(),
+                                      Arg, RC, RC, DL);
      assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted;
      Emitted = true;
      RegArgs.push_back(VA.getLocReg());
@@ -1618,8 +1501,8 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
   if (Subtarget->isPICStyleGOT()) {
    TargetRegisterClass *RC = X86::GR32RegisterClass;
    unsigned Base = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
-    bool Emitted = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                    X86::EBX, Base, RC, RC, DL);
+    bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), X86::EBX, Base, RC, RC,
+                                    DL);
    assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted;
    Emitted = true;
   }
@@ -1629,8 +1512,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
   if (CalleeOp) {
    // Register-indirect call.
    unsigned CallOpc = Subtarget->is64Bit() ? X86::CALL64r : X86::CALL32r;
-    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc))
-      .addReg(CalleeOp);
+    MIB = BuildMI(MBB, DL, TII.get(CallOpc)).addReg(CalleeOp);
   } else {
    // Direct call.
 
@@ -1659,8 +1541,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
 
    }
 
-    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc))
-      .addGlobalAddress(GV, 0, OpFlags);
+    MIB = BuildMI(MBB, DL, TII.get(CallOpc)).addGlobalAddress(GV, 0, OpFlags);
   }
 
   // Add an implicit use GOT pointer in EBX.
@@ -1673,8 +1554,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
 
   // Issue CALLSEQ_END
   unsigned AdjStackUp = TM.getRegisterInfo()->getCallFrameDestroyOpcode();
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(AdjStackUp))
-    .addImm(NumBytes).addImm(0);
+  BuildMI(MBB, DL, TII.get(AdjStackUp)).addImm(NumBytes).addImm(0);
 
   // Now handle call return value (if any).
   SmallVector<unsigned, 4> UsedRegs;
@@ -1701,7 +1581,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
    }
 
    unsigned ResultReg = createResultReg(DstRC);
-    bool Emitted = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt, ResultReg,
+    bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                    RVLocs[0].getLocReg(), DstRC, SrcRC, DL);
    assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted;
    Emitted = true;
@@ -1715,21 +1595,18 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
    unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
    unsigned MemSize = ResVT.getSizeInBits()/8;
    int FI = MFI.CreateStackObject(MemSize, MemSize, false);
-    addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-                              TII.get(Opc)), FI)
-      .addReg(ResultReg);
+    addFrameReference(BuildMI(MBB, DL, TII.get(Opc)), FI).addReg(ResultReg);
    DstRC = ResVT == MVT::f32
      ? X86::FR32RegisterClass : X86::FR64RegisterClass;
    Opc = ResVT == MVT::f32 ? X86::MOVSSrm : X86::MOVSDrm;
    ResultReg = createResultReg(DstRC);
-    addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-                              TII.get(Opc), ResultReg), FI);
+    addFrameReference(BuildMI(MBB, DL, TII.get(Opc), ResultReg), FI);
   }
 
   if (AndToI1) {
    // Mask out all but lowest bit for some call which produces an i1.
    unsigned AndResult = createResultReg(X86::GR8RegisterClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+    BuildMI(MBB, DL,
            TII.get(X86::AND8ri), AndResult).addReg(ResultReg).addImm(1);
    ResultReg = AndResult;
   }
@@ -1752,8 +1629,6 @@ X86FastISel::TargetSelectInstruction(const Instruction *I) {
    return X86SelectLoad(I);
   case Instruction::Store:
    return X86SelectStore(I);
-  case Instruction::Ret:
-    return X86SelectRet(I);
   case Instruction::ICmp:
   case Instruction::FCmp:
    return X86SelectCmp(I);
@@ -1854,8 +1729,7 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
      else
        Opc = X86::LEA64r;
      unsigned ResultReg = createResultReg(RC);
-      addLeaAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-                            TII.get(Opc), ResultReg), AM);
+      addLeaAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
      return ResultReg;
    }
    return 0;
@@ -1885,8 +1759,7 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
   // Create the load from the constant pool.
   unsigned MCPOffset = MCP.getConstantPoolIndex(C, Align);
   unsigned ResultReg = createResultReg(RC);
-  addConstantPoolReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-                                   TII.get(Opc), ResultReg),
+  addConstantPoolReference(BuildMI(MBB, DL, TII.get(Opc), ResultReg),
                           MCPOffset, PICBase, OpFlag);
 
   return ResultReg;
@@ -1909,8 +1782,7 @@ unsigned X86FastISel::TargetMaterializeAlloca(const AllocaInst *C) {
   unsigned Opc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
   TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
   unsigned ResultReg = createResultReg(RC);
-  addLeaAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-                        TII.get(Opc), ResultReg), AM);
+  addLeaAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
   return ResultReg;
 }