Diffstat (limited to 'llvm/lib/Target')
-rw-r--r--   llvm/lib/Target/X86/X86ISelDAGToDAG.cpp  |  8
-rw-r--r--   llvm/lib/Target/X86/X86InstrCompiler.td  |  4
-rw-r--r--   llvm/lib/Target/X86/X86InstrInfo.cpp     | 60
-rw-r--r--   llvm/lib/Target/X86/X86MCInstLower.cpp   | 35
-rw-r--r--   llvm/lib/Target/X86/X86Subtarget.cpp     | 32
-rw-r--r--   llvm/lib/Target/X86/X86TargetMachine.cpp |  5
6 files changed, 29 insertions, 115 deletions
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index 026e6fcc9a7..f7fd221b517 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -940,11 +940,11 @@ bool X86DAGToDAGISel::matchWrapper(SDValue N, X86ISelAddressMode &AM) {
   bool IsRIPRel = N.getOpcode() == X86ISD::WrapperRIP;
 
-  // In every code model except the 64-bit large code model, we can use an
-  // address mode to get the GOT entry or the global itself.
+  // Only do this address mode folding for 64-bit if we're in the small code
+  // model.
+  // FIXME: But we can do GOTPCREL addressing in the medium code model.
   CodeModel::Model M = TM.getCodeModel();
-  if (Subtarget->is64Bit() && M != CodeModel::Small && M != CodeModel::Kernel &&
-      !(M == CodeModel::Medium && IsRIPRel))
+  if (Subtarget->is64Bit() && M != CodeModel::Small && M != CodeModel::Kernel)
     return true;
 
   // Base and index reg must be 0 in order to use %rip as base.
diff --git a/llvm/lib/Target/X86/X86InstrCompiler.td b/llvm/lib/Target/X86/X86InstrCompiler.td
index 8d2403367a3..c863bac9722 100644
--- a/llvm/lib/Target/X86/X86InstrCompiler.td
+++ b/llvm/lib/Target/X86/X86InstrCompiler.td
@@ -37,10 +37,6 @@ let hasSideEffects = 0, isNotDuplicable = 1, Uses = [ESP, SSP],
   def MOVPC32r : Ii32<0xE8, Pseudo, (outs GR32:$reg), (ins i32imm:$label),
                       "", []>;
 
-// 64-bit large code model PIC base construction.
-let hasSideEffects = 0, mayLoad = 1, isNotDuplicable = 1, SchedRW = [WriteJump] in
-  def MOVGOT64r : PseudoI<(outs GR64:$reg),
-                          (ins GR64:$scratch, i64i32imm_pcrel:$got), []>;
 
 // ADJCALLSTACKDOWN/UP implicitly use/def ESP because they may be expanded into
 // a stack adjustment and the codegen must know that they may modify the stack
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 712276604ae..cfa25731c40 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -11333,9 +11333,7 @@ isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
 /// TODO: Eliminate this and move the code to X86MachineFunctionInfo.
 ///
 unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
-  assert((!Subtarget.is64Bit() ||
-          MF->getTarget().getCodeModel() == CodeModel::Medium ||
-          MF->getTarget().getCodeModel() == CodeModel::Large) &&
+  assert(!Subtarget.is64Bit() &&
          "X86-64 PIC uses RIP relative addressing");
 
   X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
@@ -11346,8 +11344,7 @@ unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
   // Create the register. The code to initialize it is inserted
   // later, by the CGBR pass (below).
   MachineRegisterInfo &RegInfo = MF->getRegInfo();
-  GlobalBaseReg = RegInfo.createVirtualRegister(
-      Subtarget.is64Bit() ? &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass);
+  GlobalBaseReg = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
   X86FI->setGlobalBaseReg(GlobalBaseReg);
   return GlobalBaseReg;
 }
@@ -12721,10 +12718,9 @@ namespace {
         static_cast<const X86TargetMachine *>(&MF.getTarget());
       const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
 
-      // Don't do anything in the 64-bit small and kernel code models. They use
-      // RIP-relative addressing for everything.
-      if (STI.is64Bit() && (TM->getCodeModel() == CodeModel::Small ||
-                            TM->getCodeModel() == CodeModel::Kernel))
+      // Don't do anything if this is 64-bit as 64-bit PIC
+      // uses RIP relative addressing.
+      if (STI.is64Bit())
         return false;
 
       // Only emit a global base reg in PIC mode.
@@ -12751,41 +12747,17 @@ namespace {
       else
         PC = GlobalBaseReg;
 
-      if (STI.is64Bit()) {
-        if (TM->getCodeModel() == CodeModel::Medium) {
-          // In the medium code model, use a RIP-relative LEA to materialize the
-          // GOT.
-          BuildMI(FirstMBB, MBBI, DL, TII->get(X86::LEA64r), PC)
-              .addReg(X86::RIP)
-              .addImm(0)
-              .addReg(0)
-              .addExternalSymbol("_GLOBAL_OFFSET_TABLE_")
-              .addReg(0);
-        } else if (TM->getCodeModel() == CodeModel::Large) {
-          // Loading the GOT in the large code model requires math with labels,
-          // so we use a pseudo instruction and expand it during MC emission.
-          unsigned Scratch = RegInfo.createVirtualRegister(&X86::GR64RegClass);
-          BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVGOT64r), PC)
-              .addReg(Scratch, RegState::Undef | RegState::Define)
-              .addExternalSymbol("_GLOBAL_OFFSET_TABLE_");
-        } else {
-          llvm_unreachable("unexpected code model");
-        }
-      } else {
-        // Operand of MovePCtoStack is completely ignored by asm printer. It's
-        // only used in JIT code emission as displacement to pc.
-        BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVPC32r), PC).addImm(0);
-
-        // If we're using vanilla 'GOT' PIC style, we should use relative
-        // addressing not to pc, but to _GLOBAL_OFFSET_TABLE_ external.
-        if (STI.isPICStyleGOT()) {
-          // Generate addl $__GLOBAL_OFFSET_TABLE_ + [.-piclabel],
-          // %some_register
-          BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD32ri), GlobalBaseReg)
-              .addReg(PC)
-              .addExternalSymbol("_GLOBAL_OFFSET_TABLE_",
-                                 X86II::MO_GOT_ABSOLUTE_ADDRESS);
-        }
+      // Operand of MovePCtoStack is completely ignored by asm printer. It's
+      // only used in JIT code emission as displacement to pc.
+      BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVPC32r), PC).addImm(0);
+
+      // If we're using vanilla 'GOT' PIC style, we should use relative addressing
+      // not to pc, but to _GLOBAL_OFFSET_TABLE_ external.
+      if (STI.isPICStyleGOT()) {
+        // Generate addl $__GLOBAL_OFFSET_TABLE_ + [.-piclabel], %some_register
+        BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD32ri), GlobalBaseReg)
+          .addReg(PC).addExternalSymbol("_GLOBAL_OFFSET_TABLE_",
+                                        X86II::MO_GOT_ABSOLUTE_ADDRESS);
       }
 
       return true;
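Note on the hunk above: with the 64-bit paths gone, CGBR always emits the classic 32-bit GOT-PIC prologue, i.e. MOVPC32r (a call/pop pair that recovers the PC) followed by the ADD32ri form carrying MO_GOT_ABSOLUTE_ADDRESS. The sketch below only illustrates the assembly shape this lowers to; the label and register names are invented for the example, not something LLVM is required to pick.

    // Illustrative sketch, not LLVM code: the assembly shape of the 32-bit
    // PIC base sequence built by the hunk above. Label/register choices
    // (.L0$pb, %ebx) are assumptions for the example.
    #include <cstdio>

    int main() {
      const char *PicBaseSeq[] = {
          "calll .L0$pb", // MOVPC32r: the call pushes the address of .L0$pb...
          ".L0$pb:",
          "popl %ebx",    // ...which is popped to recover the current PC
          // ADD32ri with MO_GOT_ABSOLUTE_ADDRESS: add the distance from the
          // pic label to _GLOBAL_OFFSET_TABLE_, leaving the GOT base in %ebx.
          "addl $_GLOBAL_OFFSET_TABLE_+(.-.L0$pb), %ebx",
      };
      for (const char *Line : PicBaseSeq)
        std::puts(Line);
    }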
diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp
index ee6161ad0a2..ae4b1e79c54 100644
--- a/llvm/lib/Target/X86/X86MCInstLower.cpp
+++ b/llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -1982,41 +1982,6 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
     return;
   }
 
-  case X86::MOVGOT64r: {
-    // Materializes the GOT for the 64-bit large code model.
-    MCSymbol *DotSym = OutContext.createTempSymbol();
-    OutStreamer->EmitLabel(DotSym);
-
-    unsigned DstReg = MI->getOperand(0).getReg();
-    unsigned ScratchReg = MI->getOperand(1).getReg();
-    MCSymbol *GOTSym = MCInstLowering.GetSymbolFromOperand(MI->getOperand(2));
-
-    // .LtmpN: leaq .LtmpN(%rip), %dst
-    const MCExpr *DotExpr = MCSymbolRefExpr::create(DotSym, OutContext);
-    EmitAndCountInstruction(MCInstBuilder(X86::LEA64r)
-                                .addReg(DstReg)   // dest
-                                .addReg(X86::RIP) // base
-                                .addImm(1)        // scale
-                                .addReg(0)        // index
-                                .addExpr(DotExpr) // disp
-                                .addReg(0));      // seg
-
-    // movq $_GLOBAL_OFFSET_TABLE_ - .LtmpN, %scratch
-    const MCExpr *GOTSymExpr = MCSymbolRefExpr::create(GOTSym, OutContext);
-    const MCExpr *GOTDiffExpr =
-        MCBinaryExpr::createSub(GOTSymExpr, DotExpr, OutContext);
-    EmitAndCountInstruction(MCInstBuilder(X86::MOV64ri)
-                                .addReg(ScratchReg)     // dest
-                                .addExpr(GOTDiffExpr)); // disp
-
-    // addq %scratch, %dst
-    EmitAndCountInstruction(MCInstBuilder(X86::ADD64rr)
-                                .addReg(DstReg)       // dest
-                                .addReg(DstReg)       // dest
-                                .addReg(ScratchReg)); // src
-    return;
-  }
-
   case X86::ADD32ri: {
     // Lower the MO_GOT_ABSOLUTE_ADDRESS form of ADD32ri.
     if (MI->getOperand(2).getTargetFlags() != X86II::MO_GOT_ABSOLUTE_ADDRESS)
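The deleted MOVGOT64r expansion built the GOT address out of label arithmetic: a RIP-relative LEA takes the address of the temp label, a movabs supplies the 64-bit link-time constant _GLOBAL_OFFSET_TABLE_ - .LtmpN, and the add combines the two. A minimal sketch of that arithmetic, with illustrative names rather than LLVM API:

    // Minimal sketch of the arithmetic behind the removed expansion. 'dot'
    // models what the RIP-relative LEA produces (the runtime address of
    // .LtmpN); 'gotMinusDot' models the constant the movabs loaded. Both
    // names are assumptions for the example.
    #include <cstdint>

    uint64_t materializeGOTBase(uint64_t dot, int64_t gotMinusDot) {
      uint64_t dst = dot;            // leaq .LtmpN(%rip), %dst
      int64_t scratch = gotMinusDot; // movabsq $_GLOBAL_OFFSET_TABLE_-.LtmpN, %scratch
      return dst + scratch;          // addq %scratch, %dst -> &_GLOBAL_OFFSET_TABLE_
    }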
diff --git a/llvm/lib/Target/X86/X86Subtarget.cpp b/llvm/lib/Target/X86/X86Subtarget.cpp
index b7cab75f5a5..8e1e35eebba 100644
--- a/llvm/lib/Target/X86/X86Subtarget.cpp
+++ b/llvm/lib/Target/X86/X86Subtarget.cpp
@@ -68,30 +68,14 @@ X86Subtarget::classifyGlobalReference(const GlobalValue *GV) const {
 
 unsigned char
 X86Subtarget::classifyLocalReference(const GlobalValue *GV) const {
-  // If we're not PIC, it's not very interesting.
-  if (!isPositionIndependent())
+  // 64 bits can use %rip addressing for anything local.
+  if (is64Bit())
     return X86II::MO_NO_FLAG;
 
-  // For 64-bit, we need to consider the code model.
-  if (is64Bit()) {
-    switch (TM.getCodeModel()) {
-    // 64-bit small code model is simple: All rip-relative.
-    case CodeModel::Small:
-    case CodeModel::Kernel:
-      return X86II::MO_NO_FLAG;
-
-    // The large PIC code model uses GOTOFF.
-    case CodeModel::Large:
-      return X86II::MO_GOTOFF;
-
-    // Medium is a hybrid: RIP-rel for code, GOTOFF for DSO local data.
-    case CodeModel::Medium:
-      if (isa<Function>(GV))
-        return X86II::MO_NO_FLAG; // All code is RIP-relative
-      return X86II::MO_GOTOFF;    // Local symbols use GOTOFF.
-    }
-    llvm_unreachable("invalid code model");
-  }
+  // If this is for a position dependent executable, the static linker can
+  // figure it out.
+  if (!isPositionIndependent())
+    return X86II::MO_NO_FLAG;
 
   // The COFF dynamic linker just patches the executable sections.
   if (isTargetCOFF())
@@ -114,7 +98,7 @@ X86Subtarget::classifyLocalReference(const GlobalValue *GV) const {
 unsigned char X86Subtarget::classifyGlobalReference(const GlobalValue *GV,
                                                     const Module &M) const {
   // Large model never uses stubs.
-  if (TM.getCodeModel() == CodeModel::Large && !isPositionIndependent())
+  if (TM.getCodeModel() == CodeModel::Large)
     return X86II::MO_NO_FLAG;
 
   // Absolute symbols can be referenced directly.
@@ -136,7 +120,7 @@ unsigned char X86Subtarget::classifyGlobalReference(const GlobalValue *GV,
   if (isTargetCOFF())
     return X86II::MO_DLLIMPORT;
 
-  if (is64Bit() && TM.getCodeModel() != CodeModel::Large)
+  if (is64Bit())
     return X86II::MO_GOTPCREL;
 
   if (isTargetDarwin()) {
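After this change, 64-bit reference classification no longer consults the code model: anything local is MO_NO_FLAG (plain RIP-relative) and other globals get MO_GOTPCREL. As a rough illustration of what those two answers mean for a load of a global (stand-in enum and typical lowering, not LLVM output):

    // Rough illustration only. RefKind stands in for the X86II::MO_* flags
    // and the strings show the typical x86-64 lowering for each answer.
    #include <cstdio>

    enum class RefKind { Direct, GotPcRel }; // ~MO_NO_FLAG / ~MO_GOTPCREL

    const char *loadPattern(RefKind K) {
      switch (K) {
      case RefKind::Direct:   // dso-local: fold the address into the access
        return "movl sym(%rip), %eax";
      case RefKind::GotPcRel: // may be interposed: load the address via GOT
        return "movq sym@GOTPCREL(%rip), %rax ; movl (%rax), %eax";
      }
      return "";
    }

    int main() {
      std::puts(loadPattern(RefKind::Direct));
      std::puts(loadPattern(RefKind::GotPcRel));
    }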
diff --git a/llvm/lib/Target/X86/X86TargetMachine.cpp b/llvm/lib/Target/X86/X86TargetMachine.cpp
index 92f5e795d7d..a713bddf7bf 100644
--- a/llvm/lib/Target/X86/X86TargetMachine.cpp
+++ b/llvm/lib/Target/X86/X86TargetMachine.cpp
@@ -156,7 +156,6 @@ static std::string computeDataLayout(const Triple &TT) {
 }
 
 static Reloc::Model getEffectiveRelocModel(const Triple &TT,
-                                           bool JIT,
                                            Optional<Reloc::Model> RM) {
   bool is64Bit = TT.getArch() == Triple::x86_64;
   if (!RM.hasValue()) {
@@ -168,8 +167,6 @@ static Reloc::Model getEffectiveRelocModel(const Triple &TT,
         return Reloc::PIC_;
       return Reloc::DynamicNoPIC;
     }
-    if (JIT)
-      return Reloc::Static;
     if (TT.isOSWindows() && is64Bit)
       return Reloc::PIC_;
     return Reloc::Static;
@@ -213,7 +210,7 @@ X86TargetMachine::X86TargetMachine(const Target &T, const Triple &TT,
                                    CodeGenOpt::Level OL, bool JIT)
     : LLVMTargetMachine(
           T, computeDataLayout(TT), TT, CPU, FS, Options,
-          getEffectiveRelocModel(TT, JIT, RM),
+          getEffectiveRelocModel(TT, RM),
           getEffectiveCodeModel(CM, JIT, TT.getArch() == Triple::x86_64), OL),
       TLOF(createTLOF(getTargetTriple())) {
   // Windows stack unwinder gets confused when execution flow "falls through"
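With the JIT parameter dropped, the default relocation model is keyed purely off the triple. A condensed sketch of the default path after this change, assuming the elided context above is the usual Darwin default (local stand-in types, not the LLVM ones):

    // Condensed sketch of the default relocation-model choice after this
    // change; an explicit -relocation-model still takes precedence and is
    // not modeled here. Types and names are local stand-ins.
    enum class RelocModel { Static, PIC, DynamicNoPIC };

    RelocModel defaultRelocModel(bool isDarwin, bool isWindows, bool is64Bit) {
      if (isDarwin) // Darwin: PIC in 64-bit mode, dynamic-no-pic in 32-bit
        return is64Bit ? RelocModel::PIC : RelocModel::DynamicNoPIC;
      if (isWindows && is64Bit) // Win64 requires RIP-relative addressing
        return RelocModel::PIC;
      return RelocModel::Static; // JIT no longer forces Static here
    }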