diff options
Diffstat (limited to 'llvm/lib')
 llvm/lib/Target/X86/X86ISelDAGToDAG.cpp  | 10
 llvm/lib/Target/X86/X86InstrCompiler.td  |  4
 llvm/lib/Target/X86/X86InstrInfo.cpp     | 60
 llvm/lib/Target/X86/X86MCInstLower.cpp   | 35
 llvm/lib/Target/X86/X86Subtarget.cpp     | 34
 llvm/lib/Target/X86/X86TargetMachine.cpp |  8
6 files changed, 30 insertions, 121 deletions
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp index c83307547cd..e54115e532a 100644 --- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp +++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp @@ -954,13 +954,11 @@ bool X86DAGToDAGISel::matchWrapper(SDValue N, X86ISelAddressMode &AM) { bool IsRIPRel = N.getOpcode() == X86ISD::WrapperRIP; - // We can't use an addressing mode in the 64-bit large code model. In the - // medium code model, we use can use an mode when RIP wrappers are present. - // That signifies access to globals that are known to be "near", such as the - // GOT itself. + // Only do this address mode folding for 64-bit if we're in the small code + // model. + // FIXME: But we can do GOTPCREL addressing in the medium code model. CodeModel::Model M = TM.getCodeModel(); - if (Subtarget->is64Bit() && - (M == CodeModel::Large || (M == CodeModel::Medium && !IsRIPRel))) + if (Subtarget->is64Bit() && M != CodeModel::Small && M != CodeModel::Kernel) return true; // Base and index reg must be 0 in order to use %rip as base. diff --git a/llvm/lib/Target/X86/X86InstrCompiler.td b/llvm/lib/Target/X86/X86InstrCompiler.td index 8d391de2ce2..f1aa4faeeb8 100644 --- a/llvm/lib/Target/X86/X86InstrCompiler.td +++ b/llvm/lib/Target/X86/X86InstrCompiler.td @@ -37,10 +37,6 @@ let hasSideEffects = 0, isNotDuplicable = 1, Uses = [ESP, SSP], def MOVPC32r : Ii32<0xE8, Pseudo, (outs GR32:$reg), (ins i32imm:$label), "", []>; -// 64-bit large code model PIC base construction. 
-let hasSideEffects = 0, mayLoad = 1, isNotDuplicable = 1, SchedRW = [WriteJump] in - def MOVGOT64r : PseudoI<(outs GR64:$reg), - (ins GR64:$scratch, i64i32imm_pcrel:$got), []>; // ADJCALLSTACKDOWN/UP implicitly use/def ESP because they may be expanded into // a stack adjustment and the codegen must know that they may modify the stack diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp index 7492de27e7b..c20696e77fd 100644 --- a/llvm/lib/Target/X86/X86InstrInfo.cpp +++ b/llvm/lib/Target/X86/X86InstrInfo.cpp @@ -11248,9 +11248,7 @@ isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const { /// TODO: Eliminate this and move the code to X86MachineFunctionInfo. /// unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const { - assert((!Subtarget.is64Bit() || - MF->getTarget().getCodeModel() == CodeModel::Medium || - MF->getTarget().getCodeModel() == CodeModel::Large) && + assert(!Subtarget.is64Bit() && "X86-64 PIC uses RIP relative addressing"); X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>(); @@ -11261,8 +11259,7 @@ unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const { // Create the register. The code to initialize it is inserted // later, by the CGBR pass (below). MachineRegisterInfo &RegInfo = MF->getRegInfo(); - GlobalBaseReg = RegInfo.createVirtualRegister( - Subtarget.is64Bit() ? &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass); + GlobalBaseReg = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass); X86FI->setGlobalBaseReg(GlobalBaseReg); return GlobalBaseReg; } @@ -12636,10 +12633,9 @@ namespace { static_cast<const X86TargetMachine *>(&MF.getTarget()); const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>(); - // Don't do anything in the 64-bit small and kernel code models. They use - // RIP-relative addressing for everything. 
- if (STI.is64Bit() && (TM->getCodeModel() == CodeModel::Small || - TM->getCodeModel() == CodeModel::Kernel)) + // Don't do anything if this is 64-bit as 64-bit PIC + // uses RIP relative addressing. + if (STI.is64Bit()) return false; // Only emit a global base reg in PIC mode. @@ -12666,41 +12662,17 @@ namespace { else PC = GlobalBaseReg; - if (STI.is64Bit()) { - if (TM->getCodeModel() == CodeModel::Medium) { - // In the medium code model, use a RIP-relative LEA to materialize the - // GOT. - BuildMI(FirstMBB, MBBI, DL, TII->get(X86::LEA64r), PC) - .addReg(X86::RIP) - .addImm(0) - .addReg(0) - .addExternalSymbol("_GLOBAL_OFFSET_TABLE_") - .addReg(0); - } else if (TM->getCodeModel() == CodeModel::Large) { - // Loading the GOT in the large code model requires math with labels, - // so we use a pseudo instruction and expand it during MC emission. - unsigned Scratch = RegInfo.createVirtualRegister(&X86::GR64RegClass); - BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVGOT64r), PC) - .addReg(Scratch, RegState::Undef | RegState::Define) - .addExternalSymbol("_GLOBAL_OFFSET_TABLE_"); - } else { - llvm_unreachable("unexpected code model"); - } - } else { - // Operand of MovePCtoStack is completely ignored by asm printer. It's - // only used in JIT code emission as displacement to pc. - BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVPC32r), PC).addImm(0); - - // If we're using vanilla 'GOT' PIC style, we should use relative - // addressing not to pc, but to _GLOBAL_OFFSET_TABLE_ external. - if (STI.isPICStyleGOT()) { - // Generate addl $__GLOBAL_OFFSET_TABLE_ + [.-piclabel], - // %some_register - BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD32ri), GlobalBaseReg) - .addReg(PC) - .addExternalSymbol("_GLOBAL_OFFSET_TABLE_", - X86II::MO_GOT_ABSOLUTE_ADDRESS); - } + // Operand of MovePCtoStack is completely ignored by asm printer. It's + // only used in JIT code emission as displacement to pc. 
+ BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVPC32r), PC).addImm(0); + + // If we're using vanilla 'GOT' PIC style, we should use relative addressing + // not to pc, but to _GLOBAL_OFFSET_TABLE_ external. + if (STI.isPICStyleGOT()) { + // Generate addl $__GLOBAL_OFFSET_TABLE_ + [.-piclabel], %some_register + BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD32ri), GlobalBaseReg) + .addReg(PC).addExternalSymbol("_GLOBAL_OFFSET_TABLE_", + X86II::MO_GOT_ABSOLUTE_ADDRESS); } return true; diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp index ee6161ad0a2..ae4b1e79c54 100644 --- a/llvm/lib/Target/X86/X86MCInstLower.cpp +++ b/llvm/lib/Target/X86/X86MCInstLower.cpp @@ -1982,41 +1982,6 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) { return; } - case X86::MOVGOT64r: { - // Materializes the GOT for the 64-bit large code model. - MCSymbol *DotSym = OutContext.createTempSymbol(); - OutStreamer->EmitLabel(DotSym); - - unsigned DstReg = MI->getOperand(0).getReg(); - unsigned ScratchReg = MI->getOperand(1).getReg(); - MCSymbol *GOTSym = MCInstLowering.GetSymbolFromOperand(MI->getOperand(2)); - - // .LtmpN: leaq .LtmpN(%rip), %dst - const MCExpr *DotExpr = MCSymbolRefExpr::create(DotSym, OutContext); - EmitAndCountInstruction(MCInstBuilder(X86::LEA64r) - .addReg(DstReg) // dest - .addReg(X86::RIP) // base - .addImm(1) // scale - .addReg(0) // index - .addExpr(DotExpr) // disp - .addReg(0)); // seg - - // movq $_GLOBAL_OFFSET_TABLE_ - .LtmpN, %scratch - const MCExpr *GOTSymExpr = MCSymbolRefExpr::create(GOTSym, OutContext); - const MCExpr *GOTDiffExpr = - MCBinaryExpr::createSub(GOTSymExpr, DotExpr, OutContext); - EmitAndCountInstruction(MCInstBuilder(X86::MOV64ri) - .addReg(ScratchReg) // dest - .addExpr(GOTDiffExpr)); // disp - - // addq %scratch, %dst - EmitAndCountInstruction(MCInstBuilder(X86::ADD64rr) - .addReg(DstReg) // dest - .addReg(DstReg) // dest - .addReg(ScratchReg)); // src - return; - } - case X86::ADD32ri: { 
// Lower the MO_GOT_ABSOLUTE_ADDRESS form of ADD32ri. if (MI->getOperand(2).getTargetFlags() != X86II::MO_GOT_ABSOLUTE_ADDRESS) diff --git a/llvm/lib/Target/X86/X86Subtarget.cpp b/llvm/lib/Target/X86/X86Subtarget.cpp index e63a98556a7..8e1e35eebba 100644 --- a/llvm/lib/Target/X86/X86Subtarget.cpp +++ b/llvm/lib/Target/X86/X86Subtarget.cpp @@ -68,30 +68,14 @@ X86Subtarget::classifyGlobalReference(const GlobalValue *GV) const { unsigned char X86Subtarget::classifyLocalReference(const GlobalValue *GV) const { - // If we're not PIC, it's not very interesting. - if (!isPositionIndependent()) + // 64 bits can use %rip addressing for anything local. + if (is64Bit()) return X86II::MO_NO_FLAG; - // For 64-bit, we need to consider the code model. - if (is64Bit()) { - switch (TM.getCodeModel()) { - // 64-bit small code model is simple: All rip-relative. - case CodeModel::Small: - case CodeModel::Kernel: - return X86II::MO_NO_FLAG; - - // The large PIC code model uses GOTOFF. - case CodeModel::Large: - return X86II::MO_GOTOFF; - - // Medium is a hybrid: RIP-rel for code, GOTOFF for DSO local data. - case CodeModel::Medium: - if (isa<Function>(GV)) - return X86II::MO_NO_FLAG; // All code is RIP-relative - return X86II::MO_GOTOFF; // Local symbols use GOTOFF. - } - llvm_unreachable("invalid code model"); - } + // If this is for a position dependent executable, the static linker can + // figure it out. + if (!isPositionIndependent()) + return X86II::MO_NO_FLAG; // The COFF dynamic linker just patches the executable sections. if (isTargetCOFF()) @@ -113,8 +97,8 @@ X86Subtarget::classifyLocalReference(const GlobalValue *GV) const { unsigned char X86Subtarget::classifyGlobalReference(const GlobalValue *GV, const Module &M) const { - // The static large model never uses stubs. - if (TM.getCodeModel() == CodeModel::Large && !isPositionIndependent()) + // Large model never uses stubs. 
+ if (TM.getCodeModel() == CodeModel::Large) return X86II::MO_NO_FLAG; // Absolute symbols can be referenced directly. @@ -136,7 +120,7 @@ unsigned char X86Subtarget::classifyGlobalReference(const GlobalValue *GV, if (isTargetCOFF()) return X86II::MO_DLLIMPORT; - if (is64Bit() && TM.getCodeModel() != CodeModel::Large) + if (is64Bit()) return X86II::MO_GOTPCREL; if (isTargetDarwin()) { diff --git a/llvm/lib/Target/X86/X86TargetMachine.cpp b/llvm/lib/Target/X86/X86TargetMachine.cpp index c5f476b54b9..5de3b8a992e 100644 --- a/llvm/lib/Target/X86/X86TargetMachine.cpp +++ b/llvm/lib/Target/X86/X86TargetMachine.cpp @@ -156,15 +156,9 @@ static std::string computeDataLayout(const Triple &TT) { } static Reloc::Model getEffectiveRelocModel(const Triple &TT, - bool JIT, Optional<Reloc::Model> RM) { bool is64Bit = TT.getArch() == Triple::x86_64; if (!RM.hasValue()) { - // JIT codegen should use static relocations by default, since it's - // typically executed in process and not relocatable. - if (JIT) - return Reloc::Static; - // Darwin defaults to PIC in 64 bit mode and dynamic-no-pic in 32 bit mode. // Win64 requires rip-rel addressing, thus we force it to PIC. Otherwise we // use static relocation model by default. @@ -216,7 +210,7 @@ X86TargetMachine::X86TargetMachine(const Target &T, const Triple &TT, CodeGenOpt::Level OL, bool JIT) : LLVMTargetMachine( T, computeDataLayout(TT), TT, CPU, FS, Options, - getEffectiveRelocModel(TT, JIT, RM), + getEffectiveRelocModel(TT, RM), getEffectiveCodeModel(CM, JIT, TT.getArch() == Triple::x86_64), OL), TLOF(createTLOF(getTargetTriple())) { // Windows stack unwinder gets confused when execution flow "falls through" |

