| author | Eric Christopher <echristo@gmail.com> | 2015-01-26 19:03:15 +0000 |
|---|---|---|
| committer | Eric Christopher <echristo@gmail.com> | 2015-01-26 19:03:15 +0000 |
| commit | 8b7706517cbdbf1f17481884227a6d3e0df57f13 (patch) | |
| tree | c28d4d5cfb9a6d6ffe4fa9f48a7c8ebfb600b639 /llvm/lib/Target/ARM | |
| parent | a7ad6a589c30bf316b2e0e9456f521de12d7a679 (diff) | |
Move DataLayout back to the TargetMachine from TargetSubtargetInfo
derived classes.
Since global data alignment, layout, and mangling are often based on the
DataLayout, move it to the TargetMachine. This ensures that global
data is going to be laid out and mangled consistently even if the subtarget
changes on a per-function basis. Prior to this change, all targets (*)
have had their subtarget-dependent code moved out and onto the
TargetMachine.
*One target hasn't been migrated as part of this change: R600. The
R600 port has the size of pointers as a subtarget feature, and this
affects global data layout. I've currently hacked in a FIXME
to enable progress, but the port needs to be updated to either pass
the 64-bitness to the TargetMachine, or fix the DataLayout to
avoid subtarget-dependent features.
llvm-svn: 227113
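
In practice the change boils down to call sites asking the TargetMachine, rather than the per-function subtarget, for the DataLayout. The sketch below is not part of this patch; it is a minimal illustration against the LLVM 3.6-era API (where `TargetMachine::getDataLayout()` returns a pointer), and the helper name `typeAllocSizeViaTM` is made up for the example.

```cpp
// Illustrative only -- not code from this commit.
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

// Hypothetical helper showing the post-commit pattern: the DataLayout is
// queried from the TargetMachine, so global layout and mangling cannot
// change when the subtarget is swapped on a per-function basis.
static uint64_t typeAllocSizeViaTM(const TargetMachine &TM, Type *Ty) {
  // Before this commit: TM.getSubtargetImpl()->getDataLayout()->getTypeAllocSize(Ty)
  const DataLayout *DL = TM.getDataLayout();
  return DL->getTypeAllocSize(Ty);
}
```

For a little-endian AAPCS ELF target, the computeDataLayout routine added to ARMTargetMachine.cpp in the diff below should produce roughly "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".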
Diffstat (limited to 'llvm/lib/Target/ARM')
-rw-r--r-- | llvm/lib/Target/ARM/ARMAsmPrinter.cpp | 16
-rw-r--r-- | llvm/lib/Target/ARM/ARMConstantIslandPass.cpp | 2
-rw-r--r-- | llvm/lib/Target/ARM/ARMInstrInfo.cpp | 5
-rw-r--r-- | llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp | 2
-rw-r--r-- | llvm/lib/Target/ARM/ARMSubtarget.cpp | 74
-rw-r--r-- | llvm/lib/Target/ARM/ARMSubtarget.h | 8
-rw-r--r-- | llvm/lib/Target/ARM/ARMTargetMachine.cpp | 53
-rw-r--r-- | llvm/lib/Target/ARM/ARMTargetMachine.h | 2

8 files changed, 86 insertions(+), 76 deletions(-)
```diff
diff --git a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
index 850c3c789dc..8a3f8f85dd4 100644
--- a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
+++ b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
@@ -83,8 +83,7 @@ void ARMAsmPrinter::EmitFunctionEntryLabel() {
 }
 
 void ARMAsmPrinter::EmitXXStructor(const Constant *CV) {
-  uint64_t Size =
-      TM.getSubtargetImpl()->getDataLayout()->getTypeAllocSize(CV->getType());
+  uint64_t Size = TM.getDataLayout()->getTypeAllocSize(CV->getType());
   assert(Size && "C++ constructor pointer had zero size!");
 
   const GlobalValue *GV = dyn_cast<GlobalValue>(CV->stripPointerCasts());
@@ -207,7 +206,7 @@ void ARMAsmPrinter::printOperand(const MachineInstr *MI, int OpNum,
 
 MCSymbol *ARMAsmPrinter::
 GetARMJTIPICJumpTableLabel2(unsigned uid, unsigned uid2) const {
-  const DataLayout *DL = TM.getSubtargetImpl()->getDataLayout();
+  const DataLayout *DL = TM.getDataLayout();
   SmallString<60> Name;
   raw_svector_ostream(Name) << DL->getPrivateGlobalPrefix() << "JTI"
     << getFunctionNumber() << '_' << uid << '_' << uid2;
@@ -216,7 +215,7 @@ GetARMJTIPICJumpTableLabel2(unsigned uid, unsigned uid2) const {
 
 
 MCSymbol *ARMAsmPrinter::GetARMSJLJEHLabel() const {
-  const DataLayout *DL = TM.getSubtargetImpl()->getDataLayout();
+  const DataLayout *DL = TM.getDataLayout();
   SmallString<60> Name;
   raw_svector_ostream(Name) << DL->getPrivateGlobalPrefix() << "SJLJEH"
     << getFunctionNumber();
@@ -586,7 +585,7 @@ void ARMAsmPrinter::EmitEndOfAsmFile(Module &M) {
     MachineModuleInfoELF::SymbolListTy Stubs = MMIELF.GetGVStubList();
     if (!Stubs.empty()) {
       OutStreamer.SwitchSection(TLOFELF.getDataRelSection());
-      const DataLayout *TD = TM.getSubtargetImpl()->getDataLayout();
+      const DataLayout *TD = TM.getDataLayout();
 
       for (auto &stub: Stubs) {
         OutStreamer.EmitLabel(stub.first);
@@ -924,9 +923,8 @@ MCSymbol *ARMAsmPrinter::GetARMGVSymbol(const GlobalValue *GV,
 
 void ARMAsmPrinter::
 EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV) {
-  const DataLayout *DL = TM.getSubtargetImpl()->getDataLayout();
-  int Size =
-      TM.getSubtargetImpl()->getDataLayout()->getTypeAllocSize(MCPV->getType());
+  const DataLayout *DL = TM.getDataLayout();
+  int Size = TM.getDataLayout()->getTypeAllocSize(MCPV->getType());
 
   ARMConstantPoolValue *ACPV = static_cast<ARMConstantPoolValue*>(MCPV);
 
@@ -1242,7 +1240,7 @@ void ARMAsmPrinter::EmitUnwindingInstruction(const MachineInstr *MI) {
 #include "ARMGenMCPseudoLowering.inc"
 
 void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
-  const DataLayout *DL = TM.getSubtargetImpl()->getDataLayout();
+  const DataLayout *DL = TM.getDataLayout();
 
   // If we just ended a constant pool, mark it as such.
   if (InConstantPool && MI->getOpcode() != ARM::CONSTPOOL_ENTRY) {
diff --git a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
index 5d295317c55..40b4b7219cc 100644
--- a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -532,7 +532,7 @@ ARMConstantIslands::doInitialPlacement(std::vector<MachineInstr*> &CPEMIs) {
   // identity mapping of CPI's to CPE's.
   const std::vector<MachineConstantPoolEntry> &CPs = MCP->getConstants();
 
-  const DataLayout &TD = *MF->getSubtarget().getDataLayout();
+  const DataLayout &TD = *MF->getTarget().getDataLayout();
   for (unsigned i = 0, e = CPs.size(); i != e; ++i) {
     unsigned Size = TD.getTypeAllocSize(CPs[i].getType());
     assert(Size >= 4 && "Too small constant pool entry");
diff --git a/llvm/lib/Target/ARM/ARMInstrInfo.cpp b/llvm/lib/Target/ARM/ARMInstrInfo.cpp
index 17d1ffaa9ff..dc6d1bf4bae 100644
--- a/llvm/lib/Target/ARM/ARMInstrInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMInstrInfo.cpp
@@ -156,9 +156,8 @@ namespace {
       ARMConstantPoolValue *CPV = ARMConstantPoolSymbol::Create(
           *Context, "_GLOBAL_OFFSET_TABLE_", ARMPCLabelIndex, PCAdj);
 
-      unsigned Align =
-          TM->getSubtargetImpl()->getDataLayout()->getPrefTypeAlignment(
-              Type::getInt32PtrTy(*Context));
+      unsigned Align = TM->getDataLayout()->getPrefTypeAlignment(
+          Type::getInt32PtrTy(*Context));
       unsigned Idx = MF.getConstantPool()->getConstantPoolIndex(CPV, Align);
 
       MachineBasicBlock &FirstMBB = MF.front();
diff --git a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index fda3e815624..6fa2992d5b1 100644
--- a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -1861,7 +1861,7 @@ namespace {
 }
 
 bool ARMPreAllocLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
-  TD = Fn.getSubtarget().getDataLayout();
+  TD = Fn.getTarget().getDataLayout();
   TII = Fn.getSubtarget().getInstrInfo();
   TRI = Fn.getSubtarget().getRegisterInfo();
   STI = &static_cast<const ARMSubtarget &>(Fn.getSubtarget());
diff --git a/llvm/lib/Target/ARM/ARMSubtarget.cpp b/llvm/lib/Target/ARM/ARMSubtarget.cpp
index 311afe909cf..8b919f40b97 100644
--- a/llvm/lib/Target/ARM/ARMSubtarget.cpp
+++ b/llvm/lib/Target/ARM/ARMSubtarget.cpp
@@ -88,56 +88,6 @@ IT(cl::desc("IT block support"), cl::Hidden, cl::init(DefaultIT),
                          "Allow IT blocks based on ARMv7"),
               clEnumValEnd));
 
-static std::string computeDataLayout(ARMSubtarget &ST) {
-  std::string Ret = "";
-
-  if (ST.isLittle())
-    // Little endian.
-    Ret += "e";
-  else
-    // Big endian.
-    Ret += "E";
-
-  Ret += DataLayout::getManglingComponent(ST.getTargetTriple());
-
-  // Pointers are 32 bits and aligned to 32 bits.
-  Ret += "-p:32:32";
-
-  // ABIs other than APCS have 64 bit integers with natural alignment.
-  if (!ST.isAPCS_ABI())
-    Ret += "-i64:64";
-
-  // We have 64 bits floats. The APCS ABI requires them to be aligned to 32
-  // bits, others to 64 bits. We always try to align to 64 bits.
-  if (ST.isAPCS_ABI())
-    Ret += "-f64:32:64";
-
-  // We have 128 and 64 bit vectors. The APCS ABI aligns them to 32 bits, others
-  // to 64. We always ty to give them natural alignment.
-  if (ST.isAPCS_ABI())
-    Ret += "-v64:32:64-v128:32:128";
-  else
-    Ret += "-v128:64:128";
-
-  // Try to align aggregates to 32 bits (the default is 64 bits, which has no
-  // particular hardware support on 32-bit ARM).
-  Ret += "-a:0:32";
-
-  // Integer registers are 32 bits.
-  Ret += "-n32";
-
-  // The stack is 128 bit aligned on NaCl, 64 bit aligned on AAPCS and 32 bit
-  // aligned everywhere else.
-  if (ST.isTargetNaCl())
-    Ret += "-S128";
-  else if (ST.isAAPCS_ABI())
-    Ret += "-S64";
-  else
-    Ret += "-S32";
-
-  return Ret;
-}
-
 /// initializeSubtargetDependencies - Initializes using a CPU and feature string
 /// so that we can use initializer lists for subtarget initialization.
 ARMSubtarget &ARMSubtarget::initializeSubtargetDependencies(StringRef CPU,
@@ -147,23 +97,31 @@ ARMSubtarget &ARMSubtarget::initializeSubtargetDependencies(StringRef CPU,
   return *this;
 }
 
+ARMFrameLowering *ARMSubtarget::initializeFrameLowering(StringRef CPU,
+                                                        StringRef FS) {
+  ARMSubtarget &STI = initializeSubtargetDependencies(CPU, FS);
+  if (STI.isThumb1Only())
+    return (ARMFrameLowering *)new Thumb1FrameLowering(STI);
+
+  return new ARMFrameLowering(STI);
+}
+
 ARMSubtarget::ARMSubtarget(const std::string &TT, const std::string &CPU,
-                           const std::string &FS, const ARMBaseTargetMachine &TM,
-                           bool IsLittle)
+                           const std::string &FS,
+                           const ARMBaseTargetMachine &TM, bool IsLittle)
     : ARMGenSubtargetInfo(TT, CPU, FS), ARMProcFamily(Others),
       ARMProcClass(None), stackAlignment(4), CPUString(CPU), IsLittle(IsLittle),
       TargetTriple(TT), Options(TM.Options), TM(TM),
-      DL(computeDataLayout(initializeSubtargetDependencies(CPU, FS))),
-      TSInfo(DL),
+      TSInfo(*TM.getDataLayout()),
+      FrameLowering(initializeFrameLowering(CPU, FS)),
+      // At this point initializeSubtargetDependencies has been called so
+      // we can query directly.
      InstrInfo(isThumb1Only()
                    ? (ARMBaseInstrInfo *)new Thumb1InstrInfo(*this)
                    : !isThumb()
                          ? (ARMBaseInstrInfo *)new ARMInstrInfo(*this)
                          : (ARMBaseInstrInfo *)new Thumb2InstrInfo(*this)),
-      TLInfo(TM),
-      FrameLowering(!isThumb1Only()
-                        ? new ARMFrameLowering(*this)
-                        : (ARMFrameLowering *)new Thumb1FrameLowering(*this)) {}
+      TLInfo(TM) {}
 
 void ARMSubtarget::initializeEnvironment() {
   HasV4TOps = false;
diff --git a/llvm/lib/Target/ARM/ARMSubtarget.h b/llvm/lib/Target/ARM/ARMSubtarget.h
index dbacd4d6aad..dd1ab3aabcf 100644
--- a/llvm/lib/Target/ARM/ARMSubtarget.h
+++ b/llvm/lib/Target/ARM/ARMSubtarget.h
@@ -248,7 +248,6 @@ public:
   /// so that we can use initializer lists for subtarget initialization.
   ARMSubtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS);
 
-  const DataLayout *getDataLayout() const override { return &DL; }
   const ARMSelectionDAGInfo *getSelectionDAGInfo() const override {
     return &TSInfo;
   }
@@ -266,16 +265,17 @@ public:
   }
 
 private:
-  const DataLayout DL;
   ARMSelectionDAGInfo TSInfo;
+  // Either Thumb1FrameLowering or ARMFrameLowering.
+  std::unique_ptr<ARMFrameLowering> FrameLowering;
   // Either Thumb1InstrInfo or Thumb2InstrInfo.
   std::unique_ptr<ARMBaseInstrInfo> InstrInfo;
   ARMTargetLowering TLInfo;
-  // Either Thumb1FrameLowering or ARMFrameLowering.
-  std::unique_ptr<ARMFrameLowering> FrameLowering;
 
   void initializeEnvironment();
   void initSubtargetFeatures(StringRef CPU, StringRef FS);
+  ARMFrameLowering *initializeFrameLowering(StringRef CPU, StringRef FS);
+
 public:
   void computeIssueWidth();
diff --git a/llvm/lib/Target/ARM/ARMTargetMachine.cpp b/llvm/lib/Target/ARM/ARMTargetMachine.cpp
index 7a8181b7528..2041e682dc2 100644
--- a/llvm/lib/Target/ARM/ARMTargetMachine.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetMachine.cpp
@@ -104,6 +104,58 @@ computeTargetABI(const Triple &TT, StringRef CPU,
   return TargetABI;
 }
 
+static std::string computeDataLayout(const Triple &TT,
+                                     ARMBaseTargetMachine::ARMABI ABI,
+                                     bool isLittle) {
+  std::string Ret = "";
+
+  if (isLittle)
+    // Little endian.
+    Ret += "e";
+  else
+    // Big endian.
+    Ret += "E";
+
+  Ret += DataLayout::getManglingComponent(TT);
+
+  // Pointers are 32 bits and aligned to 32 bits.
+  Ret += "-p:32:32";
+
+  // ABIs other than APCS have 64 bit integers with natural alignment.
+  if (ABI != ARMBaseTargetMachine::ARM_ABI_APCS)
+    Ret += "-i64:64";
+
+  // We have 64 bits floats. The APCS ABI requires them to be aligned to 32
+  // bits, others to 64 bits. We always try to align to 64 bits.
+  if (ABI == ARMBaseTargetMachine::ARM_ABI_APCS)
+    Ret += "-f64:32:64";
+
+  // We have 128 and 64 bit vectors. The APCS ABI aligns them to 32 bits, others
+  // to 64. We always ty to give them natural alignment.
+  if (ABI == ARMBaseTargetMachine::ARM_ABI_APCS)
+    Ret += "-v64:32:64-v128:32:128";
+  else
+    Ret += "-v128:64:128";
+
+  // Try to align aggregates to 32 bits (the default is 64 bits, which has no
+  // particular hardware support on 32-bit ARM).
+  Ret += "-a:0:32";
+
+  // Integer registers are 32 bits.
+  Ret += "-n32";
+
+  // The stack is 128 bit aligned on NaCl, 64 bit aligned on AAPCS and 32 bit
+  // aligned everywhere else.
+  if (TT.isOSNaCl())
+    Ret += "-S128";
+  else if (ABI == ARMBaseTargetMachine::ARM_ABI_AAPCS)
+    Ret += "-S64";
+  else
+    Ret += "-S32";
+
+  return Ret;
+}
+
 /// TargetMachine ctor - Create an ARM architecture model.
 ///
 ARMBaseTargetMachine::ARMBaseTargetMachine(const Target &T, StringRef TT,
@@ -113,6 +165,7 @@ ARMBaseTargetMachine::ARMBaseTargetMachine(const Target &T, StringRef TT,
                                            CodeGenOpt::Level OL, bool isLittle)
     : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
       TargetABI(computeTargetABI(Triple(TT), CPU, Options)),
+      DL(computeDataLayout(Triple(TT), TargetABI, isLittle)),
      TLOF(createTLOF(Triple(getTargetTriple()))),
      Subtarget(TT, CPU, FS, *this, isLittle), isLittle(isLittle) {
diff --git a/llvm/lib/Target/ARM/ARMTargetMachine.h b/llvm/lib/Target/ARM/ARMTargetMachine.h
index 18cf5fa0fa0..31e316a646e 100644
--- a/llvm/lib/Target/ARM/ARMTargetMachine.h
+++ b/llvm/lib/Target/ARM/ARMTargetMachine.h
@@ -30,6 +30,7 @@ public:
   } TargetABI;
 
 protected:
+  const DataLayout DL;
   std::unique_ptr<TargetLoweringObjectFile> TLOF;
   ARMSubtarget Subtarget;
   bool isLittle;
@@ -46,6 +47,7 @@ public:
   const ARMSubtarget *getSubtargetImpl() const override { return &Subtarget; }
   const ARMSubtarget *getSubtargetImpl(const Function &F) const override;
+  const DataLayout *getDataLayout() const override { return &DL; }
 
   /// \brief Register ARM analysis passes with a pass manager.
   void addAnalysisPasses(PassManagerBase &PM) override;
```