author     Mehdi Amini <mehdi.amini@apple.com>    2015-07-09 01:57:34 +0000
committer  Mehdi Amini <mehdi.amini@apple.com>    2015-07-09 01:57:34 +0000
commit     56228dabfa51a3c810d258fcc1f2a8773c499c4d (patch)
tree       c5146dea93a4b557ce7c26f71cf005cbd0983701 /llvm/lib/Target
parent     890a0e5543924c3a83397607f9522a838e74bdcd (diff)
download   bcm5719-llvm-56228dabfa51a3c810d258fcc1f2a8773c499c4d.tar.gz
           bcm5719-llvm-56228dabfa51a3c810d258fcc1f2a8773c499c4d.zip
Redirect DataLayout from TargetMachine to Module in ComputeValueVTs()
Summary:
Avoid using the TargetMachine-owned DataLayout and use the Module-owned
one instead. This requires passing the DataLayout up the stack to
ComputeValueVTs().

This change is part of a series of commits dedicated to having a single
DataLayout during compilation, by always using the one owned by the
module.
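For readers skimming the diff, the caller-side pattern after this change looks roughly like the sketch below. This is an illustrative sketch, not code from the patch: the helper name lowerReturnParts is hypothetical, while ComputeValueVTs(), SelectionDAG::getMachineFunction(), and MachineFunction::getDataLayout() are the interfaces the diff itself uses.

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Analysis.h"        // declares ComputeValueVTs()
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"

using namespace llvm;

// Hypothetical helper: shows where the DataLayout now comes from and how it
// is threaded into ComputeValueVTs().
static void lowerReturnParts(const TargetLowering &TLI, SelectionDAG &DAG,
                             Type *RetTy) {
  // Previously the DataLayout was fetched from the TargetMachine behind TLI;
  // now it is the Module-owned layout, reached through the MachineFunction.
  const DataLayout &DL = DAG.getMachineFunction().getDataLayout();

  SmallVector<EVT, 16> ValueVTs;
  SmallVector<uint64_t, 16> Offsets;
  // The DataLayout is an explicit parameter of ComputeValueVTs() after this
  // change, matching the updated signature shown in the diff below.
  ComputeValueVTs(TLI, DL, RetTy, ValueVTs, &Offsets, /*StartingOffset=*/0);
}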
Reviewers: echristo
Subscribers: jholewinski, yaron.keren, rafael, llvm-commits
Differential Revision: http://reviews.llvm.org/D11019
From: Mehdi Amini <mehdi.amini@apple.com>
llvm-svn: 241773
Diffstat (limited to 'llvm/lib/Target')
-rw-r--r--  llvm/lib/Target/AArch64/AArch64FastISel.cpp  |  2
-rw-r--r--  llvm/lib/Target/ARM/ARMFastISel.cpp          |  2
-rw-r--r--  llvm/lib/Target/Mips/MipsFastISel.cpp        |  3
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp    | 10
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp  | 76
-rw-r--r--  llvm/lib/Target/PowerPC/PPCFastISel.cpp      |  2
-rw-r--r--  llvm/lib/Target/X86/X86FastISel.cpp          |  3
7 files changed, 50 insertions, 48 deletions
diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
index c19fcdc4bb1..a8dd4d670fc 100644
--- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
@@ -3689,7 +3689,7 @@ bool AArch64FastISel::selectRet(const Instruction *I) {
   if (Ret->getNumOperands() > 0) {
     CallingConv::ID CC = F.getCallingConv();
     SmallVector<ISD::OutputArg, 4> Outs;
-    GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI);
+    GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI, DL);

     // Analyze operands of the call, assigning locations to each operand.
     SmallVector<CCValAssign, 16> ValLocs;
diff --git a/llvm/lib/Target/ARM/ARMFastISel.cpp b/llvm/lib/Target/ARM/ARMFastISel.cpp
index 3a36d462799..a52bb7a0e08 100644
--- a/llvm/lib/Target/ARM/ARMFastISel.cpp
+++ b/llvm/lib/Target/ARM/ARMFastISel.cpp
@@ -2093,7 +2093,7 @@ bool ARMFastISel::SelectRet(const Instruction *I) {
   CallingConv::ID CC = F.getCallingConv();
   if (Ret->getNumOperands() > 0) {
     SmallVector<ISD::OutputArg, 4> Outs;
-    GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI);
+    GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI, DL);

     // Analyze operands of the call, assigning locations to each operand.
     SmallVector<CCValAssign, 16> ValLocs;
diff --git a/llvm/lib/Target/Mips/MipsFastISel.cpp b/llvm/lib/Target/Mips/MipsFastISel.cpp
index c2651b82d28..17ad7697184 100644
--- a/llvm/lib/Target/Mips/MipsFastISel.cpp
+++ b/llvm/lib/Target/Mips/MipsFastISel.cpp
@@ -1415,7 +1415,8 @@ bool MipsFastISel::selectRet(const Instruction *I) {
   if (Ret->getNumOperands() > 0) {
     CallingConv::ID CC = F.getCallingConv();
     SmallVector<ISD::OutputArg, 4> Outs;
-    GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI);
+    GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI, DL);
+
     // Analyze operands of the call, assigning locations to each operand.
     SmallVector<CCValAssign, 16> ValLocs;
     MipsCCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs,
diff --git a/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
index cadd7a46cd9..12911da0e35 100644
--- a/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
@@ -340,7 +340,7 @@ MCOperand NVPTXAsmPrinter::GetSymbolRef(const MCSymbol *Symbol) {
 }

 void NVPTXAsmPrinter::printReturnValStr(const Function *F, raw_ostream &O) {
-  const DataLayout *TD = TM.getDataLayout();
+  const DataLayout &DL = getDataLayout();
   const TargetLowering *TLI = nvptxSubtarget->getTargetLowering();

   Type *Ty = F->getReturnType();
@@ -369,17 +369,17 @@
       O << ".param .b" << TLI->getPointerTy().getSizeInBits()
         << " func_retval0";
     } else if ((Ty->getTypeID() == Type::StructTyID) || isa<VectorType>(Ty)) {
-      unsigned totalsz = TD->getTypeAllocSize(Ty);
+      unsigned totalsz = DL.getTypeAllocSize(Ty);
       unsigned retAlignment = 0;
       if (!llvm::getAlign(*F, 0, retAlignment))
-        retAlignment = TD->getABITypeAlignment(Ty);
+        retAlignment = DL.getABITypeAlignment(Ty);
       O << ".param .align " << retAlignment << " .b8 func_retval0[" << totalsz
         << "]";
     } else
       llvm_unreachable("Unknown return type");
   } else {
     SmallVector<EVT, 16> vtparts;
-    ComputeValueVTs(*TLI, Ty, vtparts);
+    ComputeValueVTs(*TLI, DL, Ty, vtparts);
     unsigned idx = 0;
     for (unsigned i = 0, e = vtparts.size(); i != e; ++i) {
       unsigned elems = 1;
@@ -1579,7 +1579,7 @@ void NVPTXAsmPrinter::emitFunctionParamList(const Function *F, raw_ostream &O) {
           // Further, if a part is vector, print the above for
           // each vector element.
           SmallVector<EVT, 16> vtparts;
-          ComputeValueVTs(*TLI, ETy, vtparts);
+          ComputeValueVTs(*TLI, getDataLayout(), ETy, vtparts);
           for (unsigned i = 0, e = vtparts.size(); i != e; ++i) {
             unsigned elems = 1;
             EVT elemtype = vtparts[i];
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index f1c8272ffbc..e3d25f7936c 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -80,14 +80,14 @@ static bool IsPTXVectorType(MVT VT) {
 /// NOTE: This is a band-aid for code that expects ComputeValueVTs to return the
 /// same number of types as the Ins/Outs arrays in LowerFormalArguments,
 /// LowerCall, and LowerReturn.
-static void ComputePTXValueVTs(const TargetLowering &TLI, Type *Ty,
-                               SmallVectorImpl<EVT> &ValueVTs,
+static void ComputePTXValueVTs(const TargetLowering &TLI, const DataLayout &DL,
+                               Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                                SmallVectorImpl<uint64_t> *Offsets = nullptr,
                                uint64_t StartingOffset = 0) {
   SmallVector<EVT, 16> TempVTs;
   SmallVector<uint64_t, 16> TempOffsets;

-  ComputeValueVTs(TLI, Ty, TempVTs, &TempOffsets, StartingOffset);
+  ComputeValueVTs(TLI, DL, Ty, TempVTs, &TempOffsets, StartingOffset);
   for (unsigned i = 0, e = TempVTs.size(); i != e; ++i) {
     EVT VT = TempVTs[i];
     uint64_t Off = TempOffsets[i];
@@ -960,7 +960,7 @@ NVPTXTargetLowering::getPrototype(Type *retTy, const ArgListTy &Args,
         O << "[" << sz << "]";
         // update the index for Outs
         SmallVector<EVT, 16> vtparts;
-        ComputeValueVTs(*this, Ty, vtparts);
+        ComputeValueVTs(*this, *TD, Ty, vtparts);
         if (unsigned len = vtparts.size())
           OIdx += len - 1;
         continue;
@@ -1064,9 +1064,9 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
   assert(isABI && "Non-ABI compilation is not supported");
   if (!isABI)
     return Chain;
-  const DataLayout *TD = getDataLayout();
   MachineFunction &MF = DAG.getMachineFunction();
   const Function *F = MF.getFunction();
+  auto &DL = MF.getDataLayout();

   SDValue tempChain = Chain;
   Chain = DAG.getCALLSEQ_START(Chain,
@@ -1096,11 +1096,11 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
       // aggregate
       SmallVector<EVT, 16> vtparts;
       SmallVector<uint64_t, 16> Offsets;
-      ComputePTXValueVTs(*this, Ty, vtparts, &Offsets, 0);
+      ComputePTXValueVTs(*this, DL, Ty, vtparts, &Offsets, 0);
       unsigned align = getArgumentAlignment(Callee, CS, Ty, paramCount + 1);

       // declare .param .align <align> .b8 .param<n>[<size>];
-      unsigned sz = TD->getTypeAllocSize(Ty);
+      unsigned sz = DL.getTypeAllocSize(Ty);
       SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
       SDValue DeclareParamOps[] = { Chain,
                                     DAG.getConstant(align, dl, MVT::i32),
@@ -1140,7 +1140,7 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
       EVT ObjectVT = getValueType(Ty);
       unsigned align = getArgumentAlignment(Callee, CS, Ty, paramCount + 1);
       // declare .param .align <align> .b8 .param<n>[<size>];
-      unsigned sz = TD->getTypeAllocSize(Ty);
+      unsigned sz = DL.getTypeAllocSize(Ty);
       SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
       SDValue DeclareParamOps[] = { Chain,
                                     DAG.getConstant(align, dl, MVT::i32),
@@ -1321,7 +1321,7 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
       SmallVector<uint64_t, 16> Offsets;
       const PointerType *PTy = dyn_cast<PointerType>(Args[i].Ty);
       assert(PTy && "Type of a byval parameter should be pointer");
-      ComputePTXValueVTs(*this, PTy->getElementType(), vtparts, &Offsets, 0);
+      ComputePTXValueVTs(*this, DL, PTy->getElementType(), vtparts, &Offsets, 0);

       // declare .param .align <align> .b8 .param<n>[<size>];
       unsigned sz = Outs[OIdx].Flags.getByValSize();
@@ -1371,12 +1371,12 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
   // Handle Result
   if (Ins.size() > 0) {
     SmallVector<EVT, 16> resvtparts;
-    ComputeValueVTs(*this, retTy, resvtparts);
+    ComputeValueVTs(*this, DL, retTy, resvtparts);

     // Declare
     //  .param .align 16 .b8 retval0[<size-in-bytes>], or
     //  .param .b<size-in-bits> retval0
-    unsigned resultsz = TD->getTypeAllocSizeInBits(retTy);
+    unsigned resultsz = DL.getTypeAllocSizeInBits(retTy);
     // Emit ".param .b<size-in-bits> retval0" instead of byte arrays only for
     // these three types to match the logic in
     // NVPTXAsmPrinter::printReturnValStr and NVPTXTargetLowering::getPrototype.
@@ -1590,13 +1590,13 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
             Elt = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
           InVals.push_back(Elt);
         }
-        Ofst += TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
+        Ofst += DL.getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
       }
     }
   } else {
     SmallVector<EVT, 16> VTs;
     SmallVector<uint64_t, 16> Offsets;
-    ComputePTXValueVTs(*this, retTy, VTs, &Offsets, 0);
+    ComputePTXValueVTs(*this, DL, retTy, VTs, &Offsets, 0);
     assert(VTs.size() == Ins.size() && "Bad value decomposition");
     unsigned RetAlign = getArgumentAlignment(Callee, CS, retTy, 0);
     for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
@@ -1608,8 +1608,7 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,

       SmallVector<EVT, 4> LoadRetVTs;
       EVT TheLoadType = VTs[i];
-      if (retTy->isIntegerTy() &&
-          TD->getTypeAllocSizeInBits(retTy) < 32) {
+      if (retTy->isIntegerTy() && DL.getTypeAllocSizeInBits(retTy) < 32) {
         // This is for integer types only, and specifically not for
         // aggregates.
         LoadRetVTs.push_back(MVT::i32);
@@ -2064,7 +2063,7 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
     const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl, SelectionDAG &DAG,
     SmallVectorImpl<SDValue> &InVals) const {
   MachineFunction &MF = DAG.getMachineFunction();
-  const DataLayout *TD = getDataLayout();
+  const DataLayout &DL = MF.getDataLayout();
   const Function *F = MF.getFunction();
   const AttributeSet &PAL = F->getAttributes();

@@ -2118,7 +2117,7 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
       if (Ty->isAggregateType()) {
        SmallVector<EVT, 16> vtparts;

-        ComputePTXValueVTs(*this, Ty, vtparts);
+        ComputePTXValueVTs(*this, DL, Ty, vtparts);
        assert(vtparts.size() > 0 && "empty aggregate type not expected");
        for (unsigned parti = 0, parte = vtparts.size(); parti != parte;
             ++parti) {
@@ -2156,7 +2155,7 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
        // NOTE: Here, we lose the ability to issue vector loads for vectors
        // that are a part of a struct.  This should be investigated in the
        // future.
-        ComputePTXValueVTs(*this, Ty, vtparts, &offsets, 0);
+        ComputePTXValueVTs(*this, DL, Ty, vtparts, &offsets, 0);
        assert(vtparts.size() > 0 && "empty aggregate type not expected");
        bool aggregateIsPacked = false;
        if (StructType *STy = llvm::dyn_cast<StructType>(Ty))
@@ -2172,10 +2171,10 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
          SDValue srcAddr =
              DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
                          DAG.getConstant(offsets[parti], dl, getPointerTy()));
-          unsigned partAlign =
-              aggregateIsPacked ? 1
-                                : TD->getABITypeAlignment(
-                                      partVT.getTypeForEVT(F->getContext()));
+          unsigned partAlign = aggregateIsPacked
+                                   ? 1
+                                   : DL.getABITypeAlignment(
+                                         partVT.getTypeForEVT(F->getContext()));
          SDValue p;
          if (Ins[InsIdx].VT.getSizeInBits() > partVT.getSizeInBits()) {
            ISD::LoadExtType ExtOp = Ins[InsIdx].Flags.isSExt() ?
@@ -2212,9 +2211,9 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
        Value *SrcValue = Constant::getNullValue(PointerType::get(
            EltVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM));
        SDValue P = DAG.getLoad(
-            EltVT, dl, Root, Arg, MachinePointerInfo(SrcValue), false,
-            false, true,
-            TD->getABITypeAlignment(EltVT.getTypeForEVT(F->getContext())));
+            EltVT, dl, Root, Arg, MachinePointerInfo(SrcValue), false, false,
+            true,
+            DL.getABITypeAlignment(EltVT.getTypeForEVT(F->getContext())));
        if (P.getNode())
          P.getNode()->setIROrder(idx + 1);

@@ -2229,9 +2228,9 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
        Value *SrcValue = Constant::getNullValue(PointerType::get(
            VecVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM));
        SDValue P = DAG.getLoad(
-            VecVT, dl, Root, Arg, MachinePointerInfo(SrcValue), false,
-            false, true,
-            TD->getABITypeAlignment(VecVT.getTypeForEVT(F->getContext())));
+            VecVT, dl, Root, Arg, MachinePointerInfo(SrcValue), false, false,
+            true,
+            DL.getABITypeAlignment(VecVT.getTypeForEVT(F->getContext())));
        if (P.getNode())
          P.getNode()->setIROrder(idx + 1);

@@ -2275,7 +2274,7 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
          SDValue P = DAG.getLoad(
              VecVT, dl, Root, SrcAddr, MachinePointerInfo(SrcValue), false,
              false, true,
-              TD->getABITypeAlignment(VecVT.getTypeForEVT(F->getContext())));
+              DL.getABITypeAlignment(VecVT.getTypeForEVT(F->getContext())));
          if (P.getNode())
            P.getNode()->setIROrder(idx + 1);

@@ -2288,7 +2287,7 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
              Elt = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, Elt);
            InVals.push_back(Elt);
          }
-          Ofst += TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
+          Ofst += DL.getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
        }
        InsIdx += NumElts;
      }
@@ -2307,14 +2306,15 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
      if (ObjectVT.getSizeInBits() < Ins[InsIdx].VT.getSizeInBits()) {
        ISD::LoadExtType ExtOp = Ins[InsIdx].Flags.isSExt() ?
                                 ISD::SEXTLOAD : ISD::ZEXTLOAD;
-        p = DAG.getExtLoad(ExtOp, dl, Ins[InsIdx].VT, Root, Arg,
-                           MachinePointerInfo(srcValue), ObjectVT, false, false,
-                           false,
-                           TD->getABITypeAlignment(ObjectVT.getTypeForEVT(F->getContext())));
+        p = DAG.getExtLoad(
+            ExtOp, dl, Ins[InsIdx].VT, Root, Arg, MachinePointerInfo(srcValue),
+            ObjectVT, false, false, false,
+            DL.getABITypeAlignment(ObjectVT.getTypeForEVT(F->getContext())));
      } else {
-        p = DAG.getLoad(Ins[InsIdx].VT, dl, Root, Arg,
-                        MachinePointerInfo(srcValue), false, false, false,
-                        TD->getABITypeAlignment(ObjectVT.getTypeForEVT(F->getContext())));
+        p = DAG.getLoad(
+            Ins[InsIdx].VT, dl, Root, Arg, MachinePointerInfo(srcValue), false,
+            false, false,
+            DL.getABITypeAlignment(ObjectVT.getTypeForEVT(F->getContext())));
      }
      if (p.getNode())
        p.getNode()->setIROrder(idx + 1);
@@ -2493,7 +2493,7 @@ NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
  } else {
    SmallVector<EVT, 16> ValVTs;
    SmallVector<uint64_t, 16> Offsets;
-    ComputePTXValueVTs(*this, RetTy, ValVTs, &Offsets, 0);
+    ComputePTXValueVTs(*this, DAG.getDataLayout(), RetTy, ValVTs, &Offsets, 0);
    assert(ValVTs.size() == OutVals.size() && "Bad return value decomposition");

    for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
diff --git a/llvm/lib/Target/PowerPC/PPCFastISel.cpp b/llvm/lib/Target/PowerPC/PPCFastISel.cpp
index ac59542cb7a..b726b4599c3 100644
--- a/llvm/lib/Target/PowerPC/PPCFastISel.cpp
+++ b/llvm/lib/Target/PowerPC/PPCFastISel.cpp
@@ -1594,7 +1594,7 @@ bool PPCFastISel::SelectRet(const Instruction *I) {

   if (Ret->getNumOperands() > 0) {
     SmallVector<ISD::OutputArg, 4> Outs;
-    GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI);
+    GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI, DL);

     // Analyze operands of the call, assigning locations to each operand.
     SmallVector<CCValAssign, 16> ValLocs;
diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
index 0e2b1d31f04..abcd0597a8e 100644
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -1000,7 +1000,8 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {

   if (Ret->getNumOperands() > 0) {
     SmallVector<ISD::OutputArg, 4> Outs;
-    GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI);
+    GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI,
+                  MF->getDataLayout());

     // Analyze operands of the call, assigning locations to each operand.
     SmallVector<CCValAssign, 16> ValLocs;