author    | Daniel Sanders <daniel_l_sanders@apple.com> | 2017-02-28 14:21:31 +0000
committer | Daniel Sanders <daniel_l_sanders@apple.com> | 2017-02-28 14:21:31 +0000
commit    | a5afdefec635beb686f3c541fc2259aed4f97fc4 (patch)
tree      | a2563c02a318bfa3414d125373eff95168835569 /llvm/lib
parent    | 1ffca2aeafa86aa90f824f0cee249306cb22ea55 (diff)
download  | bcm5719-llvm-a5afdefec635beb686f3c541fc2259aed4f97fc4.tar.gz
          | bcm5719-llvm-a5afdefec635beb686f3c541fc2259aed4f97fc4.zip
[globalisel] Change LLT constructor string into an LLT subclass that knows how to generate it.
Summary:
This will allow future patches to inspect the details of the LLT. The implementation is now split between
the Support and CodeGen libraries to allow TableGen to use this class without introducing layering concerns.
Thanks to Ahmed Bougacha for finding a reasonable way to avoid the layering issue and providing the version of this patch without that problem.
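A minimal before/after sketch of a call site, using placeholder names Val and DL for the caller's IR value and DataLayout (both forms are taken from the diff below):

    // Before: LLT had a constructor that inspected the IR type directly.
    LLT Ty{*Val.getType(), *DL};

    // After: a free function (implemented in the CodeGen library) builds the
    // LLT, so the LLT class itself can live in Support without IR/DataLayout
    // dependencies.
    LLT Ty = getLLTForType(*Val.getType(), *DL);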
Reviewers: t.p.northover, qcolombet, rovka, aditya_nandakumar, ab, javed.absar
Subscribers: arsenm, nhaehnle, mgorny, dberris, llvm-commits, kristof.beyls
Differential Revision: https://reviews.llvm.org/D30046
llvm-svn: 296474
Diffstat (limited to 'llvm/lib')
-rw-r--r-- | llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp    | 24
-rw-r--r-- | llvm/lib/CodeGen/LowLevelType.cpp               | 55
-rw-r--r-- | llvm/lib/Support/CMakeLists.txt                 |  1
-rw-r--r-- | llvm/lib/Support/LowLevelType.cpp               | 47
-rw-r--r-- | llvm/lib/Target/AArch64/AArch64CallLowering.cpp |  4
-rw-r--r-- | llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp   |  2
-rw-r--r-- | llvm/lib/Target/X86/X86CallLowering.cpp         |  5
7 files changed, 77 insertions, 61 deletions
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index c78de71a0e2..b7bcb073d07 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -82,7 +82,8 @@ unsigned IRTranslator::getOrCreateVReg(const Value &Val) {
     // we need to concat together to produce the value.
     assert(Val.getType()->isSized() &&
            "Don't know how to create an empty vreg");
-    unsigned VReg = MRI->createGenericVirtualRegister(LLT{*Val.getType(), *DL});
+    unsigned VReg =
+        MRI->createGenericVirtualRegister(getLLTForType(*Val.getType(), *DL));
     ValReg = VReg;
 
     if (auto CV = dyn_cast<Constant>(&Val)) {
@@ -233,7 +234,7 @@ bool IRTranslator::translateSwitch(const User &U,
   const unsigned SwCondValue = getOrCreateVReg(*SwInst.getCondition());
   const BasicBlock *OrigBB = SwInst.getParent();
 
-  LLT LLTi1 = LLT(*Type::getInt1Ty(U.getContext()), *DL);
+  LLT LLTi1 = getLLTForType(*Type::getInt1Ty(U.getContext()), *DL);
   for (auto &CaseIt : SwInst.cases()) {
     const unsigned CaseValueReg = getOrCreateVReg(*CaseIt.getCaseValue());
     const unsigned Tst = MRI->createGenericVirtualRegister(LLTi1);
@@ -289,7 +290,7 @@ bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
   unsigned Res = getOrCreateVReg(LI);
   unsigned Addr = getOrCreateVReg(*LI.getPointerOperand());
 
-  LLT VTy{*LI.getType(), *DL}, PTy{*LI.getPointerOperand()->getType(), *DL};
+
   MIRBuilder.buildLoad(
       Res, Addr,
       *MF->getMachineMemOperand(MachinePointerInfo(LI.getPointerOperand()),
@@ -307,8 +308,6 @@ bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
   unsigned Val = getOrCreateVReg(*SI.getValueOperand());
   unsigned Addr = getOrCreateVReg(*SI.getPointerOperand());
 
-  LLT VTy{*SI.getValueOperand()->getType(), *DL},
-      PTy{*SI.getPointerOperand()->getType(), *DL};
 
   MIRBuilder.buildStore(
       Val, Addr,
@@ -384,7 +383,8 @@ bool IRTranslator::translateSelect(const User &U,
 
 bool IRTranslator::translateBitCast(const User &U,
                                     MachineIRBuilder &MIRBuilder) {
-  if (LLT{*U.getOperand(0)->getType(), *DL} == LLT{*U.getType(), *DL}) {
+  if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
+      getLLTForType(*U.getType(), *DL)) {
     unsigned &Reg = ValToVReg[&U];
     if (Reg)
       MIRBuilder.buildCopy(Reg, getOrCreateVReg(*U.getOperand(0)));
@@ -411,7 +411,7 @@ bool IRTranslator::translateGetElementPtr(const User &U,
   Value &Op0 = *U.getOperand(0);
   unsigned BaseReg = getOrCreateVReg(Op0);
 
-  LLT PtrTy{*Op0.getType(), *DL};
+  LLT PtrTy = getLLTForType(*Op0.getType(), *DL);
   unsigned PtrSize = DL->getPointerSizeInBits(PtrTy.getAddressSpace());
   LLT OffsetTy = LLT::scalar(PtrSize);
 
@@ -477,7 +477,7 @@ bool IRTranslator::translateGetElementPtr(const User &U,
 bool IRTranslator::translateMemfunc(const CallInst &CI,
                                     MachineIRBuilder &MIRBuilder,
                                     unsigned ID) {
-  LLT SizeTy{*CI.getArgOperand(2)->getType(), *DL};
+  LLT SizeTy = getLLTForType(*CI.getArgOperand(2)->getType(), *DL);
   Type *DstTy = CI.getArgOperand(0)->getType();
   if (cast<PointerType>(DstTy)->getAddressSpace() != 0 ||
       SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0))
@@ -534,7 +534,7 @@ void IRTranslator::getStackGuard(unsigned DstReg,
 
 bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                               MachineIRBuilder &MIRBuilder) {
-  LLT Ty{*CI.getOperand(0)->getType(), *DL};
+  LLT Ty = getLLTForType(*CI.getOperand(0)->getType(), *DL);
   LLT s1 = LLT::scalar(1);
   unsigned Width = Ty.getSizeInBits();
   unsigned Res = MRI->createGenericVirtualRegister(Ty);
@@ -677,7 +677,7 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
     getStackGuard(getOrCreateVReg(CI), MIRBuilder);
     return true;
   case Intrinsic::stackprotector: {
-    LLT PtrTy{*CI.getArgOperand(0)->getType(), *DL};
+    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
     unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy);
     getStackGuard(GuardVal, MIRBuilder);
 
@@ -820,7 +820,7 @@ bool IRTranslator::translateLandingPad(const User &U,
 
   SmallVector<LLT, 2> Tys;
   for (Type *Ty : cast<StructType>(LP.getType())->elements())
-    Tys.push_back(LLT{*Ty, *DL});
+    Tys.push_back(getLLTForType(*Ty, *DL));
   assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
 
   // Mark exception register as live in.
@@ -885,7 +885,7 @@ bool IRTranslator::translateAlloca(const User &U,
   MIRBuilder.buildConstant(TySize, -DL->getTypeAllocSize(Ty));
   MIRBuilder.buildMul(AllocSize, NumElts, TySize);
 
-  LLT PtrTy = LLT{*AI.getType(), *DL};
+  LLT PtrTy = getLLTForType(*AI.getType(), *DL);
   auto &TLI = *MF->getSubtarget().getTargetLowering();
   unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
diff --git a/llvm/lib/CodeGen/LowLevelType.cpp b/llvm/lib/CodeGen/LowLevelType.cpp
index d74b7306e0f..c4b9068fa90 100644
--- a/llvm/lib/CodeGen/LowLevelType.cpp
+++ b/llvm/lib/CodeGen/LowLevelType.cpp
@@ -1,4 +1,4 @@
-//===-- llvm/CodeGen/GlobalISel/LowLevelType.cpp --------------------------===//
+//===-- llvm/CodeGen/LowLevelType.cpp -------------------------------------===//
 //
 // The LLVM Compiler Infrastructure
 //
@@ -18,54 +18,21 @@
 #include "llvm/Support/raw_ostream.h"
 using namespace llvm;
 
-LLT::LLT(Type &Ty, const DataLayout &DL) {
+LLT llvm::getLLTForType(Type &Ty, const DataLayout &DL) {
   if (auto VTy = dyn_cast<VectorType>(&Ty)) {
-    SizeInBits = VTy->getElementType()->getPrimitiveSizeInBits();
-    ElementsOrAddrSpace = VTy->getNumElements();
-    Kind = ElementsOrAddrSpace == 1 ? Scalar : Vector;
+    auto NumElements = VTy->getNumElements();
+    auto ScalarSizeInBits = VTy->getElementType()->getPrimitiveSizeInBits();
+    if (NumElements == 1)
+      return LLT::scalar(ScalarSizeInBits);
+    return LLT::vector(NumElements, ScalarSizeInBits);
   } else if (auto PTy = dyn_cast<PointerType>(&Ty)) {
-    Kind = Pointer;
-    SizeInBits = DL.getTypeSizeInBits(&Ty);
-    ElementsOrAddrSpace = PTy->getAddressSpace();
+    return LLT::pointer(PTy->getAddressSpace(), DL.getTypeSizeInBits(&Ty));
   } else if (Ty.isSized()) {
     // Aggregates are no different from real scalars as far as GlobalISel is
     // concerned.
-    Kind = Scalar;
-    SizeInBits = DL.getTypeSizeInBits(&Ty);
-    ElementsOrAddrSpace = 1;
+    auto SizeInBits = DL.getTypeSizeInBits(&Ty);
     assert(SizeInBits != 0 && "invalid zero-sized type");
-  } else {
-    Kind = Invalid;
-    SizeInBits = ElementsOrAddrSpace = 0;
+    return LLT::scalar(SizeInBits);
   }
-}
-
-LLT::LLT(MVT VT) {
-  if (VT.isVector()) {
-    SizeInBits = VT.getVectorElementType().getSizeInBits();
-    ElementsOrAddrSpace = VT.getVectorNumElements();
-    Kind = ElementsOrAddrSpace == 1 ? Scalar : Vector;
-  } else if (VT.isValid()) {
-    // Aggregates are no different from real scalars as far as GlobalISel is
-    // concerned.
-    Kind = Scalar;
-    SizeInBits = VT.getSizeInBits();
-    ElementsOrAddrSpace = 1;
-    assert(SizeInBits != 0 && "invalid zero-sized type");
-  } else {
-    Kind = Invalid;
-    SizeInBits = ElementsOrAddrSpace = 0;
-  }
-}
-
-void LLT::print(raw_ostream &OS) const {
-  if (isVector())
-    OS << "<" << ElementsOrAddrSpace << " x s" << SizeInBits << ">";
-  else if (isPointer())
-    OS << "p" << getAddressSpace();
-  else if (isValid()) {
-    assert(isScalar() && "unexpected type");
-    OS << "s" << getScalarSizeInBits();
-  } else
-    llvm_unreachable("trying to print an invalid type");
+  return LLT();
 }
diff --git a/llvm/lib/Support/CMakeLists.txt b/llvm/lib/Support/CMakeLists.txt
index 4f7f2166cd0..4011cde3266 100644
--- a/llvm/lib/Support/CMakeLists.txt
+++ b/llvm/lib/Support/CMakeLists.txt
@@ -68,6 +68,7 @@ add_llvm_library(LLVMSupport
   LineIterator.cpp
   Locale.cpp
   LockFileManager.cpp
+  LowLevelType.cpp
   ManagedStatic.cpp
   MathExtras.cpp
   MemoryBuffer.cpp
diff --git a/llvm/lib/Support/LowLevelType.cpp b/llvm/lib/Support/LowLevelType.cpp
new file mode 100644
index 00000000000..4290d69cd19
--- /dev/null
+++ b/llvm/lib/Support/LowLevelType.cpp
@@ -0,0 +1,47 @@
+//===-- llvm/Support/LowLevelType.cpp -------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file implements the more header-heavy bits of the LLT class to
+/// avoid polluting users' namespaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/LowLevelTypeImpl.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+LLT::LLT(MVT VT) {
+  if (VT.isVector()) {
+    SizeInBits = VT.getVectorElementType().getSizeInBits();
+    ElementsOrAddrSpace = VT.getVectorNumElements();
+    Kind = ElementsOrAddrSpace == 1 ? Scalar : Vector;
+  } else if (VT.isValid()) {
+    // Aggregates are no different from real scalars as far as GlobalISel is
+    // concerned.
+    Kind = Scalar;
+    SizeInBits = VT.getSizeInBits();
+    ElementsOrAddrSpace = 1;
+    assert(SizeInBits != 0 && "invalid zero-sized type");
+  } else {
+    Kind = Invalid;
+    SizeInBits = ElementsOrAddrSpace = 0;
+  }
+}
+
+void LLT::print(raw_ostream &OS) const {
+  if (isVector())
+    OS << "<" << ElementsOrAddrSpace << " x s" << SizeInBits << ">";
+  else if (isPointer())
+    OS << "p" << getAddressSpace();
+  else if (isValid()) {
+    assert(isScalar() && "unexpected type");
+    OS << "s" << getScalarSizeInBits();
+  } else
+    llvm_unreachable("trying to print an invalid type");
+}
diff --git a/llvm/lib/Target/AArch64/AArch64CallLowering.cpp b/llvm/lib/Target/AArch64/AArch64CallLowering.cpp
index 6148359da81..cfb71587f2b 100644
--- a/llvm/lib/Target/AArch64/AArch64CallLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64CallLowering.cpp
@@ -192,8 +192,8 @@ void AArch64CallLowering::splitToValueTypes(
     // FIXME: set split flags if they're actually used (e.g. i128 on AAPCS).
    Type *SplitTy = SplitVT.getTypeForEVT(Ctx);
     SplitArgs.push_back(
-        ArgInfo{MRI.createGenericVirtualRegister(LLT{*SplitTy, DL}), SplitTy,
-                OrigArg.Flags, OrigArg.IsFixed});
+        ArgInfo{MRI.createGenericVirtualRegister(getLLTForType(*SplitTy, DL)),
+                SplitTy, OrigArg.Flags, OrigArg.IsFixed});
   }
 
   SmallVector<uint64_t, 4> BitOffsets;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
index ae5fb358154..ce70d150e52 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
@@ -50,7 +50,7 @@ unsigned AMDGPUCallLowering::lowerParameterPtr(MachineIRBuilder &MIRBuilder,
   const Function &F = *MF.getFunction();
   const DataLayout &DL = F.getParent()->getDataLayout();
   PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUAS::CONSTANT_ADDRESS);
-  LLT PtrType(*PtrTy, DL);
+  LLT PtrType = getLLTForType(*PtrTy, DL);
   unsigned DstReg = MRI.createGenericVirtualRegister(PtrType);
   unsigned KernArgSegmentPtr =
       TRI->getPreloadedValue(MF, SIRegisterInfo::KERNARG_SEGMENT_PTR);
diff --git a/llvm/lib/Target/X86/X86CallLowering.cpp b/llvm/lib/Target/X86/X86CallLowering.cpp
index 39c7e514791..cb556a3f856 100644
--- a/llvm/lib/Target/X86/X86CallLowering.cpp
+++ b/llvm/lib/Target/X86/X86CallLowering.cpp
@@ -58,8 +58,9 @@ void X86CallLowering::splitToValueTypes(const ArgInfo &OrigArg,
   Type *PartTy = PartVT.getTypeForEVT(Context);
 
   for (unsigned i = 0; i < NumParts; ++i) {
-    ArgInfo Info = ArgInfo{MRI.createGenericVirtualRegister(LLT{*PartTy, DL}),
-                           PartTy, OrigArg.Flags};
+    ArgInfo Info =
+        ArgInfo{MRI.createGenericVirtualRegister(getLLTForType(*PartTy, DL)),
+                PartTy, OrigArg.Flags};
     SplitArgs.push_back(Info);
     BitOffsets.push_back(PartVT.getSizeInBits() * i);
     SplitRegs.push_back(Info.Reg);
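As a rough usage sketch of the new helper: the mappings below follow getLLTForType and LLT::print as defined in the diff above. The #include paths and the demo function are assumptions for illustration; the corresponding headers are outside the llvm/lib subset shown in this diff.

    #include "llvm/CodeGen/LowLevelType.h" // assumed to declare getLLTForType
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Type.h"
    using namespace llvm;

    void demo(LLVMContext &Ctx, const DataLayout &DL) {
      // i64 becomes a scalar LLT, printed as "s64".
      LLT S64 = getLLTForType(*Type::getInt64Ty(Ctx), DL);
      // <4 x i32> becomes a vector LLT, printed as "<4 x s32>".
      LLT V4S32 = getLLTForType(*VectorType::get(Type::getInt32Ty(Ctx), 4), DL);
      // i8* in address space 0 becomes a pointer LLT, printed as "p0"; its
      // size comes from the DataLayout, not from the pointee type.
      LLT P0 = getLLTForType(*Type::getInt8PtrTy(Ctx), DL);
      (void)S64; (void)V4S32; (void)P0; // silence unused-variable warnings
    }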