Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp     | 24
-rw-r--r--  llvm/lib/CodeGen/LowLevelType.cpp                | 55
-rw-r--r--  llvm/lib/Support/CMakeLists.txt                  |  1
-rw-r--r--  llvm/lib/Support/LowLevelType.cpp                | 47
-rw-r--r--  llvm/lib/Target/AArch64/AArch64CallLowering.cpp  |  4
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp    |  2
-rw-r--r--  llvm/lib/Target/X86/X86CallLowering.cpp          |  5
7 files changed, 77 insertions, 61 deletions
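
In short: the patch turns LLT's IR-type constructor into a free function, getLLTForType(Type &, const DataLayout &), and moves the MVT constructor and LLT::print into a new Support/LowLevelType.cpp so the class itself can live in Support. A minimal sketch of the converted call-site pattern, assuming the post-patch llvm/CodeGen/LowLevelType.h declares getLLTForType (the helper function and its names below are hypothetical, not from the patch):

#include "llvm/CodeGen/LowLevelType.h"       // assumed to declare getLLTForType
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Value.h"

using namespace llvm;

// Hypothetical helper mirroring what IRTranslator::getOrCreateVReg now does:
// derive the LLT through the free function rather than the removed
// LLT(Type &, const DataLayout &) constructor.
static unsigned createVRegFor(MachineRegisterInfo &MRI, const Value &V,
                              const DataLayout &DL) {
  LLT Ty = getLLTForType(*V.getType(), DL); // was: LLT Ty{*V.getType(), DL};
  return MRI.createGenericVirtualRegister(Ty);
}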
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 817428bfb03..da6fca67c8c 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -82,7 +82,8 @@ unsigned IRTranslator::getOrCreateVReg(const Value &Val) {
// we need to concat together to produce the value.
assert(Val.getType()->isSized() &&
"Don't know how to create an empty vreg");
- unsigned VReg = MRI->createGenericVirtualRegister(LLT{*Val.getType(), *DL});
+ unsigned VReg =
+ MRI->createGenericVirtualRegister(getLLTForType(*Val.getType(), *DL));
ValReg = VReg;
if (auto CV = dyn_cast<Constant>(&Val)) {
@@ -245,7 +246,7 @@ bool IRTranslator::translateSwitch(const User &U,
const unsigned SwCondValue = getOrCreateVReg(*SwInst.getCondition());
const BasicBlock *OrigBB = SwInst.getParent();
- LLT LLTi1 = LLT(*Type::getInt1Ty(U.getContext()), *DL);
+ LLT LLTi1 = getLLTForType(*Type::getInt1Ty(U.getContext()), *DL);
for (auto &CaseIt : SwInst.cases()) {
const unsigned CaseValueReg = getOrCreateVReg(*CaseIt.getCaseValue());
const unsigned Tst = MRI->createGenericVirtualRegister(LLTi1);
@@ -301,7 +302,7 @@ bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
unsigned Res = getOrCreateVReg(LI);
unsigned Addr = getOrCreateVReg(*LI.getPointerOperand());
- LLT VTy{*LI.getType(), *DL}, PTy{*LI.getPointerOperand()->getType(), *DL};
+
MIRBuilder.buildLoad(
Res, Addr,
*MF->getMachineMemOperand(MachinePointerInfo(LI.getPointerOperand()),
@@ -319,8 +320,6 @@ bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
unsigned Val = getOrCreateVReg(*SI.getValueOperand());
unsigned Addr = getOrCreateVReg(*SI.getPointerOperand());
- LLT VTy{*SI.getValueOperand()->getType(), *DL},
- PTy{*SI.getPointerOperand()->getType(), *DL};
MIRBuilder.buildStore(
Val, Addr,
@@ -397,7 +396,8 @@ bool IRTranslator::translateSelect(const User &U,
bool IRTranslator::translateBitCast(const User &U,
MachineIRBuilder &MIRBuilder) {
// If we're bitcasting to the source type, we can reuse the source vreg.
- if (LLT{*U.getOperand(0)->getType(), *DL} == LLT{*U.getType(), *DL}) {
+ if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
+ getLLTForType(*U.getType(), *DL)) {
// Get the source vreg now, to avoid invalidating ValToVReg.
unsigned SrcReg = getOrCreateVReg(*U.getOperand(0));
unsigned &Reg = ValToVReg[&U];
@@ -428,7 +428,7 @@ bool IRTranslator::translateGetElementPtr(const User &U,
Value &Op0 = *U.getOperand(0);
unsigned BaseReg = getOrCreateVReg(Op0);
- LLT PtrTy{*Op0.getType(), *DL};
+ LLT PtrTy = getLLTForType(*Op0.getType(), *DL);
unsigned PtrSize = DL->getPointerSizeInBits(PtrTy.getAddressSpace());
LLT OffsetTy = LLT::scalar(PtrSize);
@@ -494,7 +494,7 @@ bool IRTranslator::translateGetElementPtr(const User &U,
bool IRTranslator::translateMemfunc(const CallInst &CI,
MachineIRBuilder &MIRBuilder,
unsigned ID) {
- LLT SizeTy{*CI.getArgOperand(2)->getType(), *DL};
+ LLT SizeTy = getLLTForType(*CI.getArgOperand(2)->getType(), *DL);
Type *DstTy = CI.getArgOperand(0)->getType();
if (cast<PointerType>(DstTy)->getAddressSpace() != 0 ||
SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0))
@@ -551,7 +551,7 @@ void IRTranslator::getStackGuard(unsigned DstReg,
bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
MachineIRBuilder &MIRBuilder) {
- LLT Ty{*CI.getOperand(0)->getType(), *DL};
+ LLT Ty = getLLTForType(*CI.getOperand(0)->getType(), *DL);
LLT s1 = LLT::scalar(1);
unsigned Width = Ty.getSizeInBits();
unsigned Res = MRI->createGenericVirtualRegister(Ty);
@@ -694,7 +694,7 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
getStackGuard(getOrCreateVReg(CI), MIRBuilder);
return true;
case Intrinsic::stackprotector: {
- LLT PtrTy{*CI.getArgOperand(0)->getType(), *DL};
+ LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy);
getStackGuard(GuardVal, MIRBuilder);
@@ -844,7 +844,7 @@ bool IRTranslator::translateLandingPad(const User &U,
SmallVector<LLT, 2> Tys;
for (Type *Ty : cast<StructType>(LP.getType())->elements())
- Tys.push_back(LLT{*Ty, *DL});
+ Tys.push_back(getLLTForType(*Ty, *DL));
assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
// Mark exception register as live in.
@@ -907,7 +907,7 @@ bool IRTranslator::translateAlloca(const User &U,
MIRBuilder.buildConstant(TySize, -DL->getTypeAllocSize(Ty));
MIRBuilder.buildMul(AllocSize, NumElts, TySize);
- LLT PtrTy = LLT{*AI.getType(), *DL};
+ LLT PtrTy = getLLTForType(*AI.getType(), *DL);
auto &TLI = *MF->getSubtarget().getTargetLowering();
unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
diff --git a/llvm/lib/CodeGen/LowLevelType.cpp b/llvm/lib/CodeGen/LowLevelType.cpp
index d74b7306e0f..c4b9068fa90 100644
--- a/llvm/lib/CodeGen/LowLevelType.cpp
+++ b/llvm/lib/CodeGen/LowLevelType.cpp
@@ -1,4 +1,4 @@
-//===-- llvm/CodeGen/GlobalISel/LowLevelType.cpp --------------------------===//
+//===-- llvm/CodeGen/LowLevelType.cpp -------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -18,54 +18,21 @@
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
-LLT::LLT(Type &Ty, const DataLayout &DL) {
+LLT llvm::getLLTForType(Type &Ty, const DataLayout &DL) {
if (auto VTy = dyn_cast<VectorType>(&Ty)) {
- SizeInBits = VTy->getElementType()->getPrimitiveSizeInBits();
- ElementsOrAddrSpace = VTy->getNumElements();
- Kind = ElementsOrAddrSpace == 1 ? Scalar : Vector;
+ auto NumElements = VTy->getNumElements();
+ auto ScalarSizeInBits = VTy->getElementType()->getPrimitiveSizeInBits();
+ if (NumElements == 1)
+ return LLT::scalar(ScalarSizeInBits);
+ return LLT::vector(NumElements, ScalarSizeInBits);
} else if (auto PTy = dyn_cast<PointerType>(&Ty)) {
- Kind = Pointer;
- SizeInBits = DL.getTypeSizeInBits(&Ty);
- ElementsOrAddrSpace = PTy->getAddressSpace();
+ return LLT::pointer(PTy->getAddressSpace(), DL.getTypeSizeInBits(&Ty));
} else if (Ty.isSized()) {
// Aggregates are no different from real scalars as far as GlobalISel is
// concerned.
- Kind = Scalar;
- SizeInBits = DL.getTypeSizeInBits(&Ty);
- ElementsOrAddrSpace = 1;
+ auto SizeInBits = DL.getTypeSizeInBits(&Ty);
assert(SizeInBits != 0 && "invalid zero-sized type");
- } else {
- Kind = Invalid;
- SizeInBits = ElementsOrAddrSpace = 0;
+ return LLT::scalar(SizeInBits);
}
-}
-
-LLT::LLT(MVT VT) {
- if (VT.isVector()) {
- SizeInBits = VT.getVectorElementType().getSizeInBits();
- ElementsOrAddrSpace = VT.getVectorNumElements();
- Kind = ElementsOrAddrSpace == 1 ? Scalar : Vector;
- } else if (VT.isValid()) {
- // Aggregates are no different from real scalars as far as GlobalISel is
- // concerned.
- Kind = Scalar;
- SizeInBits = VT.getSizeInBits();
- ElementsOrAddrSpace = 1;
- assert(SizeInBits != 0 && "invalid zero-sized type");
- } else {
- Kind = Invalid;
- SizeInBits = ElementsOrAddrSpace = 0;
- }
-}
-
-void LLT::print(raw_ostream &OS) const {
- if (isVector())
- OS << "<" << ElementsOrAddrSpace << " x s" << SizeInBits << ">";
- else if (isPointer())
- OS << "p" << getAddressSpace();
- else if (isValid()) {
- assert(isScalar() && "unexpected type");
- OS << "s" << getScalarSizeInBits();
- } else
- llvm_unreachable("trying to print an invalid type");
+ return LLT();
}
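
The rewrite above makes the mapping explicit: vectors become LLT::vector (a one-element vector collapses to a scalar), pointers become LLT::pointer with their DataLayout size, any other sized type, aggregates included, becomes a scalar of its DataLayout size in bits, and unsized types fall through to the invalid default LLT(). A standalone sketch of those semantics (hypothetical driver, assuming the default DataLayout):

#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include <cassert>

using namespace llvm;

int main() {
  LLVMContext Ctx;
  DataLayout DL("");                 // default layout suffices for these cases
  Type *I32 = Type::getInt32Ty(Ctx);
  // <1 x i32> is indistinguishable from i32 at the LLT level.
  assert(getLLTForType(*VectorType::get(I32, 1), DL) == LLT::scalar(32));
  // A struct of two i32s is just a 64-bit scalar as far as GlobalISel cares.
  assert(getLLTForType(*StructType::get(Ctx, {I32, I32}), DL) ==
         LLT::scalar(64));
  return 0;
}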
diff --git a/llvm/lib/Support/CMakeLists.txt b/llvm/lib/Support/CMakeLists.txt
index ae2715567c5..a46167e8714 100644
--- a/llvm/lib/Support/CMakeLists.txt
+++ b/llvm/lib/Support/CMakeLists.txt
@@ -71,6 +71,7 @@ add_llvm_library(LLVMSupport
LineIterator.cpp
Locale.cpp
LockFileManager.cpp
+ LowLevelType.cpp
ManagedStatic.cpp
MathExtras.cpp
MemoryBuffer.cpp
diff --git a/llvm/lib/Support/LowLevelType.cpp b/llvm/lib/Support/LowLevelType.cpp
new file mode 100644
index 00000000000..4290d69cd19
--- /dev/null
+++ b/llvm/lib/Support/LowLevelType.cpp
@@ -0,0 +1,47 @@
+//===-- llvm/Support/LowLevelType.cpp -------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file implements the more header-heavy bits of the LLT class to
+/// avoid polluting users' namespaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/LowLevelTypeImpl.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+LLT::LLT(MVT VT) {
+ if (VT.isVector()) {
+ SizeInBits = VT.getVectorElementType().getSizeInBits();
+ ElementsOrAddrSpace = VT.getVectorNumElements();
+ Kind = ElementsOrAddrSpace == 1 ? Scalar : Vector;
+ } else if (VT.isValid()) {
+ // Aggregates are no different from real scalars as far as GlobalISel is
+ // concerned.
+ Kind = Scalar;
+ SizeInBits = VT.getSizeInBits();
+ ElementsOrAddrSpace = 1;
+ assert(SizeInBits != 0 && "invalid zero-sized type");
+ } else {
+ Kind = Invalid;
+ SizeInBits = ElementsOrAddrSpace = 0;
+ }
+}
+
+void LLT::print(raw_ostream &OS) const {
+ if (isVector())
+ OS << "<" << ElementsOrAddrSpace << " x s" << SizeInBits << ">";
+ else if (isPointer())
+ OS << "p" << getAddressSpace();
+ else if (isValid()) {
+ assert(isScalar() && "unexpected type");
+ OS << "s" << getScalarSizeInBits();
+ } else
+ llvm_unreachable("trying to print an invalid type");
+}
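
For reference, LLT::print, moved here verbatim, produces sN for scalars, <N x sM> for vectors, and pAS for pointers. A tiny hypothetical driver exercising the factory methods the patch converts to:

#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLT::scalar(64).print(outs());     // prints "s64"
  outs() << '\n';
  LLT::vector(4, 32).print(outs());  // prints "<4 x s32>"
  outs() << '\n';
  LLT::pointer(0, 64).print(outs()); // prints "p0"
  outs() << '\n';
  return 0;
}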
diff --git a/llvm/lib/Target/AArch64/AArch64CallLowering.cpp b/llvm/lib/Target/AArch64/AArch64CallLowering.cpp
index 4ccd6b68397..ea831eff654 100644
--- a/llvm/lib/Target/AArch64/AArch64CallLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64CallLowering.cpp
@@ -196,8 +196,8 @@ void AArch64CallLowering::splitToValueTypes(
// FIXME: set split flags if they're actually used (e.g. i128 on AAPCS).
Type *SplitTy = SplitVT.getTypeForEVT(Ctx);
SplitArgs.push_back(
- ArgInfo{MRI.createGenericVirtualRegister(LLT{*SplitTy, DL}), SplitTy,
- OrigArg.Flags, OrigArg.IsFixed});
+ ArgInfo{MRI.createGenericVirtualRegister(getLLTForType(*SplitTy, DL)),
+ SplitTy, OrigArg.Flags, OrigArg.IsFixed});
}
for (unsigned i = 0; i < Offsets.size(); ++i)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
index ae5fb358154..ce70d150e52 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
@@ -50,7 +50,7 @@ unsigned AMDGPUCallLowering::lowerParameterPtr(MachineIRBuilder &MIRBuilder,
const Function &F = *MF.getFunction();
const DataLayout &DL = F.getParent()->getDataLayout();
PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUAS::CONSTANT_ADDRESS);
- LLT PtrType(*PtrTy, DL);
+ LLT PtrType = getLLTForType(*PtrTy, DL);
unsigned DstReg = MRI.createGenericVirtualRegister(PtrType);
unsigned KernArgSegmentPtr =
TRI->getPreloadedValue(MF, SIRegisterInfo::KERNARG_SEGMENT_PTR);
diff --git a/llvm/lib/Target/X86/X86CallLowering.cpp b/llvm/lib/Target/X86/X86CallLowering.cpp
index dbb74249634..4da5d0d61d8 100644
--- a/llvm/lib/Target/X86/X86CallLowering.cpp
+++ b/llvm/lib/Target/X86/X86CallLowering.cpp
@@ -58,8 +58,9 @@ void X86CallLowering::splitToValueTypes(const ArgInfo &OrigArg,
Type *PartTy = PartVT.getTypeForEVT(Context);
for (unsigned i = 0; i < NumParts; ++i) {
- ArgInfo Info = ArgInfo{MRI.createGenericVirtualRegister(LLT{*PartTy, DL}),
- PartTy, OrigArg.Flags};
+ ArgInfo Info =
+ ArgInfo{MRI.createGenericVirtualRegister(getLLTForType(*PartTy, DL)),
+ PartTy, OrigArg.Flags};
SplitArgs.push_back(Info);
PerformArgSplit(Info.Reg, PartVT.getSizeInBits() * i);
}