20 files changed, 84 insertions, 86 deletions
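Every hunk below makes the same mechanical substitution: VT.getVectorElementType().getSizeInBits() becomes the shorter, equivalent VT.getScalarSizeInBits(). A minimal sketch of the pattern (not part of the patch; the two helper functions here are hypothetical, for illustration only):

    #include "llvm/CodeGen/ValueTypes.h"
    using namespace llvm;

    // Both helpers compute the element size in bytes of a vector EVT
    // such as v4i32; only the spelling differs.
    static unsigned eltBytesOld(EVT VT) {
      // Old spelling: valid only when VT is a vector type.
      return VT.getVectorElementType().getSizeInBits() / 8;
    }

    static unsigned eltBytesNew(EVT VT) {
      // New spelling: same result for vectors, and also well-defined
      // for scalar types, where it returns the type's own width.
      return VT.getScalarSizeInBits() / 8;
    }
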
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 0f932eb6866..9000cdd208b 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -1216,8 +1216,7 @@ SDValue SelectionDAGLegalize::ExpandExtractFromVectorThroughStack(SDValue Op) {
   }
 
   // Add the offset to the index.
-  unsigned EltSize =
-      Vec.getValueType().getVectorElementType().getSizeInBits()/8;
+  unsigned EltSize = Vec.getValueType().getScalarSizeInBits() / 8;
   Idx = DAG.getNode(ISD::MUL, dl, Idx.getValueType(), Idx,
                     DAG.getConstant(EltSize, SDLoc(Vec), Idx.getValueType()));
@@ -1268,8 +1267,7 @@ SDValue SelectionDAGLegalize::ExpandInsertToVectorThroughStack(SDValue Op) {
   // Then store the inserted part.
 
   // Add the offset to the index.
-  unsigned EltSize =
-      Vec.getValueType().getVectorElementType().getSizeInBits()/8;
+  unsigned EltSize = Vec.getValueType().getScalarSizeInBits() / 8;
   Idx = DAG.getNode(ISD::MUL, dl, Idx.getValueType(), Idx,
                     DAG.getConstant(EltSize, SDLoc(Vec), Idx.getValueType()));
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index da872792275..6fd2a9f0cea 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -1054,7 +1054,7 @@ SDValue DAGTypeLegalizer::PromoteIntOp_BUILD_VECTOR(SDNode *N) {
   // vector element type. Check that any extra bits introduced will be
   // truncated away.
   assert(N->getOperand(0).getValueSizeInBits() >=
-         N->getValueType(0).getVectorElementType().getSizeInBits() &&
+         N->getValueType(0).getScalarSizeInBits() &&
          "Type of inserted value narrower than vector element type!");
 
   SmallVector<SDValue, 16> NewOps;
@@ -1084,7 +1084,7 @@ SDValue DAGTypeLegalizer::PromoteIntOp_INSERT_VECTOR_ELT(SDNode *N,
     // Check that any extra bits introduced will be truncated away.
     assert(N->getOperand(1).getValueSizeInBits() >=
-           N->getValueType(0).getVectorElementType().getSizeInBits() &&
+           N->getValueType(0).getScalarSizeInBits() &&
            "Type of inserted value narrower than vector element type!");
     return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0),
                                           GetPromotedInteger(N->getOperand(1)),
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
index 37e587a9c9c..d3489a97604 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
@@ -795,7 +795,7 @@ void DAGTypeLegalizer::SetScalarizedVector(SDValue Op, SDValue Result) {
   // the vector element type. For example BUILD_VECTOR of type <1 x i1> with
   // a constant i8 operand.
   assert(Result.getValueSizeInBits() >=
-         Op.getValueType().getVectorElementType().getSizeInBits() &&
+         Op.getValueType().getScalarSizeInBits() &&
          "Invalid type for scalarized vector");
   AnalyzeNewValue(Result);
@@ -913,7 +913,7 @@ SDValue DAGTypeLegalizer::BitConvertToInteger(SDValue Op) {
 /// Convert to a vector of integers of the same size.
 SDValue DAGTypeLegalizer::BitConvertVectorToIntegerVector(SDValue Op) {
   assert(Op.getValueType().isVector() && "Only applies to vectors!");
-  unsigned EltWidth = Op.getValueType().getVectorElementType().getSizeInBits();
+  unsigned EltWidth = Op.getValueType().getScalarSizeInBits();
   EVT EltNVT = EVT::getIntegerVT(*DAG.getContext(), EltWidth);
   unsigned NumElts = Op.getValueType().getVectorNumElements();
   return DAG.getNode(ISD::BITCAST, SDLoc(Op),
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 4681d5b6753..fd433e3d9bf 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -817,8 +817,8 @@ SDValue VectorLegalizer::ExpandSIGN_EXTEND_VECTOR_INREG(SDValue Op) {
   // Now we need sign extend. Do this by shifting the elements. Even if these
   // aren't legal operations, they have a better chance of being legalized
   // without full scalarization than the sign extension does.
-  unsigned EltWidth = VT.getVectorElementType().getSizeInBits();
-  unsigned SrcEltWidth = SrcVT.getVectorElementType().getSizeInBits();
+  unsigned EltWidth = VT.getScalarSizeInBits();
+  unsigned SrcEltWidth = SrcVT.getScalarSizeInBits();
   SDValue ShiftAmount = DAG.getConstant(EltWidth - SrcEltWidth, DL, VT);
   return DAG.getNode(ISD::SRA, DL, VT,
                      DAG.getNode(ISD::SHL, DL, VT, Op, ShiftAmount),
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 35232787ca1..65d4d613176 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -1282,7 +1282,7 @@ void DAGTypeLegalizer::SplitVecRes_ExtendOp(SDNode *N, SDValue &Lo,
     LLVMContext &Ctx = *DAG.getContext();
     EVT NewSrcVT = EVT::getVectorVT(
         Ctx, EVT::getIntegerVT(
-                 Ctx, SrcVT.getVectorElementType().getSizeInBits() * 2),
+                 Ctx, SrcVT.getScalarSizeInBits() * 2),
         NumElements);
     EVT SplitSrcVT =
         EVT::getVectorVT(Ctx, SrcVT.getVectorElementType(), NumElements / 2);
@@ -1940,8 +1940,8 @@ SDValue DAGTypeLegalizer::SplitVecOp_TruncateHelper(SDNode *N) {
   // if we're trying to split it at all. assert() that's true, just in case.
   assert(!(NumElements & 1) && "Splitting vector, but not in half!");
 
-  unsigned InElementSize = InVT.getVectorElementType().getSizeInBits();
-  unsigned OutElementSize = OutVT.getVectorElementType().getSizeInBits();
+  unsigned InElementSize = InVT.getScalarSizeInBits();
+  unsigned OutElementSize = OutVT.getScalarSizeInBits();
 
   // If the input elements are only 1/2 the width of the result elements,
   // just use the normal splitting. Our trick only work if there's room
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 518a2d6f527..309be2c2253 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -134,7 +134,7 @@ bool ISD::isBuildVectorAllOnes(const SDNode *N) {
   // we care if the resultant vector is all ones, not whether the individual
   // constants are.
   SDValue NotZero = N->getOperand(i);
-  unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
+  unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
     if (CN->getAPIntValue().countTrailingOnes() < EltSize)
       return false;
@@ -173,7 +173,7 @@ bool ISD::isBuildVectorAllZeros(const SDNode *N) {
   // We only want to check enough bits to cover the vector elements, because
   // we care if the resultant vector is all zeros, not whether the individual
   // constants are.
-  unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
+  unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
     if (CN->getAPIntValue().countTrailingZeros() < EltSize)
       return false;
@@ -7125,7 +7125,7 @@ bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue,
   // false.
   unsigned int nOps = getNumOperands();
   assert(nOps > 0 && "isConstantSplat has 0-size build vector");
-  unsigned EltBitSize = VT.getVectorElementType().getSizeInBits();
+  unsigned EltBitSize = VT.getScalarSizeInBits();
 
   for (unsigned j = 0; j < nOps; ++j) {
     unsigned i = isBigEndian ? nOps-1-j : j;
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index 59d5706e789..4c11f2131f2 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -1395,7 +1395,7 @@ void TargetLoweringBase::computeRegisterProperties(
         MVT SVT = (MVT::SimpleValueType) nVT;
         // Promote vectors of integers to vectors with the same number
         // of elements, with a wider element type.
-        if (SVT.getVectorElementType().getSizeInBits() > EltVT.getSizeInBits() &&
+        if (SVT.getScalarSizeInBits() > EltVT.getSizeInBits() &&
            SVT.getVectorNumElements() == NElts && isTypeLegal(SVT)) {
          TransformToType[i] = SVT;
          RegisterTypeForVT[i] = SVT;
diff --git a/llvm/lib/IR/ValueTypes.cpp b/llvm/lib/IR/ValueTypes.cpp
index ff1e431c2e9..2132e165922 100644
--- a/llvm/lib/IR/ValueTypes.cpp
+++ b/llvm/lib/IR/ValueTypes.cpp
@@ -26,7 +26,7 @@ EVT EVT::changeExtendedTypeToInteger() const {
 
 EVT EVT::changeExtendedVectorElementTypeToInteger() const {
   LLVMContext &Context = LLVMTy->getContext();
-  EVT IntTy = getIntegerVT(Context, getVectorElementType().getSizeInBits());
+  EVT IntTy = getIntegerVT(Context, getScalarSizeInBits());
   return getVectorVT(Context, IntTy, getVectorNumElements());
 }
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 568e416873f..7291be33276 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -2130,7 +2130,7 @@ static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG,
 
   for (const SDValue &Elt : N->op_values()) {
     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
-      unsigned EltSize = VT.getVectorElementType().getSizeInBits();
+      unsigned EltSize = VT.getScalarSizeInBits();
       unsigned HalfSize = EltSize / 2;
       if (isSigned) {
         if (!isIntN(HalfSize, C->getSExtValue()))
@@ -2157,7 +2157,7 @@ static SDValue skipExtensionForVectorMULL(SDNode *N, SelectionDAG &DAG) {
   assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
   EVT VT = N->getValueType(0);
   SDLoc dl(N);
-  unsigned EltSize = VT.getVectorElementType().getSizeInBits() / 2;
+  unsigned EltSize = VT.getScalarSizeInBits() / 2;
   unsigned NumElts = VT.getVectorNumElements();
   MVT TruncVT = MVT::getIntegerVT(EltSize);
   SmallVector<SDValue, 8> Ops;
@@ -5028,7 +5028,7 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
     }
   }
   unsigned ResMultiplier =
-      VT.getVectorElementType().getSizeInBits() / SmallestEltTy.getSizeInBits();
+      VT.getScalarSizeInBits() / SmallestEltTy.getSizeInBits();
   NumElts = VT.getSizeInBits() / SmallestEltTy.getSizeInBits();
   EVT ShuffleVT = EVT::getVectorVT(*DAG.getContext(), SmallestEltTy, NumElts);
@@ -5113,7 +5113,7 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
   // The stars all align, our next step is to produce the mask for the shuffle.
   SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1);
-  int BitsPerShuffleLane = ShuffleVT.getVectorElementType().getSizeInBits();
+  int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits();
   for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) {
     SDValue Entry = Op.getOperand(i);
     if (Entry.isUndef())
@@ -5126,8 +5126,8 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
     // trunc. So only std::min(SrcBits, DestBits) actually get defined in this
     // segment.
     EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType();
-    int BitsDefined = std::min(OrigEltTy.getSizeInBits(),
-                               VT.getVectorElementType().getSizeInBits());
+    int BitsDefined =
+        std::min(OrigEltTy.getSizeInBits(), VT.getScalarSizeInBits());
     int LanesDefined = BitsDefined / BitsPerShuffleLane;
 
     // This source is expected to fill ResMultiplier lanes of the final shuffle,
@@ -5231,7 +5231,7 @@ static bool isREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
   assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) &&
          "Only possible block sizes for REV are: 16, 32, 64");
 
-  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
+  unsigned EltSz = VT.getScalarSizeInBits();
   if (EltSz == 64)
     return false;
@@ -5949,7 +5949,7 @@ static SDValue tryLowerToSLI(SDNode *N, SelectionDAG &DAG) {
   // Is C1 == ~C2, taking into account how much one can shift elements of a
   // particular size?
   uint64_t C2 = C2node->getZExtValue();
-  unsigned ElemSizeInBits = VT.getVectorElementType().getSizeInBits();
+  unsigned ElemSizeInBits = VT.getScalarSizeInBits();
   if (C2 > ElemSizeInBits)
     return SDValue();
   unsigned ElemMask = (1 << ElemSizeInBits) - 1;
@@ -6445,7 +6445,7 @@ FailedModImm:
   if (!isConstant && !usesOnlyOneValue) {
     SDValue Vec = DAG.getUNDEF(VT);
     SDValue Op0 = Op.getOperand(0);
-    unsigned ElemSize = VT.getVectorElementType().getSizeInBits();
+    unsigned ElemSize = VT.getScalarSizeInBits();
     unsigned i = 0;
     // For 32 and 64 bit types, use INSERT_SUBREG for lane zero to
     // a) Avoid a RMW dependency on the full vector register, and
@@ -6567,7 +6567,7 @@ SDValue AArch64TargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
 
   // If this is extracting the upper 64-bits of a 128-bit vector, we match
   // that directly.
-  if (Size == 64 && Val * VT.getVectorElementType().getSizeInBits() == 64)
+  if (Size == 64 && Val * VT.getScalarSizeInBits() == 64)
     return Op;
 
   return SDValue();
@@ -6637,7 +6637,7 @@ static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
 /// 0 <= Value <= ElementBits for a long left shift.
 static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
   assert(VT.isVector() && "vector shift count is not a vector type");
-  int64_t ElementBits = VT.getVectorElementType().getSizeInBits();
+  int64_t ElementBits = VT.getScalarSizeInBits();
   if (!getVShiftImm(Op, ElementBits, Cnt))
     return false;
   return (Cnt >= 0 && (isLong ? Cnt - 1 : Cnt) < ElementBits);
@@ -6648,7 +6648,7 @@ static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
 /// 1 <= Value <= ElementBits for a right shift; or
 static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, int64_t &Cnt) {
   assert(VT.isVector() && "vector shift count is not a vector type");
-  int64_t ElementBits = VT.getVectorElementType().getSizeInBits();
+  int64_t ElementBits = VT.getScalarSizeInBits();
   if (!getVShiftImm(Op, ElementBits, Cnt))
     return false;
   return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits / 2 : ElementBits));
@@ -6662,7 +6662,7 @@ SDValue AArch64TargetLowering::LowerVectorSRA_SRL_SHL(SDValue Op,
 
   if (!Op.getOperand(1).getValueType().isVector())
     return Op;
-  unsigned EltSize = VT.getVectorElementType().getSizeInBits();
+  unsigned EltSize = VT.getScalarSizeInBits();
 
   switch (Op.getOpcode()) {
   default:
@@ -7937,7 +7937,7 @@ static SDValue tryCombineToBSL(SDNode *N,
 
   // We only have to look for constant vectors here since the general, variable
   // case can be handled in TableGen.
-  unsigned Bits = VT.getVectorElementType().getSizeInBits();
+  unsigned Bits = VT.getScalarSizeInBits();
   uint64_t BitMask = Bits == 64 ? -1ULL : ((1ULL << Bits) - 1);
   for (int i = 1; i >= 0; --i)
     for (int j = 1; j >= 0; --j) {
@@ -8126,7 +8126,7 @@ static SDValue performConcatVectorsCombine(SDNode *N,
   // splat. The indexed instructions are going to be expecting a DUPLANE64, so
   // canonicalise to that.
   if (N0 == N1 && VT.getVectorNumElements() == 2) {
-    assert(VT.getVectorElementType().getSizeInBits() == 64);
+    assert(VT.getScalarSizeInBits() == 64);
     return DAG.getNode(AArch64ISD::DUPLANE64, dl, VT, WidenVector(N0, DAG),
                        DAG.getConstant(0, dl, MVT::i64));
   }
@@ -8691,7 +8691,7 @@ static SDValue performExtendCombine(SDNode *N,
     if (SrcVT.getSizeInBits() != 64)
       return SDValue();
 
-    unsigned SrcEltSize = SrcVT.getVectorElementType().getSizeInBits();
+    unsigned SrcEltSize = SrcVT.getScalarSizeInBits();
     unsigned ElementCount = SrcVT.getVectorNumElements();
     SrcVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize * 2), ElementCount);
     SDLoc DL(N);
diff --git a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
index 30586aa2539..cb01b6315ff 100644
--- a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -2142,7 +2142,7 @@ void ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad, bool isUpdating,
   unsigned Alignment = 0;
   if (NumVecs != 3) {
     Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
-    unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8;
+    unsigned NumBytes = NumVecs * VT.getScalarSizeInBits() / 8;
     if (Alignment > NumBytes)
       Alignment = NumBytes;
     if (Alignment < 8 && Alignment < NumBytes)
@@ -2257,7 +2257,7 @@ void ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating, unsigned NumVecs,
   unsigned Alignment = 0;
   if (NumVecs != 3) {
     Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
-    unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8;
+    unsigned NumBytes = NumVecs * VT.getScalarSizeInBits() / 8;
     if (Alignment > NumBytes)
       Alignment = NumBytes;
     if (Alignment < 8 && Alignment < NumBytes)
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 225439345d0..23c6b8de57b 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -5626,7 +5626,7 @@ static bool isVREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
   assert((BlockSize==16 || BlockSize==32 || BlockSize==64)
          && "Only possible block sizes for VREV are: 16, 32, 64");
 
-  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
+  unsigned EltSz = VT.getScalarSizeInBits();
   if (EltSz == 64)
     return false;
@@ -5677,7 +5677,7 @@ static bool isVTBLMask(ArrayRef<int> M, EVT VT) {
 // want to check the low half and high half of the shuffle mask as if it were
 // the other case
 static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
-  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
+  unsigned EltSz = VT.getScalarSizeInBits();
   if (EltSz == 64)
     return false;
@@ -5712,7 +5712,7 @@ static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
 /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
 static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
-  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
+  unsigned EltSz = VT.getScalarSizeInBits();
   if (EltSz == 64)
     return false;
@@ -5747,7 +5747,7 @@ static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
 // Requires similar checks to that of isVTRNMask with
 // respect the how results are returned.
 static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
-  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
+  unsigned EltSz = VT.getScalarSizeInBits();
   if (EltSz == 64)
     return false;
@@ -5777,7 +5777,7 @@ static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
 /// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>,
 static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
-  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
+  unsigned EltSz = VT.getScalarSizeInBits();
   if (EltSz == 64)
     return false;
@@ -5818,7 +5818,7 @@ static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
 // Requires similar checks to that of isVTRNMask with respect the how results
 // are returned.
 static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
-  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
+  unsigned EltSz = VT.getScalarSizeInBits();
   if (EltSz == 64)
     return false;
@@ -5851,7 +5851,7 @@ static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
 /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
 static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
-  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
+  unsigned EltSz = VT.getScalarSizeInBits();
   if (EltSz == 64)
     return false;
@@ -6033,7 +6033,7 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
     if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode()))
       return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value);
 
-  unsigned EltSize = VT.getVectorElementType().getSizeInBits();
+  unsigned EltSize = VT.getScalarSizeInBits();
 
   // Use VDUP for non-constant splats. For f32 constant splats, reduce to
   // i32 and try again.
@@ -6221,7 +6221,7 @@ SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
       SmallestEltTy = SrcEltTy;
   }
   unsigned ResMultiplier =
-      VT.getVectorElementType().getSizeInBits() / SmallestEltTy.getSizeInBits();
+      VT.getScalarSizeInBits() / SmallestEltTy.getSizeInBits();
   NumElts = VT.getSizeInBits() / SmallestEltTy.getSizeInBits();
   EVT ShuffleVT = EVT::getVectorVT(*DAG.getContext(), SmallestEltTy, NumElts);
@@ -6307,7 +6307,7 @@ SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
   // The stars all align, our next step is to produce the mask for the shuffle.
   SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1);
-  int BitsPerShuffleLane = ShuffleVT.getVectorElementType().getSizeInBits();
+  int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits();
   for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) {
     SDValue Entry = Op.getOperand(i);
     if (Entry.isUndef())
@@ -6321,7 +6321,7 @@ SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
     // segment.
     EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType();
     int BitsDefined = std::min(OrigEltTy.getSizeInBits(),
-                               VT.getVectorElementType().getSizeInBits());
+                               VT.getScalarSizeInBits());
     int LanesDefined = BitsDefined / BitsPerShuffleLane;
 
     // This source is expected to fill ResMultiplier lanes of the final shuffle,
@@ -6381,7 +6381,7 @@ ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
   bool ReverseVEXT, isV_UNDEF;
   unsigned Imm, WhichResult;
 
-  unsigned EltSize = VT.getVectorElementType().getSizeInBits();
+  unsigned EltSize = VT.getScalarSizeInBits();
   return (EltSize >= 32 ||
           ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
          isVREVMask(M, VT, 64) ||
@@ -6524,7 +6524,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
   // of the same time so that they get CSEd properly.
   ArrayRef<int> ShuffleMask = SVN->getMask();
 
-  unsigned EltSize = VT.getVectorElementType().getSizeInBits();
+  unsigned EltSize = VT.getScalarSizeInBits();
   if (EltSize <= 32) {
     if (SVN->isSplat()) {
       int Lane = SVN->getSplatIndex();
@@ -6699,7 +6699,7 @@ static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
   SDValue Vec = Op.getOperand(0);
   if (Op.getValueType() == MVT::i32 &&
-      Vec.getValueType().getVectorElementType().getSizeInBits() < 32) {
+      Vec.getValueType().getScalarSizeInBits() < 32) {
     SDLoc dl(Op);
     return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane);
   }
@@ -6764,7 +6764,7 @@ static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG,
   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
     SDNode *Elt = N->getOperand(i).getNode();
     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
-      unsigned EltSize = VT.getVectorElementType().getSizeInBits();
+      unsigned EltSize = VT.getScalarSizeInBits();
       unsigned HalfSize = EltSize / 2;
       if (isSigned) {
         if (!isIntN(HalfSize, C->getSExtValue()))
@@ -6891,7 +6891,7 @@ static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) {
   // Construct a new BUILD_VECTOR with elements truncated to half the size.
   assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
   EVT VT = N->getValueType(0);
-  unsigned EltSize = VT.getVectorElementType().getSizeInBits() / 2;
+  unsigned EltSize = VT.getScalarSizeInBits() / 2;
   unsigned NumElts = VT.getVectorNumElements();
   MVT TruncVT = MVT::getIntegerVT(EltSize);
   SmallVector<SDValue, 8> Ops;
@@ -10516,14 +10516,14 @@ static SDValue PerformVDUPLANECombine(SDNode *N,
     return SDValue();
 
   // Make sure the VMOV element size is not bigger than the VDUPLANE elements.
-  unsigned EltSize = Op.getValueType().getVectorElementType().getSizeInBits();
+  unsigned EltSize = Op.getValueType().getScalarSizeInBits();
   // The canonical VMOV for a zero vector uses a 32-bit element size.
   unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
   unsigned EltBits;
   if (ARM_AM::decodeNEONModImm(Imm, EltBits) == 0)
     EltSize = 8;
   EVT VT = N->getValueType(0);
-  if (EltSize > VT.getVectorElementType().getSizeInBits())
+  if (EltSize > VT.getScalarSizeInBits())
     return SDValue();
 
   return DCI.DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
@@ -10560,8 +10560,8 @@ static SDValue PerformSTORECombine(SDNode *N,
     EVT StVT = St->getMemoryVT();
     unsigned NumElems = VT.getVectorNumElements();
     assert(StVT != VT && "Cannot truncate to the same type");
-    unsigned FromEltSz = VT.getVectorElementType().getSizeInBits();
-    unsigned ToEltSz = StVT.getVectorElementType().getSizeInBits();
+    unsigned FromEltSz = VT.getScalarSizeInBits();
+    unsigned ToEltSz = StVT.getScalarSizeInBits();
 
     // From, To sizes and ElemCount must be pow of two
     if (!isPowerOf2_32(NumElems * FromEltSz * ToEltSz)) return SDValue();
@@ -10829,7 +10829,7 @@ static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
 /// 0 <= Value <= ElementBits for a long left shift.
 static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
   assert(VT.isVector() && "vector shift count is not a vector type");
-  int64_t ElementBits = VT.getVectorElementType().getSizeInBits();
+  int64_t ElementBits = VT.getScalarSizeInBits();
   if (! getVShiftImm(Op, ElementBits, Cnt))
     return false;
   return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits);
@@ -10844,7 +10844,7 @@ static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
 static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic,
                          int64_t &Cnt) {
   assert(VT.isVector() && "vector shift count is not a vector type");
-  int64_t ElementBits = VT.getVectorElementType().getSizeInBits();
+  int64_t ElementBits = VT.getScalarSizeInBits();
   if (! getVShiftImm(Op, ElementBits, Cnt))
     return false;
   if (!isIntrinsic)
diff --git a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
index f0a55346276..b00b58cb102 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
@@ -816,7 +816,7 @@ void HexagonDAGToDAGISel::SelectZeroExtend(SDNode *N) {
     SDNode *Mask = CurDAG->getMachineNode(Hexagon::C2_mask, dl, MVT::i64, Op0);
     unsigned NE = OpVT.getVectorNumElements();
     EVT ExVT = N->getValueType(0);
-    unsigned ES = ExVT.getVectorElementType().getSizeInBits();
+    unsigned ES = ExVT.getScalarSizeInBits();
     uint64_t MV = 0, Bit = 1;
     for (unsigned i = 0; i < NE; ++i) {
       MV |= Bit;
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index 75a19eddd47..5a6721b70ac 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -2389,7 +2389,7 @@ HexagonTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG)
   if (UseHVX) {
     ArrayRef<int> Mask = SVN->getMask();
     size_t MaskLen = Mask.size();
-    int ElemSizeInBits = VT.getVectorElementType().getSizeInBits();
+    int ElemSizeInBits = VT.getScalarSizeInBits();
     if ((Subtarget.useHVXSglOps() && (ElemSizeInBits * MaskLen) == 64 * 8) ||
         (Subtarget.useHVXDblOps() && (ElemSizeInBits * MaskLen) == 128 * 8)) {
       // Return 1 for odd and 2 of even
diff --git a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp
index 73c461ba9f0..5964aa7cd1e 100644
--- a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp
@@ -852,7 +852,7 @@ static SDValue performDSPShiftCombine(unsigned Opc, SDNode *N, EVT Ty,
   APInt SplatValue, SplatUndef;
   unsigned SplatBitSize;
   bool HasAnyUndefs;
-  unsigned EltSize = Ty.getVectorElementType().getSizeInBits();
+  unsigned EltSize = Ty.getScalarSizeInBits();
   BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
 
   if (!Subtarget.hasDSP())
@@ -1504,7 +1504,7 @@ static SDValue lowerMSABitClear(SDValue Op, SelectionDAG &DAG) {
 static SDValue lowerMSABitClearImm(SDValue Op, SelectionDAG &DAG) {
   SDLoc DL(Op);
   EVT ResTy = Op->getValueType(0);
-  APInt BitImm = APInt(ResTy.getVectorElementType().getSizeInBits(), 1)
+  APInt BitImm = APInt(ResTy.getScalarSizeInBits(), 1)
                  << cast<ConstantSDNode>(Op->getOperand(2))->getAPIntValue();
   SDValue BitMask = DAG.getConstant(~BitImm, DL, ResTy);
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h
index 83a7a3faf36..31d74dc25ce 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -474,7 +474,7 @@ namespace llvm {
     /// then the VPERM for the shuffle. All in all a very slow sequence.
     TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(EVT VT)
       const override {
-      if (VT.getVectorElementType().getSizeInBits() % 8 == 0)
+      if (VT.getScalarSizeInBits() % 8 == 0)
        return TypeWidenVector;
      return TargetLoweringBase::getPreferredVectorAction(VT);
    }
diff --git a/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp b/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
index 9ec5800e910..41ae785054a 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
@@ -1309,7 +1309,7 @@ void SystemZDAGToDAGISel::Select(SDNode *Node) {
 
   case ISD::INSERT_VECTOR_ELT: {
     EVT VT = Node->getValueType(0);
-    unsigned ElemBitSize = VT.getVectorElementType().getSizeInBits();
+    unsigned ElemBitSize = VT.getScalarSizeInBits();
     if (ElemBitSize == 32) {
       if (tryGather(Node, SystemZ::VGEF))
         return;
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index 1180275eec9..657ca797d15 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -3102,7 +3102,7 @@ SDValue SystemZTargetLowering::lowerCTPOP(SDValue Op,
   if (VT.isVector()) {
     Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Op);
     Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::v16i8, Op);
-    switch (VT.getVectorElementType().getSizeInBits()) {
+    switch (VT.getScalarSizeInBits()) {
     case 8:
       break;
     case 16: {
@@ -4377,7 +4377,7 @@ SDValue SystemZTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
   }
 
   // Otherwise bitcast to the equivalent integer form and insert via a GPR.
-  MVT IntVT = MVT::getIntegerVT(VT.getVectorElementType().getSizeInBits());
+  MVT IntVT = MVT::getIntegerVT(VT.getScalarSizeInBits());
   MVT IntVecVT = MVT::getVectorVT(IntVT, VT.getVectorNumElements());
   SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntVecVT,
                             DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0),
@@ -4417,8 +4417,8 @@ SystemZTargetLowering::lowerExtendVectorInreg(SDValue Op, SelectionDAG &DAG,
   SDValue PackedOp = Op.getOperand(0);
   EVT OutVT = Op.getValueType();
   EVT InVT = PackedOp.getValueType();
-  unsigned ToBits = OutVT.getVectorElementType().getSizeInBits();
-  unsigned FromBits = InVT.getVectorElementType().getSizeInBits();
+  unsigned ToBits = OutVT.getScalarSizeInBits();
+  unsigned FromBits = InVT.getScalarSizeInBits();
   do {
     FromBits *= 2;
     EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(FromBits),
@@ -4435,7 +4435,7 @@ SDValue SystemZTargetLowering::lowerShift(SDValue Op, SelectionDAG &DAG,
   SDValue Op1 = Op.getOperand(1);
   SDLoc DL(Op);
   EVT VT = Op.getValueType();
-  unsigned ElemBitSize = VT.getVectorElementType().getSizeInBits();
+  unsigned ElemBitSize = VT.getScalarSizeInBits();
 
   // See whether the shift vector is a splat represented as BUILD_VECTOR.
   if (auto *BVN = dyn_cast<BuildVectorSDNode>(Op1)) {
@@ -4709,7 +4709,7 @@ const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {
 // Return true if VT is a vector whose elements are a whole number of bytes
 // in width.
 static bool canTreatAsByteVector(EVT VT) {
-  return VT.isVector() && VT.getVectorElementType().getSizeInBits() % 8 == 0;
+  return VT.isVector() && VT.getScalarSizeInBits() % 8 == 0;
 }
 
 // Try to simplify an EXTRACT_VECTOR_ELT from a vector of type VecVT
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.h b/llvm/lib/Target/SystemZ/SystemZISelLowering.h
index 423b246608c..735f4b51289 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.h
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.h
@@ -382,7 +382,7 @@ public:
    //
    // (c) there are no multiplication instructions for the widest integer
    //     type (v2i64).
-    if (VT.getVectorElementType().getSizeInBits() % 8 == 0)
+    if (VT.getScalarSizeInBits() % 8 == 0)
      return TypeWidenVector;
    return TargetLoweringBase::getPreferredVectorAction(VT);
  }
diff --git a/llvm/lib/Target/X86/Utils/X86ShuffleDecode.cpp b/llvm/lib/Target/X86/Utils/X86ShuffleDecode.cpp
index 18f71675437..ec3b3b42be6 100644
--- a/llvm/lib/Target/X86/Utils/X86ShuffleDecode.cpp
+++ b/llvm/lib/Target/X86/Utils/X86ShuffleDecode.cpp
@@ -136,7 +136,7 @@ void DecodePSRLDQMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
 void DecodePALIGNRMask(MVT VT, unsigned Imm,
                        SmallVectorImpl<int> &ShuffleMask) {
   unsigned NumElts = VT.getVectorNumElements();
-  unsigned Offset = Imm * (VT.getVectorElementType().getSizeInBits() / 8);
+  unsigned Offset = Imm * (VT.getScalarSizeInBits() / 8);
 
   unsigned NumLanes = VT.getSizeInBits() / 128;
   unsigned NumLaneElts = NumElts / NumLanes;
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 592cd4af5e5..b0101370022 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -4277,7 +4277,7 @@ static bool isVEXTRACTIndex(SDNode *N, unsigned vecWidth) {
     cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
 
   MVT VT = N->getSimpleValueType(0);
-  unsigned ElSize = VT.getVectorElementType().getSizeInBits();
+  unsigned ElSize = VT.getScalarSizeInBits();
   bool Result = (Index * ElSize) % vecWidth == 0;
 
   return Result;
@@ -4295,7 +4295,7 @@ static bool isVINSERTIndex(SDNode *N, unsigned vecWidth) {
     cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
 
   MVT VT = N->getSimpleValueType(0);
-  unsigned ElSize = VT.getVectorElementType().getSizeInBits();
+  unsigned ElSize = VT.getScalarSizeInBits();
   bool Result = (Index * ElSize) % vecWidth == 0;
 
   return Result;
@@ -12561,10 +12561,10 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
   if (!isa<ConstantSDNode>(Idx)) {
     if (VecVT.is512BitVector() ||
         (VecVT.is256BitVector() && Subtarget.hasInt256() &&
-         VecVT.getVectorElementType().getSizeInBits() == 32)) {
+         VecVT.getScalarSizeInBits() == 32)) {
 
       MVT MaskEltVT =
-        MVT::getIntegerVT(VecVT.getVectorElementType().getSizeInBits());
+        MVT::getIntegerVT(VecVT.getScalarSizeInBits());
       MVT MaskVT = MVT::getVectorVT(MaskEltVT, VecVT.getSizeInBits() /
                                     MaskEltVT.getSizeInBits());
@@ -15673,7 +15673,7 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget &Subtarget,
     // In this case use SSE compare
     bool UseAVX512Inst =
       (OpVT.is512BitVector() ||
-       OpVT.getVectorElementType().getSizeInBits() >= 32 ||
+       OpVT.getScalarSizeInBits() >= 32 ||
        (Subtarget.hasBWI() && Subtarget.hasVLX()));
 
     if (UseAVX512Inst)
@@ -16389,7 +16389,7 @@ static SDValue LowerSIGN_EXTEND_VECTOR_INREG(SDValue Op,
   SDValue SignExt = Curr;
   if (CurrVT != InVT) {
     unsigned SignExtShift =
-        CurrVT.getVectorElementType().getSizeInBits() - InSVT.getSizeInBits();
+        CurrVT.getScalarSizeInBits() - InSVT.getSizeInBits();
     SignExt = DAG.getNode(X86ISD::VSRAI, dl, CurrVT, Curr,
                           DAG.getConstant(SignExtShift, dl, MVT::i8));
   }
@@ -28383,7 +28383,7 @@ static SDValue performShiftToAllZeros(SDNode *N, SelectionDAG &DAG,
   if (auto *AmtSplat = AmtBV->getConstantSplatNode()) {
     const APInt &ShiftAmt = AmtSplat->getAPIntValue();
     unsigned MaxAmount =
-      VT.getSimpleVT().getVectorElementType().getSizeInBits();
+      VT.getSimpleVT().getScalarSizeInBits();
 
     // SSE2/AVX2 logical shifts always return a vector of 0s
     // if the shift amount is bigger than or equal to
@@ -28902,7 +28902,7 @@ static SDValue combineLogicBlendIntoPBLENDV(SDNode *N, SelectionDAG &DAG,
   // Validate that the Mask operand is a vector sra node.
   // FIXME: what to do for bytes, since there is a psignb/pblendvb, but
   // there is no psrai.b
-  unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits();
+  unsigned EltBits = MaskVT.getScalarSizeInBits();
   unsigned SraAmt = ~0;
   if (Mask.getOpcode() == ISD::SRA) {
     if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Mask.getOperand(1)))
@@ -29552,8 +29552,8 @@ static SDValue combineMaskedLoad(SDNode *N, SelectionDAG &DAG,
   SDLoc dl(Mld);
 
   assert(LdVT != VT && "Cannot extend to the same type");
-  unsigned ToSz = VT.getVectorElementType().getSizeInBits();
-  unsigned FromSz = LdVT.getVectorElementType().getSizeInBits();
+  unsigned ToSz = VT.getScalarSizeInBits();
+  unsigned FromSz = LdVT.getScalarSizeInBits();
   // From/To sizes and ElemCount must be pow of two.
   assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
     "Unexpected size for extending masked load");
@@ -29658,8 +29658,8 @@ static SDValue combineMaskedStore(SDNode *N, SelectionDAG &DAG,
   SDLoc dl(Mst);
 
   assert(StVT != VT && "Cannot truncate to the same type");
-  unsigned FromSz = VT.getVectorElementType().getSizeInBits();
-  unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
+  unsigned FromSz = VT.getScalarSizeInBits();
+  unsigned ToSz = StVT.getScalarSizeInBits();
 
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
@@ -29787,8 +29787,8 @@ static SDValue combineStore(SDNode *N, SelectionDAG &DAG,
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   unsigned NumElems = VT.getVectorNumElements();
   assert(StVT != VT && "Cannot truncate to the same type");
-  unsigned FromSz = VT.getVectorElementType().getSizeInBits();
-  unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
+  unsigned FromSz = VT.getScalarSizeInBits();
+  unsigned ToSz = StVT.getScalarSizeInBits();
 
   // The truncating store is legal in some cases. For example
   // vpmovqb, vpmovqw, vpmovqd, vpmovdb, vpmovdw
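
For reference, a sketch (paraphrased, not copied from this diff) of the helper all of these call sites now share; in the ValueTypes.h of this era it reduces to:

    // The element type for vectors, the type itself for scalars.
    EVT getScalarType() const {
      return isVector() ? getVectorElementType() : *this;
    }
    unsigned getScalarSizeInBits() const {
      return getScalarType().getSizeInBits();
    }

so the substitution is behavior-preserving at every vector call site above.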