Diffstat (limited to 'llvm/lib')
24 files changed, 105 insertions, 120 deletions
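Every hunk below is the same mechanical substitution: Op.getValueType().getSizeInBits() is replaced by the equivalent Op.getValueSizeInBits() convenience helper on SDValue. A minimal self-contained sketch of why the two spellings agree (toy stand-in types for illustration only, not the real LLVM classes, whose definitions live in the LLVM CodeGen headers):

#include <cassert>

// Toy stand-ins for illustration only; not the real LLVM EVT/SDValue.
struct EVT {
  unsigned Bits;
  unsigned getSizeInBits() const { return Bits; }
};

struct SDValue {
  EVT VT;
  EVT getValueType() const { return VT; }
  // Convenience wrapper: the shorter spelling the diff switches call sites to.
  unsigned getValueSizeInBits() const { return getValueType().getSizeInBits(); }
};

int main() {
  SDValue Op{EVT{64}};
  // Both spellings report the same width; only the call-site verbosity changes.
  assert(Op.getValueType().getSizeInBits() == Op.getValueSizeInBits());
  return 0;
}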
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index 6292f118d14..316a04ad777 100644 --- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -2487,8 +2487,7 @@ SDValue DAGCombiner::visitMULHS(SDNode *N) {    if (isOneConstant(N1)) {      SDLoc DL(N);      return DAG.getNode(ISD::SRA, DL, N0.getValueType(), N0, -                       DAG.getConstant(N0.getValueType().getSizeInBits() - 1, -                                       DL, +                       DAG.getConstant(N0.getValueSizeInBits() - 1, DL,                                         getShiftAmountTy(N0.getValueType())));    }    // fold (mulhs x, undef) -> 0 @@ -6616,8 +6615,8 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {        SDValue InnerZExt = N0.getOperand(0);        // If the original shl may be shifting out bits, do not perform this        // transformation. -      unsigned KnownZeroBits = InnerZExt.getValueType().getSizeInBits() - -        InnerZExt.getOperand(0).getValueType().getSizeInBits(); +      unsigned KnownZeroBits = InnerZExt.getValueSizeInBits() - +        InnerZExt.getOperand(0).getValueSizeInBits();        if (ShAmtVal > KnownZeroBits)          return SDValue();      } @@ -6878,7 +6877,7 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {        if ((ShAmt & (EVTBits-1)) == 0) {          N0 = N0.getOperand(0);          // Is the load width a multiple of size of VT? -        if ((N0.getValueType().getSizeInBits() & (EVTBits-1)) != 0) +        if ((N0.getValueSizeInBits() & (EVTBits-1)) != 0)            return SDValue();        } @@ -7587,7 +7586,7 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) {    if (N0.getOpcode() == ISD::FCOPYSIGN && N0.getNode()->hasOneUse() &&        isa<ConstantFPSDNode>(N0.getOperand(0)) &&        VT.isInteger() && !VT.isVector()) { -    unsigned OrigXWidth = N0.getOperand(1).getValueType().getSizeInBits(); +    unsigned OrigXWidth = N0.getOperand(1).getValueSizeInBits();      EVT IntXVT = EVT::getIntegerVT(*DAG.getContext(), OrigXWidth);      if (isTypeLegal(IntXVT)) {        SDValue X = DAG.getBitcast(IntXVT, N0.getOperand(1)); @@ -12292,7 +12291,7 @@ SDValue DAGCombiner::splitMergedValStore(StoreSDNode *ST) {      return SDValue();    // Match shift amount to HalfValBitSize. -  unsigned HalfValBitSize = Val.getValueType().getSizeInBits() / 2; +  unsigned HalfValBitSize = Val.getValueSizeInBits() / 2;    ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(Op1.getOperand(1));    if (!ShAmt || ShAmt->getAPIntValue() != HalfValBitSize)      return SDValue(); @@ -12301,10 +12300,10 @@ SDValue DAGCombiner::splitMergedValStore(StoreSDNode *ST) {    // to i64.    
if (Lo.getOpcode() != ISD::ZERO_EXTEND || !Lo.hasOneUse() ||        !Lo.getOperand(0).getValueType().isScalarInteger() || -      Lo.getOperand(0).getValueType().getSizeInBits() > HalfValBitSize || +      Lo.getOperand(0).getValueSizeInBits() > HalfValBitSize ||        Hi.getOpcode() != ISD::ZERO_EXTEND || !Hi.hasOneUse() ||        !Hi.getOperand(0).getValueType().isScalarInteger() || -      Hi.getOperand(0).getValueType().getSizeInBits() > HalfValBitSize) +      Hi.getOperand(0).getValueSizeInBits() > HalfValBitSize)      return SDValue();    if (!TLI.isMultiStoresCheaperThanBitsMerge(Lo.getOperand(0), diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp index f452d629dcf..0f932eb6866 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp @@ -1663,7 +1663,7 @@ SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp, EVT SlotVT,    MachinePointerInfo PtrInfo =        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI); -  unsigned SrcSize = SrcOp.getValueType().getSizeInBits(); +  unsigned SrcSize = SrcOp.getValueSizeInBits();    unsigned SlotSize = SlotVT.getSizeInBits();    unsigned DestSize = DestVT.getSizeInBits();    Type *DestType = DestVT.getTypeForEVT(*DAG.getContext()); diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp index 130778f594a..da872792275 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp @@ -951,11 +951,11 @@ void DAGTypeLegalizer::PromoteSetCCOperands(SDValue &NewLHS,SDValue &NewRHS,      // than the width of NewLHS/NewRH, we can avoid inserting real truncate      // instruction, which is redudant eventually.      unsigned OpLEffectiveBits = -        OpL.getValueType().getSizeInBits() - DAG.ComputeNumSignBits(OpL) + 1; +        OpL.getValueSizeInBits() - DAG.ComputeNumSignBits(OpL) + 1;      unsigned OpREffectiveBits = -        OpR.getValueType().getSizeInBits() - DAG.ComputeNumSignBits(OpR) + 1; -    if (OpLEffectiveBits <= NewLHS.getValueType().getSizeInBits() && -        OpREffectiveBits <= NewRHS.getValueType().getSizeInBits()) { +        OpR.getValueSizeInBits() - DAG.ComputeNumSignBits(OpR) + 1; +    if (OpLEffectiveBits <= NewLHS.getValueSizeInBits() && +        OpREffectiveBits <= NewRHS.getValueSizeInBits()) {        NewLHS = OpL;        NewRHS = OpR;      } else { @@ -1053,7 +1053,7 @@ SDValue DAGTypeLegalizer::PromoteIntOp_BUILD_VECTOR(SDNode *N) {    // Promote the inserted value.  The type does not need to match the    // vector element type.  Check that any extra bits introduced will be    // truncated away. -  assert(N->getOperand(0).getValueType().getSizeInBits() >= +  assert(N->getOperand(0).getValueSizeInBits() >=           N->getValueType(0).getVectorElementType().getSizeInBits() &&           "Type of inserted value narrower than vector element type!"); @@ -1083,7 +1083,7 @@ SDValue DAGTypeLegalizer::PromoteIntOp_INSERT_VECTOR_ELT(SDNode *N,      // have to match the vector element type.      // Check that any extra bits introduced will be truncated away. 
-    assert(N->getOperand(1).getValueType().getSizeInBits() >= +    assert(N->getOperand(1).getValueSizeInBits() >=             N->getValueType(0).getVectorElementType().getSizeInBits() &&             "Type of inserted value narrower than vector element type!");      return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0), @@ -2075,7 +2075,7 @@ void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,      if (ExtType == ISD::SEXTLOAD) {        // The high part is obtained by SRA'ing all but one of the bits of the        // lo part. -      unsigned LoSize = Lo.getValueType().getSizeInBits(); +      unsigned LoSize = Lo.getValueSizeInBits();        Hi = DAG.getNode(ISD::SRA, dl, NVT, Lo,                         DAG.getConstant(LoSize - 1, dl,                                         TLI.getPointerTy(DAG.getDataLayout()))); @@ -2446,8 +2446,7 @@ void DAGTypeLegalizer::ExpandIntRes_SIGN_EXTEND(SDNode *N,             "Operand over promoted?");      // Split the promoted operand.  This will simplify when it is expanded.      SplitInteger(Res, Lo, Hi); -    unsigned ExcessBits = -      Op.getValueType().getSizeInBits() - NVT.getSizeInBits(); +    unsigned ExcessBits = Op.getValueSizeInBits() - NVT.getSizeInBits();      Hi = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, Hi.getValueType(), Hi,                       DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(),                                                          ExcessBits))); @@ -2468,13 +2467,12 @@ ExpandIntRes_SIGN_EXTEND_INREG(SDNode *N, SDValue &Lo, SDValue &Hi) {      // The high part gets the sign extension from the lo-part.  This handles      // things like sextinreg V:i64 from i8.      Hi = DAG.getNode(ISD::SRA, dl, Hi.getValueType(), Lo, -                     DAG.getConstant(Hi.getValueType().getSizeInBits() - 1, dl, +                     DAG.getConstant(Hi.getValueSizeInBits() - 1, dl,                                       TLI.getPointerTy(DAG.getDataLayout())));    } else {      // For example, extension of an i48 to an i64.  Leave the low part alone,      // sext_inreg the high part. -    unsigned ExcessBits = -      EVT.getSizeInBits() - Lo.getValueType().getSizeInBits(); +    unsigned ExcessBits = EVT.getSizeInBits() - Lo.getValueSizeInBits();      Hi = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, Hi.getValueType(), Hi,                       DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(),                                                          ExcessBits))); @@ -2700,8 +2698,7 @@ void DAGTypeLegalizer::ExpandIntRes_ZERO_EXTEND(SDNode *N,             "Operand over promoted?");      // Split the promoted operand.  This will simplify when it is expanded.      SplitInteger(Res, Lo, Hi); -    unsigned ExcessBits = -      Op.getValueType().getSizeInBits() - NVT.getSizeInBits(); +    unsigned ExcessBits = Op.getValueSizeInBits() - NVT.getSizeInBits();      Hi = DAG.getZeroExtendInReg(Hi, dl,                                  EVT::getIntegerVT(*DAG.getContext(),                                                    ExcessBits)); diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp index 144bed241ee..37e587a9c9c 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp @@ -794,7 +794,7 @@ void DAGTypeLegalizer::SetScalarizedVector(SDValue Op, SDValue Result) {    // Note that in some cases vector operation operands may be greater than    // the vector element type. 
For example BUILD_VECTOR of type <1 x i1> with    // a constant i8 operand. -  assert(Result.getValueType().getSizeInBits() >= +  assert(Result.getValueSizeInBits() >=           Op.getValueType().getVectorElementType().getSizeInBits() &&           "Invalid type for scalarized vector");    AnalyzeNewValue(Result); @@ -905,7 +905,7 @@ void DAGTypeLegalizer::SetWidenedVector(SDValue Op, SDValue Result) {  /// Convert to an integer of the same size.  SDValue DAGTypeLegalizer::BitConvertToInteger(SDValue Op) { -  unsigned BitWidth = Op.getValueType().getSizeInBits(); +  unsigned BitWidth = Op.getValueSizeInBits();    return DAG.getNode(ISD::BITCAST, SDLoc(Op),                       EVT::getIntegerVT(*DAG.getContext(), BitWidth), Op);  } @@ -1145,7 +1145,7 @@ void DAGTypeLegalizer::SplitInteger(SDValue Op,                                      SDValue &Lo, SDValue &Hi) {    SDLoc dl(Op);    assert(LoVT.getSizeInBits() + HiVT.getSizeInBits() == -         Op.getValueType().getSizeInBits() && "Invalid integer splitting!"); +         Op.getValueSizeInBits() && "Invalid integer splitting!");    Lo = DAG.getNode(ISD::TRUNCATE, dl, LoVT, Op);    Hi = DAG.getNode(ISD::SRL, dl, Op.getValueType(), Op,                     DAG.getConstant(LoVT.getSizeInBits(), dl, @@ -1157,8 +1157,8 @@ void DAGTypeLegalizer::SplitInteger(SDValue Op,  /// size of Op's.  void DAGTypeLegalizer::SplitInteger(SDValue Op,                                      SDValue &Lo, SDValue &Hi) { -  EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), -                                 Op.getValueType().getSizeInBits()/2); +  EVT HalfVT = +      EVT::getIntegerVT(*DAG.getContext(), Op.getValueSizeInBits() / 2);    SplitInteger(Op, HalfVT, HalfVT, Lo, Hi);  } diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp index 665180e119b..c9bf2c2c409 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp @@ -141,11 +141,10 @@ void DAGTypeLegalizer::ExpandRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi) {          if (DAG.getDataLayout().isBigEndian())            std::swap(LHS, RHS); -        Vals.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, -                                   EVT::getIntegerVT( -                                     *DAG.getContext(), -                                     LHS.getValueType().getSizeInBits() << 1), -                                   LHS, RHS)); +        Vals.push_back(DAG.getNode( +            ISD::BUILD_PAIR, dl, +            EVT::getIntegerVT(*DAG.getContext(), LHS.getValueSizeInBits() << 1), +            LHS, RHS));        }        Lo = Vals[Slot++];        Hi = Vals[Slot++]; diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp index 49f830bf5a0..4681d5b6753 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp @@ -951,7 +951,7 @@ SDValue VectorLegalizer::ExpandVSELECT(SDValue Op) {    // If the mask and the type are different sizes, unroll the vector op. This    // can occur when getSetCCResultType returns something that is different in    // size from the operand types. For example, v4i8 = select v4i32, v4i8, v4i8. -  if (VT.getSizeInBits() != Op1.getValueType().getSizeInBits()) +  if (VT.getSizeInBits() != Op1.getValueSizeInBits())      return DAG.UnrollVectorOp(Op.getNode());    // Bitcast the operands to be the same type as the mask. 
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp index f3adca49ccf..35232787ca1 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp @@ -882,7 +882,7 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_SUBVECTOR(SDNode *N, SDValue &Lo,        DAG.getLoad(Lo.getValueType(), dl, Store, StackPtr, MachinePointerInfo());    // Increment the pointer to the other part. -  unsigned IncrementSize = Lo.getValueType().getSizeInBits() / 8; +  unsigned IncrementSize = Lo.getValueSizeInBits() / 8;    StackPtr =        DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,                    DAG.getConstant(IncrementSize, dl, StackPtr.getValueType())); @@ -1014,7 +1014,7 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo,        DAG.getLoad(Lo.getValueType(), dl, Store, StackPtr, MachinePointerInfo());    // Increment the pointer to the other part. -  unsigned IncrementSize = Lo.getValueType().getSizeInBits() / 8; +  unsigned IncrementSize = Lo.getValueSizeInBits() / 8;    StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,                           DAG.getConstant(IncrementSize, dl,                                           StackPtr.getValueType())); diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index b54d245ccf4..518a2d6f527 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -1022,7 +1022,7 @@ SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {  SDValue SelectionDAG::getAnyExtendVectorInReg(SDValue Op, const SDLoc &DL,                                                EVT VT) {    assert(VT.isVector() && "This DAG node is restricted to vector types."); -  assert(VT.getSizeInBits() == Op.getValueType().getSizeInBits() && +  assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&           "The sizes of the input and result must match in order to perform the "           "extend in-register.");    assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() && @@ -1033,7 +1033,7 @@ SDValue SelectionDAG::getAnyExtendVectorInReg(SDValue Op, const SDLoc &DL,  SDValue SelectionDAG::getSignExtendVectorInReg(SDValue Op, const SDLoc &DL,                                                 EVT VT) {    assert(VT.isVector() && "This DAG node is restricted to vector types."); -  assert(VT.getSizeInBits() == Op.getValueType().getSizeInBits() && +  assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&           "The sizes of the input and result must match in order to perform the "           "extend in-register.");    assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() && @@ -1044,7 +1044,7 @@ SDValue SelectionDAG::getSignExtendVectorInReg(SDValue Op, const SDLoc &DL,  SDValue SelectionDAG::getZeroExtendVectorInReg(SDValue Op, const SDLoc &DL,                                                 EVT VT) {    assert(VT.isVector() && "This DAG node is restricted to vector types."); -  assert(VT.getSizeInBits() == Op.getValueType().getSizeInBits() && +  assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&           "The sizes of the input and result must match in order to perform the "           "extend in-register.");    assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() && @@ -2441,7 +2441,7 @@ void 
SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,      computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);      const unsigned Index =        cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); -    const unsigned BitWidth = Op.getValueType().getSizeInBits(); +    const unsigned BitWidth = Op.getValueSizeInBits();      // Remove low part of known bits mask      KnownZero = KnownZero.getHiBits(KnownZero.getBitWidth() - Index * BitWidth); @@ -2707,9 +2707,8 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {      break;    case ISD::EXTRACT_ELEMENT: {      const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1); -    const int BitWidth = Op.getValueType().getSizeInBits(); -    const int Items = -      Op.getOperand(0).getValueType().getSizeInBits() / BitWidth; +    const int BitWidth = Op.getValueSizeInBits(); +    const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth;      // Get reverse index (starting from 1), Op1 value indexes elements from      // little end. Sign starts at big end. @@ -3162,8 +3161,8 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,      break;    case ISD::BITCAST:      // Basic sanity checking. -    assert(VT.getSizeInBits() == Operand.getValueType().getSizeInBits() -           && "Cannot BITCAST between types of different sizes!"); +    assert(VT.getSizeInBits() == Operand.getValueSizeInBits() && +           "Cannot BITCAST between types of different sizes!");      if (VT == Operand.getValueType()) return Operand;  // noop conversion.      if (OpOpcode == ISD::BITCAST)  // bitconv(bitconv(x)) -> bitconv(x)        return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0)); @@ -3577,8 +3576,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,      // amounts.  This catches things like trying to shift an i1024 value by an      // i8, which is easy to fall into in generic code that uses      // TLI.getShiftAmount(). -    assert(N2.getValueType().getSizeInBits() >= -                   Log2_32_Ceil(N1.getValueType().getSizeInBits()) && +    assert(N2.getValueSizeInBits() >= Log2_32_Ceil(N1.getValueSizeInBits()) &&             "Invalid use of small shift amount with oversized value!");      // Always fold shifts of i1 values so the code generator doesn't need to diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index e67915b2909..ceabccca2ab 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -183,7 +183,7 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL,          Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);          Hi =              DAG.getNode(ISD::SHL, DL, TotalVT, Hi, -                        DAG.getConstant(Lo.getValueType().getSizeInBits(), DL, +                        DAG.getConstant(Lo.getValueSizeInBits(), DL,                                          TLI.getPointerTy(DAG.getDataLayout())));          Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);          Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi); @@ -2638,7 +2638,7 @@ void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {    // Coerce the shift amount to the right type if we can.    
if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {      unsigned ShiftSize = ShiftTy.getSizeInBits(); -    unsigned Op2Size = Op2.getValueType().getSizeInBits(); +    unsigned Op2Size = Op2.getValueSizeInBits();      SDLoc DL = getCurSDLoc();      // If the operand is smaller than the shift count type, promote it. @@ -2649,7 +2649,7 @@ void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {      // count type has enough bits to represent any shift value, truncate      // it now. This is a common case and it exposes the truncate to      // optimization early. -    else if (ShiftSize >= Log2_32_Ceil(Op2.getValueType().getSizeInBits())) +    else if (ShiftSize >= Log2_32_Ceil(Op2.getValueSizeInBits()))        Op2 = DAG.getNode(ISD::TRUNCATE, DL, ShiftTy, Op2);      // Otherwise we'll need to temporarily settle for some other convenient      // type.  Type legalization will make adjustments once the shiftee is split. diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp index 9d06a4f64be..3958b7ba0f5 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp @@ -3491,7 +3491,7 @@ void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch,                  NodeToMatch->getValueType(i) == MVT::iPTR ||                  Res.getValueType() == MVT::iPTR ||                  NodeToMatch->getValueType(i).getSizeInBits() == -                    Res.getValueType().getSizeInBits()) && +                    Res.getValueSizeInBits()) &&                 "invalid replacement");          CurDAG->ReplaceAllUsesOfValueWith(SDValue(NodeToMatch, i), Res);        } diff --git a/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp index a8d6847e93a..9540f5d49c5 100644 --- a/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp @@ -351,8 +351,7 @@ spillIncomingStatepointValue(SDValue Incoming, SDValue Chain,      // can consider allowing spills of smaller values to larger slots      // (i.e. change the '==' in the assert below to a '>=').      MachineFrameInfo &MFI = Builder.DAG.getMachineFunction().getFrameInfo(); -    assert((MFI.getObjectSize(Index) * 8) == -               Incoming.getValueType().getSizeInBits() && +    assert((MFI.getObjectSize(Index) * 8) == Incoming.getValueSizeInBits() &&             "Bad spill:  stack slot does not match!");  #endif diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp index 62784154fa0..0c04636424c 100644 --- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp @@ -1133,7 +1133,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,      if (!TLO.LegalOperations() &&          !Op.getValueType().isVector() &&          !Op.getOperand(0).getValueType().isVector() && -        NewMask == APInt::getSignBit(Op.getValueType().getSizeInBits()) && +        NewMask == APInt::getSignBit(Op.getValueSizeInBits()) &&          Op.getOperand(0).getValueType().isFloatingPoint()) {        bool OpVTLegal = isOperationLegalOrCustom(ISD::FGETSIGN, Op.getValueType());        bool i32Legal  = isOperationLegalOrCustom(ISD::FGETSIGN, MVT::i32); @@ -1144,10 +1144,10 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,          // Make a FGETSIGN + SHL to move the sign bit into the appropriate          // place.  
We expect the SHL to be eliminated by other optimizations.          SDValue Sign = TLO.DAG.getNode(ISD::FGETSIGN, dl, Ty, Op.getOperand(0)); -        unsigned OpVTSizeInBits = Op.getValueType().getSizeInBits(); +        unsigned OpVTSizeInBits = Op.getValueSizeInBits();          if (!OpVTLegal && OpVTSizeInBits > 32)            Sign = TLO.DAG.getNode(ISD::ZERO_EXTEND, dl, Op.getValueType(), Sign); -        unsigned ShVal = Op.getValueType().getSizeInBits()-1; +        unsigned ShVal = Op.getValueSizeInBits() - 1;          SDValue ShAmt = TLO.DAG.getConstant(ShVal, dl, Op.getValueType());          return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, dl,                                                   Op.getValueType(), @@ -1414,7 +1414,7 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,        const APInt &ShAmt          = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();        if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && -          ShAmt == Log2_32(N0.getValueType().getSizeInBits())) { +          ShAmt == Log2_32(N0.getValueSizeInBits())) {          if ((C1 == 0) == (Cond == ISD::SETEQ)) {            // (srl (ctlz x), 5) == 0  -> X != 0            // (srl (ctlz x), 5) != 1  -> X != 0 @@ -1436,8 +1436,8 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,        CTPOP = N0.getOperand(0);      if (CTPOP.hasOneUse() && CTPOP.getOpcode() == ISD::CTPOP && -        (N0 == CTPOP || N0.getValueType().getSizeInBits() > -                        Log2_32_Ceil(CTPOP.getValueType().getSizeInBits()))) { +        (N0 == CTPOP || +         N0.getValueSizeInBits() > Log2_32_Ceil(CTPOP.getValueSizeInBits()))) {        EVT CTVT = CTPOP.getValueType();        SDValue CTOp = CTPOP.getOperand(0); @@ -1558,7 +1558,7 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,        APInt bestMask;        unsigned bestWidth = 0, bestOffset = 0;        if (!Lod->isVolatile() && Lod->isUnindexed()) { -        unsigned origWidth = N0.getValueType().getSizeInBits(); +        unsigned origWidth = N0.getValueSizeInBits();          unsigned maskWidth = origWidth;          // We can narrow (e.g.) 16-bit extending loads on 32-bit target to          // 8 bits, but have to be careful... @@ -1605,7 +1605,7 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,      // If the LHS is a ZERO_EXTEND, perform the comparison on the input.      if (N0.getOpcode() == ISD::ZERO_EXTEND) { -      unsigned InSize = N0.getOperand(0).getValueType().getSizeInBits(); +      unsigned InSize = N0.getOperand(0).getValueSizeInBits();        // If the comparison constant has bits in the upper part, the        // zero-extended value could never match. 
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp index 0c98687b23e..6b7d517b5d0 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp @@ -349,7 +349,7 @@ bool AArch64DAGToDAGISel::SelectShiftedRegister(SDValue N, bool AllowROR,      return false;    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) { -    unsigned BitSize = N.getValueType().getSizeInBits(); +    unsigned BitSize = N.getValueSizeInBits();      unsigned Val = RHS->getZExtValue() & (BitSize - 1);      unsigned ShVal = AArch64_AM::getShifterImm(ShType, Val); diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp index 40c57649acb..568e416873f 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -3715,7 +3715,7 @@ SDValue AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {          // Don't combine AND since emitComparison converts the AND to an ANDS          // (a.k.a. TST) and the test in the test bit and branch instruction          // becomes redundant.  This would also increase register pressure. -        uint64_t Mask = LHS.getValueType().getSizeInBits() - 1; +        uint64_t Mask = LHS.getValueSizeInBits() - 1;          return DAG.getNode(AArch64ISD::TBNZ, dl, MVT::Other, Chain, LHS,                             DAG.getConstant(Mask, dl, MVT::i64), Dest);        } @@ -3725,7 +3725,7 @@ SDValue AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {        // Don't combine AND since emitComparison converts the AND to an ANDS        // (a.k.a. TST) and the test in the test bit and branch instruction        // becomes redundant.  This would also increase register pressure. -      uint64_t Mask = LHS.getValueType().getSizeInBits() - 1; +      uint64_t Mask = LHS.getValueSizeInBits() - 1;        return DAG.getNode(AArch64ISD::TBZ, dl, MVT::Other, Chain, LHS,                           DAG.getConstant(Mask, dl, MVT::i64), Dest);      } @@ -5412,7 +5412,7 @@ static SDValue tryFormConcatFromShuffle(SDValue Op, SelectionDAG &DAG) {        VT.getVectorElementType() != V1.getValueType().getVectorElementType())      return SDValue(); -  bool SplitV0 = V0.getValueType().getSizeInBits() == 128; +  bool SplitV0 = V0.getValueSizeInBits() == 128;    if (!isConcatMask(Mask, VT, SplitV0))      return SDValue(); @@ -5423,7 +5423,7 @@ static SDValue tryFormConcatFromShuffle(SDValue Op, SelectionDAG &DAG) {      V0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, CastVT, V0,                       DAG.getConstant(0, DL, MVT::i64));    } -  if (V1.getValueType().getSizeInBits() == 128) { +  if (V1.getValueSizeInBits() == 128) {      V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, CastVT, V1,                       DAG.getConstant(0, DL, MVT::i64));    } @@ -5554,7 +5554,7 @@ static SDValue GenerateTBL(SDValue Op, ArrayRef<int> ShuffleMask,    MVT IndexVT = MVT::v8i8;    unsigned IndexLen = 8; -  if (Op.getValueType().getSizeInBits() == 128) { +  if (Op.getValueSizeInBits() == 128) {      IndexVT = MVT::v16i8;      IndexLen = 16;    } @@ -6382,7 +6382,7 @@ FailedModImm:        // DUPLANE works on 128-bit vectors, widen it if necessary.        
SDValue Lane = Value.getOperand(1);        Value = Value.getOperand(0); -      if (Value.getValueType().getSizeInBits() == 64) +      if (Value.getValueSizeInBits() == 64)          Value = WidenVector(Value, DAG);        unsigned Opcode = getDUPLANEOp(VT.getVectorElementType()); @@ -6559,7 +6559,7 @@ SDValue AArch64TargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,      return SDValue();    unsigned Val = Cst->getZExtValue(); -  unsigned Size = Op.getValueType().getSizeInBits(); +  unsigned Size = Op.getValueSizeInBits();    // This will get lowered to an appropriate EXTRACT_SUBREG in ISel.    if (Val == 0) @@ -7686,7 +7686,7 @@ static SDValue performIntToFpCombine(SDNode *N, SelectionDAG &DAG,      return SDValue();    // Only optimize when the source and destination types have the same width. -  if (VT.getSizeInBits() != N->getOperand(0).getValueType().getSizeInBits()) +  if (VT.getSizeInBits() != N->getOperand(0).getValueSizeInBits())      return SDValue();    // If the result of an integer load is only used by an integer-to-float @@ -8189,7 +8189,7 @@ static SDValue tryCombineFixedPointConvert(SDNode *N,      // The vector width should be 128 bits by the time we get here, even      // if it started as 64 bits (the extract_vector handling will have      // done so). -    assert(Vec.getValueType().getSizeInBits() == 128 && +    assert(Vec.getValueSizeInBits() == 128 &&             "unexpected vector size on extract_vector_elt!");      if (Vec.getValueType() == MVT::v4i32)        VecResTy = MVT::v4f32; diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp index f26b6d601b1..225439345d0 100644 --- a/llvm/lib/Target/ARM/ARMISelLowering.cpp +++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -2235,7 +2235,7 @@ static  bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,                           MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,                           const TargetInstrInfo *TII) { -  unsigned Bytes = Arg.getValueType().getSizeInBits() / 8; +  unsigned Bytes = Arg.getValueSizeInBits() / 8;    int FI = INT_MAX;    if (Arg.getOpcode() == ISD::CopyFromReg) {      unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg(); diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp index ec9a8645c90..75a19eddd47 100644 --- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp +++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp @@ -2612,7 +2612,7 @@ HexagonTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {          continue;        if (VT.getSizeInBits() == 64 && -          Operand.getValueType().getSizeInBits() == 32) { +          Operand.getValueSizeInBits() == 32) {          SDValue C = DAG.getConstant(0, dl, MVT::i32);          Operand = DAG.getNode(HexagonISD::COMBINE, dl, VT, C, Operand);        } @@ -2677,7 +2677,7 @@ HexagonTargetLowering::LowerCONCAT_VECTORS(SDValue Op,      unsigned N = NElts-i-1;      SDValue OpN = Op.getOperand(N); -    if (VT.getSizeInBits() == 64 && OpN.getValueType().getSizeInBits() == 32) { +    if (VT.getSizeInBits() == 64 && OpN.getValueSizeInBits() == 32) {        SDValue C = DAG.getConstant(0, dl, MVT::i32);        OpN = DAG.getNode(HexagonISD::COMBINE, dl, VT, C, OpN);      } @@ -2857,8 +2857,7 @@ HexagonTargetLowering::LowerINSERT_VECTOR(SDValue Op,                                  DAG.getConstant(32, dl, MVT::i64));    SDValue Combined = DAG.getNode(ISD::OR, dl, MVT::i64, Shifted, Offset); -  if 
(VT.getSizeInBits() == 64 && -      Val.getValueType().getSizeInBits() == 32) { +  if (VT.getSizeInBits() == 64 && Val.getValueSizeInBits() == 32) {      SDValue C = DAG.getConstant(0, dl, MVT::i32);      Val = DAG.getNode(HexagonISD::COMBINE, dl, VT, C, Val);    } diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp index 7ab15ee94cf..43c478f4212 100644 --- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp +++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp @@ -4911,7 +4911,7 @@ bool NVPTXDAGToDAGISel::tryBFE(SDNode *N) {          uint64_t StartVal = StartConst->getZExtValue();          // How many "good" bits do we have left?  "good" is defined here as bits          // that exist in the original value, not shifted in. -        uint64_t GoodBits = Start.getValueType().getSizeInBits() - StartVal; +        uint64_t GoodBits = Start.getValueSizeInBits() - StartVal;          if (NumBits > GoodBits) {            // Do not handle the case where bits have been shifted in. In theory            // we could handle this, but the cost is likely higher than just @@ -5019,15 +5019,14 @@ bool NVPTXDAGToDAGISel::tryBFE(SDNode *N) {        // If the outer shift is more than the type size, we have no bitfield to        // extract (since we also check that the inner shift is <= the outer shift        // then this also implies that the inner shift is < the type size) -      if (OuterShiftAmt >= Val.getValueType().getSizeInBits()) { +      if (OuterShiftAmt >= Val.getValueSizeInBits()) {          return false;        } -      Start = -        CurDAG->getTargetConstant(OuterShiftAmt - InnerShiftAmt, DL, MVT::i32); -      Len = -        CurDAG->getTargetConstant(Val.getValueType().getSizeInBits() - -                                  OuterShiftAmt, DL, MVT::i32); +      Start = CurDAG->getTargetConstant(OuterShiftAmt - InnerShiftAmt, DL, +                                        MVT::i32); +      Len = CurDAG->getTargetConstant(Val.getValueSizeInBits() - OuterShiftAmt, +                                      DL, MVT::i32);        if (N->getOpcode() == ISD::SRA) {          // If we have a arithmetic right shift, we need to use the signed bfe diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp index 95556a71c9c..2182f5820e5 100644 --- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp +++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp @@ -2444,7 +2444,7 @@ NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,        //      11 elem => 3 st.v4        unsigned VecSize = 4; -      if (OutVals[0].getValueType().getSizeInBits() == 64) +      if (OutVals[0].getValueSizeInBits() == 64)          VecSize = 2;        unsigned Offset = 0; @@ -2532,7 +2532,7 @@ NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,            TmpVal = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, TmpVal);            TheStoreType = MVT::i32;          } -        else if (TmpVal.getValueType().getSizeInBits() < 16) +        else if (TmpVal.getValueSizeInBits() < 16)            TmpVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, TmpVal);          SDValue Ops[] = { diff --git a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp index 91eb0514ed1..8b5fc515189 100644 --- a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp +++ b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp @@ -3200,7 +3200,7 @@ SDValue PPCDAGToDAGISel::combineToCMPB(SDNode *N) {            Op0.getOperand(1) == Op1.getOperand(1) && 
CC == ISD::SETEQ &&            isa<ConstantSDNode>(Op0.getOperand(1))) { -        unsigned Bits = Op0.getValueType().getSizeInBits(); +        unsigned Bits = Op0.getValueSizeInBits();          if (b != Bits/8-1)            return false;          if (Op0.getConstantOperandVal(1) != Bits-8) @@ -3228,9 +3228,9 @@ SDValue PPCDAGToDAGISel::combineToCMPB(SDNode *N) {          // Now we need to make sure that the upper bytes are known to be          // zero. -        unsigned Bits = Op0.getValueType().getSizeInBits(); -        if (!CurDAG->MaskedValueIsZero(Op0, -              APInt::getHighBitsSet(Bits, Bits - (b+1)*8))) +        unsigned Bits = Op0.getValueSizeInBits(); +        if (!CurDAG->MaskedValueIsZero( +                Op0, APInt::getHighBitsSet(Bits, Bits - (b + 1) * 8)))            return false;          LHS = Op0.getOperand(0); @@ -3263,7 +3263,7 @@ SDValue PPCDAGToDAGISel::combineToCMPB(SDNode *N) {      } else if (Op.getOpcode() == ISD::SRL) {        if (!isa<ConstantSDNode>(Op.getOperand(1)))          return false; -      unsigned Bits = Op.getValueType().getSizeInBits(); +      unsigned Bits = Op.getValueSizeInBits();        if (b != Bits/8-1)          return false;        if (Op.getConstantOperandVal(1) != Bits-8) diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp index 41c0387c6ce..cd75474a76a 100644 --- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp @@ -4218,7 +4218,7 @@ CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,                           SDValue Arg, int SPDiff, unsigned ArgOffset,                       SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {    int Offset = ArgOffset + SPDiff; -  uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8; +  uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8;    int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);    EVT VT = isPPC64 ? MVT::i64 : MVT::i32;    SDValue FIN = DAG.getFrameIndex(FI, VT); diff --git a/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp b/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp index 1290fc1cb23..9ec5800e910 100644 --- a/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp +++ b/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp @@ -117,7 +117,7 @@ static uint64_t allOnes(unsigned int Count) {  // case the result will be truncated as part of the operation).  struct RxSBGOperands {    RxSBGOperands(unsigned Op, SDValue N) -    : Opcode(Op), BitSize(N.getValueType().getSizeInBits()), +    : Opcode(Op), BitSize(N.getValueSizeInBits()),        Mask(allOnes(BitSize)), Input(N), Start(64 - BitSize), End(63),        Rotate(0) {} @@ -709,7 +709,7 @@ bool SystemZDAGToDAGISel::detectOrAndInsertion(SDValue &Op,    // It's only an insertion if all bits are covered or are known to be zero.    // The inner check covers all cases but is more expensive. 
-  uint64_t Used = allOnes(Op.getValueType().getSizeInBits()); +  uint64_t Used = allOnes(Op.getValueSizeInBits());    if (Used != (AndMask | InsertMask)) {      APInt KnownZero, KnownOne;      CurDAG->computeKnownBits(Op.getOperand(0), KnownZero, KnownOne); @@ -749,7 +749,7 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {    case ISD::TRUNCATE: {      if (RxSBG.Opcode == SystemZ::RNSBG)        return false; -    uint64_t BitSize = N.getValueType().getSizeInBits(); +    uint64_t BitSize = N.getValueSizeInBits();      uint64_t Mask = allOnes(BitSize);      if (!refineRxSBGMask(RxSBG, Mask))        return false; @@ -825,7 +825,7 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {    case ISD::ZERO_EXTEND:      if (RxSBG.Opcode != SystemZ::RNSBG) {        // Restrict the mask to the extended operand. -      unsigned InnerBitSize = N.getOperand(0).getValueType().getSizeInBits(); +      unsigned InnerBitSize = N.getOperand(0).getValueSizeInBits();        if (!refineRxSBGMask(RxSBG, allOnes(InnerBitSize)))          return false; @@ -837,7 +837,7 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {    case ISD::SIGN_EXTEND: {      // Check that the extension bits are don't-care (i.e. are masked out      // by the final mask). -    unsigned InnerBitSize = N.getOperand(0).getValueType().getSizeInBits(); +    unsigned InnerBitSize = N.getOperand(0).getValueSizeInBits();      if (maskMatters(RxSBG, allOnes(RxSBG.BitSize) - allOnes(InnerBitSize)))        return false; @@ -851,7 +851,7 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {        return false;      uint64_t Count = CountNode->getZExtValue(); -    unsigned BitSize = N.getValueType().getSizeInBits(); +    unsigned BitSize = N.getValueSizeInBits();      if (Count < 1 || Count >= BitSize)        return false; @@ -878,7 +878,7 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {        return false;      uint64_t Count = CountNode->getZExtValue(); -    unsigned BitSize = N.getValueType().getSizeInBits(); +    unsigned BitSize = N.getValueSizeInBits();      if (Count < 1 || Count >= BitSize)        return false; @@ -1136,8 +1136,7 @@ bool SystemZDAGToDAGISel::tryScatter(StoreSDNode *Store, unsigned Opcode) {    SDValue Value = Store->getValue();    if (Value.getOpcode() != ISD::EXTRACT_VECTOR_ELT)      return false; -  if (Store->getMemoryVT().getSizeInBits() != -      Value.getValueType().getSizeInBits()) +  if (Store->getMemoryVT().getSizeInBits() != Value.getValueSizeInBits())      return false;    SDValue ElemV = Value.getOperand(1); @@ -1323,7 +1322,7 @@ void SystemZDAGToDAGISel::Select(SDNode *Node) {    case ISD::STORE: {      auto *Store = cast<StoreSDNode>(Node); -    unsigned ElemBitSize = Store->getValue().getValueType().getSizeInBits(); +    unsigned ElemBitSize = Store->getValue().getValueSizeInBits();      if (ElemBitSize == 32) {        if (tryScatter(Store, SystemZ::VSCEF))          return; diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp index 3d6e39f3a1b..1180275eec9 100644 --- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp +++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp @@ -1860,8 +1860,7 @@ static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL,        C.Op1.getOpcode() == ISD::Constant &&        cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {      auto *L = cast<LoadSDNode>(C.Op0.getOperand(0)); -    if (L->getMemoryVT().getStoreSizeInBits() -   
     <= C.Op0.getValueType().getSizeInBits()) { +    if (L->getMemoryVT().getStoreSizeInBits() <= C.Op0.getValueSizeInBits()) {        unsigned Type = L->getExtensionType();        if ((Type == ISD::ZEXTLOAD && C.ICmpType != SystemZICMP::SignedOnly) ||            (Type == ISD::SEXTLOAD && C.ICmpType != SystemZICMP::UnsignedOnly)) { @@ -1880,7 +1879,7 @@ static bool isSimpleShift(SDValue N, unsigned &ShiftVal) {      return false;    uint64_t Amount = Shift->getZExtValue(); -  if (Amount >= N.getValueType().getSizeInBits()) +  if (Amount >= N.getValueSizeInBits())      return false;    ShiftVal = Amount; @@ -2031,7 +2030,7 @@ static void adjustForTestUnderMask(SelectionDAG &DAG, const SDLoc &DL,    // Check whether the combination of mask, comparison value and comparison    // type are suitable. -  unsigned BitSize = NewC.Op0.getValueType().getSizeInBits(); +  unsigned BitSize = NewC.Op0.getValueSizeInBits();    unsigned NewCCMask, ShiftVal;    if (NewC.ICmpType != SystemZICMP::SignedOnly &&        NewC.Op0.getOpcode() == ISD::SHL && @@ -4771,7 +4770,7 @@ SDValue SystemZTargetLowering::combineExtract(const SDLoc &DL, EVT ResVT,        // We're extracting the low part of one operand of the BUILD_VECTOR.        Op = Op.getOperand(End / OpBytesPerElement - 1);        if (!Op.getValueType().isInteger()) { -        EVT VT = MVT::getIntegerVT(Op.getValueType().getSizeInBits()); +        EVT VT = MVT::getIntegerVT(Op.getValueSizeInBits());          Op = DAG.getNode(ISD::BITCAST, DL, VT, Op);          DCI.AddToWorklist(Op.getNode());        } @@ -4871,8 +4870,7 @@ SDValue SystemZTargetLowering::combineSIGN_EXTEND(      SDValue Inner = N0.getOperand(0);      if (SraAmt && Inner.hasOneUse() && Inner.getOpcode() == ISD::SHL) {        if (auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1))) { -        unsigned Extra = (VT.getSizeInBits() - -                          N0.getValueType().getSizeInBits()); +        unsigned Extra = (VT.getSizeInBits() - N0.getValueSizeInBits());          unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra;          unsigned NewSraAmt = SraAmt->getZExtValue() + Extra;          EVT ShiftVT = N0.getOperand(1).getValueType(); diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index f46dbe46cea..592cd4af5e5 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -3498,7 +3498,7 @@ static  bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,                           MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,                           const X86InstrInfo *TII, const CCValAssign &VA) { -  unsigned Bytes = Arg.getValueType().getSizeInBits() / 8; +  unsigned Bytes = Arg.getValueSizeInBits() / 8;    for (;;) {      // Look through nodes that don't alter the bits of the incoming value. @@ -3567,7 +3567,7 @@ bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,    if (Offset != MFI.getObjectOffset(FI))      return false; -  if (VA.getLocVT().getSizeInBits() > Arg.getValueType().getSizeInBits()) { +  if (VA.getLocVT().getSizeInBits() > Arg.getValueSizeInBits()) {      // If the argument location is wider than the argument type, check that any      // extension flags match.      
if (Flags.isZExt() != MFI.isObjectZExt(FI) || @@ -5842,7 +5842,7 @@ static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget &Subtarget,        // AVX-512 has register version of the broadcast        bool hasRegVer = Subtarget.hasAVX512() && VT.is512BitVector() && -        Ld.getValueType().getSizeInBits() >= 32; +                       Ld.getValueSizeInBits() >= 32;        if (!ConstSplatVal && ((!Sc.hasOneUse() || !Ld.hasOneUse()) &&            !hasRegVer))          return SDValue(); @@ -5850,7 +5850,7 @@ static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget &Subtarget,      }    } -  unsigned ScalarSize = Ld.getValueType().getSizeInBits(); +  unsigned ScalarSize = Ld.getValueSizeInBits();    bool IsGE256 = (VT.getSizeInBits() >= 256);    // When optimizing for size, generate up to 5 extra bytes for a broadcast @@ -6041,8 +6041,7 @@ static SDValue ConvertI1VectorToInteger(SDValue Op, SelectionDAG &DAG) {        Immediate |= cast<ConstantSDNode>(In)->getZExtValue() << idx;    }    SDLoc dl(Op); -  MVT VT = -   MVT::getIntegerVT(std::max((int)Op.getValueType().getSizeInBits(), 8)); +  MVT VT = MVT::getIntegerVT(std::max((int)Op.getValueSizeInBits(), 8));    return DAG.getConstant(Immediate, dl, VT);  }  // Lower BUILD_VECTOR operation for v8i1 and v16i1 types. @@ -7263,7 +7262,7 @@ static SmallBitVector computeZeroableShuffleElements(ArrayRef<int> Mask,    bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());    bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode()); -  int VectorSizeInBits = V1.getValueType().getSizeInBits(); +  int VectorSizeInBits = V1.getValueSizeInBits();    int ScalarSizeInBits = VectorSizeInBits / Mask.size();    assert(!(VectorSizeInBits % ScalarSizeInBits) && "Illegal shuffle mask size"); @@ -13609,7 +13608,7 @@ SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,      // shouldn't be necessary except that RFP cannot be live across      // multiple blocks. When stackifier is fixed, they can be uncoupled.      MachineFunction &MF = DAG.getMachineFunction(); -    unsigned SSFISize = Op.getValueType().getSizeInBits()/8; +    unsigned SSFISize = Op.getValueSizeInBits()/8;      int SSFI = MF.getFrameInfo().CreateStackObject(SSFISize, SSFISize, false);      auto PtrVT = getPointerTy(MF.getDataLayout());      SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT); @@ -31392,8 +31391,7 @@ static SDValue combineLoopSADPattern(SDNode *N, SelectionDAG &DAG,    // If the reduction vector is at least as wide as the psadbw result, just    // bitcast. If it's narrower, truncate - the high i32 of each i64 is zero    // anyway. 
-  MVT ResVT = -      MVT::getVectorVT(MVT::i32, Sad.getValueType().getSizeInBits() / 32); +  MVT ResVT = MVT::getVectorVT(MVT::i32, Sad.getValueSizeInBits() / 32);    if (VT.getSizeInBits() >= ResVT.getSizeInBits())      Sad = DAG.getNode(ISD::BITCAST, DL, ResVT, Sad);    else diff --git a/llvm/lib/Target/XCore/XCoreSelectionDAGInfo.cpp b/llvm/lib/Target/XCore/XCoreSelectionDAGInfo.cpp index 61fbf0dc3d2..c03b0afceba 100644 --- a/llvm/lib/Target/XCore/XCoreSelectionDAGInfo.cpp +++ b/llvm/lib/Target/XCore/XCoreSelectionDAGInfo.cpp @@ -20,7 +20,7 @@ SDValue XCoreSelectionDAGInfo::EmitTargetCodeForMemcpy(      SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src,      SDValue Size, unsigned Align, bool isVolatile, bool AlwaysInline,      MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const { -  unsigned SizeBitWidth = Size.getValueType().getSizeInBits(); +  unsigned SizeBitWidth = Size.getValueSizeInBits();    // Call __memcpy_4 if the src, dst and size are all 4 byte aligned.    if (!AlwaysInline && (Align & 3) == 0 &&        DAG.MaskedValueIsZero(Size, APInt(SizeBitWidth, 3))) {  | 