-rw-r--r-- | llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 110
-rw-r--r-- | llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp | 14
-rw-r--r-- | llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp | 2
-rw-r--r-- | llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 62
-rw-r--r-- | llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp | 4
-rw-r--r-- | llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 13
-rw-r--r-- | llvm/lib/Target/AMDGPU/R600ISelLowering.cpp | 8
-rw-r--r-- | llvm/lib/Target/ARM/ARMISelLowering.cpp | 20
-rw-r--r-- | llvm/lib/Target/Hexagon/HexagonISelLowering.cpp | 10
-rw-r--r-- | llvm/lib/Target/Mips/MipsSEISelLowering.cpp | 2
-rw-r--r-- | llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 16
-rw-r--r-- | llvm/lib/Target/Sparc/SparcISelLowering.cpp | 4
-rw-r--r-- | llvm/lib/Target/SystemZ/SystemZISelLowering.cpp | 22
-rw-r--r-- | llvm/lib/Target/X86/X86ISelLowering.cpp | 88
14 files changed, 179 insertions, 196 deletions
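Every hunk below makes the same mechanical substitution: explicit comparisons of a node's opcode against ISD::UNDEF are replaced with the isUndef() convenience predicate on SDValue/SDNode. The sketch below models that pattern in a self-contained way; SDNodeModel and SDValueModel are hypothetical stand-ins for illustration only, not LLVM's real classes, which live in llvm/include/llvm/CodeGen/SelectionDAGNodes.h.

#include <cassert>

// Hypothetical stand-ins modeling the classes touched by this diff.
namespace ISD { enum NodeType { UNDEF, ADD, SUB /* ... */ }; }

struct SDNodeModel {
  ISD::NodeType Opcode;
  ISD::NodeType getOpcode() const { return Opcode; }
  // The convenience predicate the diff switches callers to: it simply
  // compares the opcode against ISD::UNDEF internally.
  bool isUndef() const { return Opcode == ISD::UNDEF; }
};

struct SDValueModel {
  SDNodeModel *Node = nullptr;
  ISD::NodeType getOpcode() const { return Node->getOpcode(); }
  bool isUndef() const { return Node->isUndef(); }
};

int main() {
  SDNodeModel N{ISD::UNDEF};
  SDValueModel V{&N};
  // Old spelling, removed throughout this diff:
  assert(V.getOpcode() == ISD::UNDEF);
  // New spelling, added throughout this diff:
  assert(V.isUndef());
  return 0;
}

No functional change is intended by this kind of substitution; the predicate only shortens the call sites, which is why the insertion and deletion counts above are nearly equal.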
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index b7c468a9579..b4aba63cb4b 100644 --- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -1648,9 +1648,9 @@ SDValue DAGCombiner::visitADD(SDNode *N) { } // fold (add x, undef) -> undef - if (N0.getOpcode() == ISD::UNDEF) + if (N0.isUndef()) return N0; - if (N1.getOpcode() == ISD::UNDEF) + if (N1.isUndef()) return N1; if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) { // canonicalize constant to RHS @@ -1925,9 +1925,9 @@ SDValue DAGCombiner::visitSUB(SDNode *N) { N0.getOperand(0), N0.getOperand(1).getOperand(0)); // If either operand of a sub is undef, the result is undef - if (N0.getOpcode() == ISD::UNDEF) + if (N0.isUndef()) return N0; - if (N1.getOpcode() == ISD::UNDEF) + if (N1.isUndef()) return N1; // If the relocation model supports it, consider symbol offsets. @@ -2005,7 +2005,7 @@ SDValue DAGCombiner::visitMUL(SDNode *N) { EVT VT = N0.getValueType(); // fold (mul x, undef) -> 0 - if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF) + if (N0.isUndef() || N1.isUndef()) return DAG.getConstant(0, SDLoc(N), VT); bool N0IsConst = false; @@ -2313,10 +2313,10 @@ SDValue DAGCombiner::visitSDIV(SDNode *N) { return DivRem; // undef / X -> 0 - if (N0.getOpcode() == ISD::UNDEF) + if (N0.isUndef()) return DAG.getConstant(0, DL, VT); // X / undef -> undef - if (N1.getOpcode() == ISD::UNDEF) + if (N1.isUndef()) return N1; return SDValue(); @@ -2377,10 +2377,10 @@ SDValue DAGCombiner::visitUDIV(SDNode *N) { return DivRem; // undef / X -> 0 - if (N0.getOpcode() == ISD::UNDEF) + if (N0.isUndef()) return DAG.getConstant(0, DL, VT); // X / undef -> undef - if (N1.getOpcode() == ISD::UNDEF) + if (N1.isUndef()) return N1; return SDValue(); @@ -2461,10 +2461,10 @@ SDValue DAGCombiner::visitREM(SDNode *N) { return DivRem.getValue(1); // undef % X -> 0 - if (N0.getOpcode() == ISD::UNDEF) + if (N0.isUndef()) return DAG.getConstant(0, DL, VT); // X % undef -> undef - if (N1.getOpcode() == ISD::UNDEF) + if (N1.isUndef()) return N1; return SDValue(); @@ -2488,7 +2488,7 @@ SDValue DAGCombiner::visitMULHS(SDNode *N) { getShiftAmountTy(N0.getValueType()))); } // fold (mulhs x, undef) -> 0 - if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF) + if (N0.isUndef() || N1.isUndef()) return DAG.getConstant(0, SDLoc(N), VT); // If the type twice as wide is legal, transform the mulhs to a wider multiply @@ -2524,7 +2524,7 @@ SDValue DAGCombiner::visitMULHU(SDNode *N) { if (isOneConstant(N1)) return DAG.getConstant(0, DL, N0.getValueType()); // fold (mulhu x, undef) -> 0 - if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF) + if (N0.isUndef() || N1.isUndef()) return DAG.getConstant(0, DL, VT); // If the type twice as wide is legal, transform the mulhu to a wider multiply @@ -2866,7 +2866,7 @@ SDValue DAGCombiner::visitANDLike(SDValue N0, SDValue N1, EVT VT = N1.getValueType(); // fold (and x, undef) -> 0 - if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF) + if (N0.isUndef() || N1.isUndef()) return DAG.getConstant(0, SDLoc(LocReference), VT); // fold (and (setcc x), (setcc y)) -> (setcc (and x, y)) SDValue LL, LR, RL, RR, CC0, CC1; @@ -3574,7 +3574,7 @@ SDValue DAGCombiner::visitORLike(SDValue N0, SDValue N1, SDNode *LocReference) { EVT VT = N1.getValueType(); // fold (or x, undef) -> -1 if (!LegalOperations && - (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)) { + (N0.isUndef() || 
N1.isUndef())) { EVT EltVT = VT.isVector() ? VT.getVectorElementType() : VT; return DAG.getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), SDLoc(LocReference), VT); @@ -4091,12 +4091,12 @@ SDValue DAGCombiner::visitXOR(SDNode *N) { } // fold (xor undef, undef) -> 0. This is a common idiom (misuse). - if (N0.getOpcode() == ISD::UNDEF && N1.getOpcode() == ISD::UNDEF) + if (N0.isUndef() && N1.isUndef()) return DAG.getConstant(0, SDLoc(N), VT); // fold (xor x, undef) -> undef - if (N0.getOpcode() == ISD::UNDEF) + if (N0.isUndef()) return N0; - if (N1.getOpcode() == ISD::UNDEF) + if (N1.isUndef()) return N1; // fold (xor c1, c2) -> c1^c2 ConstantSDNode *N0C = getAsNonOpaqueConstant(N0); @@ -4396,7 +4396,7 @@ SDValue DAGCombiner::visitSHL(SDNode *N) { if (N1C && N1C->isNullValue()) return N0; // fold (shl undef, x) -> 0 - if (N0.getOpcode() == ISD::UNDEF) + if (N0.isUndef()) return DAG.getConstant(0, SDLoc(N), VT); // if (shl x, c) is known to be zero, return 0 if (DAG.MaskedValueIsZero(SDValue(N, 0), @@ -5233,7 +5233,7 @@ static SDValue ConvertSelectToConcatVector(SDNode *N, SelectionDAG &DAG) { // length of the BV and see if all the non-undef nodes are the same. ConstantSDNode *BottomHalf = nullptr; for (int i = 0; i < NumElems / 2; ++i) { - if (Cond->getOperand(i)->getOpcode() == ISD::UNDEF) + if (Cond->getOperand(i)->isUndef()) continue; if (BottomHalf == nullptr) @@ -5245,7 +5245,7 @@ static SDValue ConvertSelectToConcatVector(SDNode *N, SelectionDAG &DAG) { // Do the same for the second half of the BuildVector ConstantSDNode *TopHalf = nullptr; for (int i = NumElems / 2; i < NumElems; ++i) { - if (Cond->getOperand(i)->getOpcode() == ISD::UNDEF) + if (Cond->getOperand(i)->isUndef()) continue; if (TopHalf == nullptr) @@ -5671,7 +5671,7 @@ SDValue DAGCombiner::visitSELECT_CC(SDNode *N) { return N2; // cond always true -> true val else return N3; // cond always false -> false val - } else if (SCC->getOpcode() == ISD::UNDEF) { + } else if (SCC->isUndef()) { // When the condition is UNDEF, just return the first operand. This is // coherent the DAG creation, no setcc node is created in this case return N2; @@ -5752,7 +5752,7 @@ static SDNode *tryToFoldExtendOfConstant(SDNode *N, const TargetLowering &TLI, for (unsigned i=0; i != NumElts; ++i) { SDValue Op = N0->getOperand(i); - if (Op->getOpcode() == ISD::UNDEF) { + if (Op->isUndef()) { Elts.push_back(DAG.getUNDEF(SVT)); continue; } @@ -6993,7 +6993,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND_VECTOR_INREG(SDNode *N) { SDValue N0 = N->getOperand(0); EVT VT = N->getValueType(0); - if (N0.getOpcode() == ISD::UNDEF) + if (N0.isUndef()) return DAG.getUNDEF(VT); if (SDNode *Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes, @@ -7007,7 +7007,7 @@ SDValue DAGCombiner::visitZERO_EXTEND_VECTOR_INREG(SDNode *N) { SDValue N0 = N->getOperand(0); EVT VT = N->getValueType(0); - if (N0.getOpcode() == ISD::UNDEF) + if (N0.isUndef()) return DAG.getUNDEF(VT); if (SDNode *Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes, @@ -7600,7 +7600,7 @@ ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) { // Shift the previously computed bits over. NewBits <<= SrcBitSize; SDValue Op = BV->getOperand(i+ (isLE ? (NumInputsPerOutput-j-1) : j)); - if (Op.getOpcode() == ISD::UNDEF) continue; + if (Op.isUndef()) continue; EltIsUndef = false; NewBits |= cast<ConstantSDNode>(Op)->getAPIntValue(). 
@@ -7625,7 +7625,7 @@ ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) { SmallVector<SDValue, 8> Ops; for (const SDValue &Op : BV->op_values()) { - if (Op.getOpcode() == ISD::UNDEF) { + if (Op.isUndef()) { Ops.append(NumOutputsPerInput, DAG.getUNDEF(DstEltVT)); continue; } @@ -11261,7 +11261,7 @@ void DAGCombiner::getStoreMergeAndAliasCandidates( return; // Do not handle stores to undef base pointers. - if (BasePtr.Base.getOpcode() == ISD::UNDEF) + if (BasePtr.Base.isUndef()) return; // Walk up the chain and look for nodes with offsets from the same @@ -11894,7 +11894,7 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) { } // Turn 'store undef, Ptr' -> nothing. - if (Value.getOpcode() == ISD::UNDEF && ST->isUnindexed()) + if (Value.isUndef() && ST->isUnindexed()) return Chain; // Try to infer better alignment information than the store already has. @@ -12040,7 +12040,7 @@ SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) { SDLoc dl(N); // If the inserted element is an UNDEF, just use the input vector. - if (InVal.getOpcode() == ISD::UNDEF) + if (InVal.isUndef()) return InVec; EVT VT = InVec.getValueType(); @@ -12084,7 +12084,7 @@ SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) { if (InVec.getOpcode() == ISD::BUILD_VECTOR && InVec.hasOneUse()) { Ops.append(InVec.getNode()->op_begin(), InVec.getNode()->op_end()); - } else if (InVec.getOpcode() == ISD::UNDEF) { + } else if (InVec.isUndef()) { unsigned NElts = VT.getVectorNumElements(); Ops.append(NElts, DAG.getUNDEF(InVal.getValueType())); } else { @@ -12405,7 +12405,7 @@ SDValue DAGCombiner::reduceBuildVecExtToExtBuildVec(SDNode *N) { for (unsigned i = 0; i != NumInScalars; ++i) { SDValue In = N->getOperand(i); // Ignore undef inputs. - if (In.getOpcode() == ISD::UNDEF) continue; + if (In.isUndef()) continue; bool AnyExt = In.getOpcode() == ISD::ANY_EXTEND; bool ZeroExt = In.getOpcode() == ISD::ZERO_EXTEND; @@ -12460,9 +12460,9 @@ SDValue DAGCombiner::reduceBuildVecExtToExtBuildVec(SDNode *N) { SDValue Cast = N->getOperand(i); assert((Cast.getOpcode() == ISD::ANY_EXTEND || Cast.getOpcode() == ISD::ZERO_EXTEND || - Cast.getOpcode() == ISD::UNDEF) && "Invalid cast opcode"); + Cast.isUndef()) && "Invalid cast opcode"); SDValue In; - if (Cast.getOpcode() == ISD::UNDEF) + if (Cast.isUndef()) In = DAG.getUNDEF(SourceType); else In = Cast->getOperand(0); @@ -12549,7 +12549,7 @@ SDValue DAGCombiner::reduceBuildVecConvertToConvertBuildVec(SDNode *N) { for (unsigned i = 0; i != NumInScalars; ++i) { SDValue In = N->getOperand(i); - if (In.getOpcode() == ISD::UNDEF) + if (In.isUndef()) Opnds.push_back(DAG.getUNDEF(SrcVT)); else Opnds.push_back(In.getOperand(0)); @@ -12592,7 +12592,7 @@ SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) { for (unsigned i = 0; i != NumInScalars; ++i) { SDValue Op = N->getOperand(i); // Ignore undef inputs. - if (Op.getOpcode() == ISD::UNDEF) continue; + if (Op.isUndef()) continue; // See if we can combine this build_vector into a blend with a zero vector. if (!VecIn2.getNode() && (isNullConstant(Op) || isNullFPConstant(Op))) { @@ -12782,7 +12782,7 @@ static SDValue combineConcatVectorOfScalars(SDNode *N, SelectionDAG &DAG) { for (SDValue &Op : Ops) { if (Op.getValueType() == SVT) continue; - if (Op.getOpcode() == ISD::UNDEF) + if (Op.isUndef()) Op = ScalarUndef; else Op = DAG.getNode(ISD::BITCAST, DL, SVT, Op); @@ -12815,7 +12815,7 @@ static SDValue combineConcatVectorOfExtracts(SDNode *N, SelectionDAG &DAG) { Op = Op.getOperand(0); // UNDEF nodes convert to UNDEF shuffle mask values. 
- if (Op.getOpcode() == ISD::UNDEF) { + if (Op.isUndef()) { Mask.append((unsigned)NumOpElts, -1); continue; } @@ -12835,7 +12835,7 @@ static SDValue combineConcatVectorOfExtracts(SDNode *N, SelectionDAG &DAG) { ExtVec = ExtVec.getOperand(0); // UNDEF nodes convert to UNDEF shuffle mask values. - if (ExtVec.getOpcode() == ISD::UNDEF) { + if (ExtVec.isUndef()) { Mask.append((unsigned)NumOpElts, -1); continue; } @@ -12859,11 +12859,11 @@ static SDValue combineConcatVectorOfExtracts(SDNode *N, SelectionDAG &DAG) { return SDValue(); // At most we can reference 2 inputs in the final shuffle. - if (SV0.getOpcode() == ISD::UNDEF || SV0 == ExtVec) { + if (SV0.isUndef() || SV0 == ExtVec) { SV0 = ExtVec; for (int i = 0; i != NumOpElts; ++i) Mask.push_back(i + ExtIdx); - } else if (SV1.getOpcode() == ISD::UNDEF || SV1 == ExtVec) { + } else if (SV1.isUndef() || SV1 == ExtVec) { SV1 = ExtVec; for (int i = 0; i != NumOpElts; ++i) Mask.push_back(i + ExtIdx + NumElts); @@ -12891,7 +12891,7 @@ SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) { // Optimize concat_vectors where all but the first of the vectors are undef. if (std::all_of(std::next(N->op_begin()), N->op_end(), [](const SDValue &Op) { - return Op.getOpcode() == ISD::UNDEF; + return Op.isUndef(); })) { SDValue In = N->getOperand(0); assert(In.getValueType().isVector() && "Must concat vectors"); @@ -12995,7 +12995,7 @@ SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) { for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { SDValue Op = N->getOperand(i); - if (Op.getOpcode() == ISD::UNDEF) + if (Op.isUndef()) continue; // Check if this is the identity extract: @@ -13195,7 +13195,7 @@ static SDValue partitionShuffleOfConcats(SDNode *N, SelectionDAG &DAG) { // Special case: shuffle(concat(A,B)) can be more efficiently represented // as concat(shuffle(A,B),UNDEF) if the shuffle doesn't set any of the high // half vector elements. - if (NumElemsPerConcat * 2 == NumElts && N1.getOpcode() == ISD::UNDEF && + if (NumElemsPerConcat * 2 == NumElts && N1.isUndef() && std::all_of(SVN->getMask().begin() + NumElemsPerConcat, SVN->getMask().end(), [](int i) { return i == -1; })) { N0 = DAG.getVectorShuffle(ConcatVT, SDLoc(N), N0.getOperand(0), N0.getOperand(1), @@ -13251,7 +13251,7 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) { assert(N0.getValueType() == VT && "Vector shuffle must be normalized in DAG"); // Canonicalize shuffle undef, undef -> undef - if (N0.getOpcode() == ISD::UNDEF && N1.getOpcode() == ISD::UNDEF) + if (N0.isUndef() && N1.isUndef()) return DAG.getUNDEF(VT); ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); @@ -13269,7 +13269,7 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) { } // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask. 
- if (N0.getOpcode() == ISD::UNDEF) { + if (N0.isUndef()) { SmallVector<int, 8> NewMask; for (unsigned i = 0; i != NumElts; ++i) { int Idx = SVN->getMaskElt(i); @@ -13286,7 +13286,7 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) { } // Remove references to rhs if it is undef - if (N1.getOpcode() == ISD::UNDEF) { + if (N1.isUndef()) { bool Changed = false; SmallVector<int, 8> NewMask; for (unsigned i = 0; i != NumElts; ++i) { @@ -13362,7 +13362,7 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) { if (N0.getOpcode() == ISD::CONCAT_VECTORS && Level < AfterLegalizeVectorOps && - (N1.getOpcode() == ISD::UNDEF || + (N1.isUndef() || (N1.getOpcode() == ISD::CONCAT_VECTORS && N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()))) { if (SDValue V = partitionShuffleOfConcats(N, DAG)) @@ -13410,7 +13410,7 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) { // attempt to merge the 2 shuffles and suitably bitcast the inputs/output // back to their original types. if (N0.getOpcode() == ISD::BITCAST && N0.hasOneUse() && - N1.getOpcode() == ISD::UNDEF && Level < AfterLegalizeVectorOps && + N1.isUndef() && Level < AfterLegalizeVectorOps && TLI.isTypeLegal(VT)) { // Peek through the bitcast only if there is one user. @@ -13496,7 +13496,7 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) { SDValue SV0 = N1->getOperand(0); SDValue SV1 = N1->getOperand(1); bool HasSameOp0 = N0 == SV0; - bool IsSV1Undef = SV1.getOpcode() == ISD::UNDEF; + bool IsSV1Undef = SV1.isUndef(); if (HasSameOp0 || IsSV1Undef || N0 == SV1) // Commute the operands of this shuffle so that next rule // will trigger. @@ -13549,7 +13549,7 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) { } // Simple case where 'CurrentVec' is UNDEF. - if (CurrentVec.getOpcode() == ISD::UNDEF) { + if (CurrentVec.isUndef()) { Mask.push_back(-1); continue; } @@ -13729,7 +13729,7 @@ SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) { int EltIdx = i / Split; int SubIdx = i % Split; SDValue Elt = RHS.getOperand(EltIdx); - if (Elt.getOpcode() == ISD::UNDEF) { + if (Elt.isUndef()) { Indices.push_back(-1); continue; } @@ -13808,8 +13808,8 @@ SDValue DAGCombiner::SimplifyVBinOp(SDNode *N) { // -> (shuffle (VBinOp (A, B)), Undef, Mask). if (LegalTypes && isa<ShuffleVectorSDNode>(LHS) && isa<ShuffleVectorSDNode>(RHS) && LHS.hasOneUse() && RHS.hasOneUse() && - LHS.getOperand(1).getOpcode() == ISD::UNDEF && - RHS.getOperand(1).getOpcode() == ISD::UNDEF) { + LHS.getOperand(1).isUndef() && + RHS.getOperand(1).isUndef()) { ShuffleVectorSDNode *SVN0 = cast<ShuffleVectorSDNode>(LHS); ShuffleVectorSDNode *SVN1 = cast<ShuffleVectorSDNode>(RHS); @@ -14768,7 +14768,7 @@ bool DAGCombiner::findBetterNeighborChains(StoreSDNode* St) { return false; // Do not handle stores to undef base pointers. - if (BasePtr.Base.getOpcode() == ISD::UNDEF) + if (BasePtr.Base.isUndef()) return false; SmallVector<StoreSDNode *, 8> ChainedStores; diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp index 12661496232..788528f758a 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp @@ -1597,7 +1597,7 @@ SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) { // Store (in the right endianness) the elements to memory. for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) { // Ignore undef elements. 
- if (Node->getOperand(i).getOpcode() == ISD::UNDEF) continue; + if (Node->getOperand(i).isUndef()) continue; unsigned Offset = TypeByteSize*i; @@ -2029,7 +2029,7 @@ ExpandBVWithShuffles(SDNode *Node, SelectionDAG &DAG, NewIntermedVals; for (unsigned i = 0; i < NumElems; ++i) { SDValue V = Node->getOperand(i); - if (V.getOpcode() == ISD::UNDEF) + if (V.isUndef()) continue; SDValue Vec; @@ -2121,7 +2121,7 @@ SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) { bool isConstant = true; for (unsigned i = 0; i < NumElems; ++i) { SDValue V = Node->getOperand(i); - if (V.getOpcode() == ISD::UNDEF) + if (V.isUndef()) continue; if (i > 0) isOnlyLowElement = false; @@ -2164,7 +2164,7 @@ SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) { CI->getZExtValue())); } } else { - assert(Node->getOperand(i).getOpcode() == ISD::UNDEF); + assert(Node->getOperand(i).isUndef()); Type *OpNTy = EltVT.getTypeForEVT(*DAG.getContext()); CV.push_back(UndefValue::get(OpNTy)); } @@ -2181,7 +2181,7 @@ SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) { SmallSet<SDValue, 16> DefinedValues; for (unsigned i = 0; i < NumElems; ++i) { - if (Node->getOperand(i).getOpcode() == ISD::UNDEF) + if (Node->getOperand(i).isUndef()) continue; DefinedValues.insert(Node->getOperand(i)); } @@ -2191,7 +2191,7 @@ SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) { SmallVector<int, 8> ShuffleVec(NumElems, -1); for (unsigned i = 0; i < NumElems; ++i) { SDValue V = Node->getOperand(i); - if (V.getOpcode() == ISD::UNDEF) + if (V.isUndef()) continue; ShuffleVec[i] = V == Value1 ? 0 : NumElems; } @@ -3790,7 +3790,7 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) { Node->getOperand(2)); } else { // We test only the i1 bit. Skip the AND if UNDEF. - Tmp3 = (Tmp2.getOpcode() == ISD::UNDEF) ? Tmp2 : + Tmp3 = (Tmp2.isUndef()) ? Tmp2 : DAG.getNode(ISD::AND, dl, Tmp2.getValueType(), Tmp2, DAG.getConstant(1, dl, Tmp2.getValueType())); Tmp1 = DAG.getNode(ISD::BR_CC, dl, MVT::Other, Tmp1, diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp index 575eb3ca087..fcc6e4a3e48 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp @@ -370,7 +370,7 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_UNDEF(SDNode *N) { SDValue DAGTypeLegalizer::ScalarizeVecRes_VECTOR_SHUFFLE(SDNode *N) { // Figure out if the scalar is the LHS or RHS and return it. SDValue Arg = N->getOperand(2).getOperand(0); - if (Arg.getOpcode() == ISD::UNDEF) + if (Arg.isUndef()) return DAG.getUNDEF(N->getValueType(0).getVectorElementType()); unsigned Op = !cast<ConstantSDNode>(Arg)->isNullValue(); return GetScalarizedVector(N->getOperand(Op)); diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index 592f5152558..fc4116b43af 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -106,7 +106,7 @@ bool ISD::isBuildVectorAllOnes(const SDNode *N) { unsigned i = 0, e = N->getNumOperands(); // Skip over all of the undef values. - while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF) + while (i != e && N->getOperand(i).isUndef()) ++i; // Do not accept an all-undef vector. 
@@ -153,7 +153,7 @@ bool ISD::isBuildVectorAllZeros(const SDNode *N) { bool IsAllUndef = true; for (const SDValue &Op : N->op_values()) { - if (Op.getOpcode() == ISD::UNDEF) + if (Op.isUndef()) continue; IsAllUndef = false; // Do not accept build_vectors that aren't all constants or which have non-0 @@ -188,7 +188,7 @@ bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) { return false; for (const SDValue &Op : N->op_values()) { - if (Op.getOpcode() == ISD::UNDEF) + if (Op.isUndef()) continue; if (!isa<ConstantSDNode>(Op)) return false; @@ -203,7 +203,7 @@ bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) { return false; for (const SDValue &Op : N->op_values()) { - if (Op.getOpcode() == ISD::UNDEF) + if (Op.isUndef()) continue; if (!isa<ConstantFPSDNode>(Op)) return false; @@ -1488,7 +1488,7 @@ SDValue SelectionDAG::getVectorShuffle(EVT VT, SDLoc dl, SDValue N1, "Invalid VECTOR_SHUFFLE"); // Canonicalize shuffle undef, undef -> undef - if (N1.getOpcode() == ISD::UNDEF && N2.getOpcode() == ISD::UNDEF) + if (N1.isUndef() && N2.isUndef()) return getUNDEF(VT); // Validate that all indices in Mask are within the range of the elements @@ -1508,7 +1508,7 @@ SDValue SelectionDAG::getVectorShuffle(EVT VT, SDLoc dl, SDValue N1, } // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask. - if (N1.getOpcode() == ISD::UNDEF) + if (N1.isUndef()) commuteShuffle(N1, N2, MaskVec); // If shuffling a splat, try to blend the splat instead. We do this here so @@ -1542,7 +1542,7 @@ SDValue SelectionDAG::getVectorShuffle(EVT VT, SDLoc dl, SDValue N1, // Canonicalize all index into lhs, -> shuffle lhs, undef // Canonicalize all index into rhs, -> shuffle rhs, undef bool AllLHS = true, AllRHS = true; - bool N2Undef = N2.getOpcode() == ISD::UNDEF; + bool N2Undef = N2.isUndef(); for (unsigned i = 0; i != NElts; ++i) { if (MaskVec[i] >= (int)NElts) { if (N2Undef) @@ -1562,9 +1562,9 @@ SDValue SelectionDAG::getVectorShuffle(EVT VT, SDLoc dl, SDValue N1, commuteShuffle(N1, N2, MaskVec); } // Reset our undef status after accounting for the mask. - N2Undef = N2.getOpcode() == ISD::UNDEF; + N2Undef = N2.isUndef(); // Re-check whether both sides ended up undef. - if (N1.getOpcode() == ISD::UNDEF && N2Undef) + if (N1.isUndef() && N2Undef) return getUNDEF(VT); // If Identity shuffle return that node. @@ -1590,7 +1590,7 @@ SDValue SelectionDAG::getVectorShuffle(EVT VT, SDLoc dl, SDValue N1, BitVector UndefElements; SDValue Splat = BV->getSplatValue(&UndefElements); // If this is a splat of an undef, shuffling it is also undef. 
- if (Splat && Splat.getOpcode() == ISD::UNDEF) + if (Splat && Splat.isUndef()) return getUNDEF(VT); bool SameNumElts = @@ -3056,7 +3056,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, "Vector element count mismatch!"); assert(Operand.getValueType().bitsLT(VT) && "Invalid fpext node, dst < src!"); - if (Operand.getOpcode() == ISD::UNDEF) + if (Operand.isUndef()) return getUNDEF(VT); break; case ISD::SIGN_EXTEND: @@ -3384,8 +3384,8 @@ SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode, SDLoc DL, auto IsConstantBuildVectorOrUndef = [&](const SDValue &Op) { BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op); - return (Op.getOpcode() == ISD::UNDEF) || - (Op.getOpcode() == ISD::CONDCODE) || (BV && BV->isConstant()); + return (Op.isUndef()) || (Op.getOpcode() == ISD::CONDCODE) || + (BV && BV->isConstant()); }; // All operands must be vector types with the same number of elements as @@ -3653,7 +3653,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1, SmallVector<SDValue, 8> Ops; for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) { SDValue Op = N1.getOperand(i); - if (Op.getOpcode() == ISD::UNDEF) { + if (Op.isUndef()) { Ops.push_back(getUNDEF(VT.getScalarType())); continue; } @@ -3672,7 +3672,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1, } case ISD::EXTRACT_VECTOR_ELT: // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF. - if (N1.getOpcode() == ISD::UNDEF) + if (N1.isUndef()) return getUNDEF(VT); // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF @@ -3831,7 +3831,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1, } // Canonicalize an UNDEF to the RHS, even over a constant. - if (N1.getOpcode() == ISD::UNDEF) { + if (N1.isUndef()) { if (isCommutativeBinOp(Opcode)) { std::swap(N1, N2); } else { @@ -3860,10 +3860,10 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1, } // Fold a bunch of operators when the RHS is undef. - if (N2.getOpcode() == ISD::UNDEF) { + if (N2.isUndef()) { switch (Opcode) { case ISD::XOR: - if (N1.getOpcode() == ISD::UNDEF) + if (N1.isUndef()) // Handle undef ^ undef -> 0 special case. This is a common // idiom (misuse). return getConstant(0, DL, VT); @@ -4300,7 +4300,7 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, SDLoc dl, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) { // Turn a memcpy of undef to nop. - if (Src.getOpcode() == ISD::UNDEF) + if (Src.isUndef()) return Chain; // Expand memcpy to a series of load and store ops if the size operand falls @@ -4418,7 +4418,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, SDLoc dl, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) { // Turn a memmove of undef to nop. - if (Src.getOpcode() == ISD::UNDEF) + if (Src.isUndef()) return Chain; // Expand memmove to a series of load and store ops if the size operand falls @@ -4515,7 +4515,7 @@ static SDValue getMemsetStores(SelectionDAG &DAG, SDLoc dl, unsigned Align, bool isVol, MachinePointerInfo DstPtrInfo) { // Turn a memset of undef to nop. - if (Src.getOpcode() == ISD::UNDEF) + if (Src.isUndef()) return Chain; // Expand memset to a series of load/store ops if the size operand @@ -5057,7 +5057,7 @@ static MachinePointerInfo InferPointerInfo(SelectionDAG &DAG, SDValue Ptr, // If the 'Offset' value isn't a constant, we can't handle this. 
if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp)) return InferPointerInfo(DAG, Ptr, OffsetNode->getSExtValue()); - if (OffsetOp.getOpcode() == ISD::UNDEF) + if (OffsetOp.isUndef()) return InferPointerInfo(DAG, Ptr); return MachinePointerInfo(); } @@ -5119,8 +5119,7 @@ SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, } bool Indexed = AM != ISD::UNINDEXED; - assert((Indexed || Offset.getOpcode() == ISD::UNDEF) && - "Unindexed load with an offset!"); + assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!"); SDVTList VTs = Indexed ? getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other); @@ -5192,8 +5191,7 @@ SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, SDLoc dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM) { LoadSDNode *LD = cast<LoadSDNode>(OrigLoad); - assert(LD->getOffset().getOpcode() == ISD::UNDEF && - "Load is already a indexed load!"); + assert(LD->getOffset().isUndef() && "Load is already a indexed load!"); return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl, LD->getChain(), Base, Offset, LD->getPointerInfo(), LD->getMemoryVT(), LD->isVolatile(), LD->isNonTemporal(), @@ -5204,8 +5202,7 @@ SDValue SelectionDAG::getStore(SDValue Chain, SDLoc dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, bool isVolatile, bool isNonTemporal, unsigned Alignment, const AAMDNodes &AAInfo) { - assert(Chain.getValueType() == MVT::Other && - "Invalid chain type"); + assert(Chain.getValueType() == MVT::Other && "Invalid chain type"); if (Alignment == 0) // Ensure that codegen never sees alignment 0 Alignment = getEVTAlignment(Val.getValueType()); @@ -5329,8 +5326,7 @@ SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, SDLoc dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM) { StoreSDNode *ST = cast<StoreSDNode>(OrigStore); - assert(ST->getOffset().getOpcode() == ISD::UNDEF && - "Store is already a indexed store!"); + assert(ST->getOffset().isUndef() && "Store is already a indexed store!"); SDVTList VTs = getVTList(Base.getValueType(), MVT::Other); SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset }; FoldingSetNodeID ID; @@ -7185,7 +7181,7 @@ bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, SDValue OpVal = getOperand(i); unsigned BitPos = j * EltBitSize; - if (OpVal.getOpcode() == ISD::UNDEF) + if (OpVal.isUndef()) SplatUndef |= APInt::getBitsSet(sz, BitPos, BitPos + EltBitSize); else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) SplatValue |= CN->getAPIntValue().zextOrTrunc(EltBitSize). 
@@ -7231,7 +7227,7 @@ SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const { SDValue Splatted; for (unsigned i = 0, e = getNumOperands(); i != e; ++i) { SDValue Op = getOperand(i); - if (Op.getOpcode() == ISD::UNDEF) { + if (Op.isUndef()) { if (UndefElements) (*UndefElements)[i] = true; } else if (!Splatted) { @@ -7242,7 +7238,7 @@ SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const { } if (!Splatted) { - assert(getOperand(0).getOpcode() == ISD::UNDEF && + assert(getOperand(0).isUndef() && "Can only have a splat without a constant for all undefs."); return getOperand(0); } diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index 798627df319..8ec3387b599 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -2901,8 +2901,8 @@ void SelectionDAGBuilder::visitShuffleVector(const User &I) { // Pad both vectors with undefs to make them the same length as the mask. unsigned NumConcat = MaskNumElts / SrcNumElts; - bool Src1U = Src1.getOpcode() == ISD::UNDEF; - bool Src2U = Src2.getOpcode() == ISD::UNDEF; + bool Src1U = Src1.isUndef(); + bool Src2U = Src2.isUndef(); SDValue UndefVal = DAG.getUNDEF(SrcVT); SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal); diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp index cb78d6ddb0f..e606512350d 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -4973,7 +4973,7 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op, SmallVector<ShuffleSourceInfo, 2> Sources; for (unsigned i = 0; i < NumElts; ++i) { SDValue V = Op.getOperand(i); - if (V.getOpcode() == ISD::UNDEF) + if (V.isUndef()) continue; else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT || !isa<ConstantSDNode>(V.getOperand(1))) { @@ -5097,7 +5097,7 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op, int BitsPerShuffleLane = ShuffleVT.getVectorElementType().getSizeInBits(); for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) { SDValue Entry = Op.getOperand(i); - if (Entry.getOpcode() == ISD::UNDEF) + if (Entry.isUndef()) continue; auto Src = std::find(Sources.begin(), Sources.end(), Entry.getOperand(0)); @@ -5545,7 +5545,7 @@ static SDValue GenerateTBL(SDValue Op, ArrayRef<int> ShuffleMask, SDValue V2Cst = DAG.getNode(ISD::BITCAST, DL, IndexVT, V2); SDValue Shuffle; - if (V2.getNode()->getOpcode() == ISD::UNDEF) { + if (V2.getNode()->isUndef()) { if (IndexLen == 8) V1Cst = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, V1Cst, V1Cst); Shuffle = DAG.getNode( @@ -5658,8 +5658,7 @@ SDValue AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, Imm *= getExtFactor(V1); return DAG.getNode(AArch64ISD::EXT, dl, V1.getValueType(), V1, V2, DAG.getConstant(Imm, dl, MVT::i32)); - } else if (V2->getOpcode() == ISD::UNDEF && - isSingletonEXTMask(ShuffleMask, VT, Imm)) { + } else if (V2->isUndef() && isSingletonEXTMask(ShuffleMask, VT, Imm)) { Imm *= getExtFactor(V1); return DAG.getNode(AArch64ISD::EXT, dl, V1.getValueType(), V1, V1, DAG.getConstant(Imm, dl, MVT::i32)); @@ -6327,7 +6326,7 @@ FailedModImm: SDValue ConstantValue; for (unsigned i = 0; i < NumElts; ++i) { SDValue V = Op.getOperand(i); - if (V.getOpcode() == ISD::UNDEF) + if (V.isUndef()) continue; if (i > 0) isOnlyLowElement = false; @@ -6449,7 +6448,7 @@ FailedModImm: } for (; i < NumElts; ++i) { SDValue V = 
Op.getOperand(i); - if (V.getOpcode() == ISD::UNDEF) + if (V.isUndef()) continue; SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i64); Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx); diff --git a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp index bd4517d443d..75f9d63dffb 100644 --- a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp @@ -1847,7 +1847,7 @@ static SDValue CompactSwizzlableVector( }; for (unsigned i = 0; i < 4; i++) { - if (NewBldVec[i].getOpcode() == ISD::UNDEF) + if (NewBldVec[i].isUndef()) // We mask write here to teach later passes that the ith element of this // vector is undef. Thus we can use it to reduce 128 bits reg usage, // break false dependencies and additionnaly make assembly easier to read. @@ -1862,7 +1862,7 @@ static SDValue CompactSwizzlableVector( } } - if (NewBldVec[i].getOpcode() == ISD::UNDEF) + if (NewBldVec[i].isUndef()) continue; for (unsigned j = 0; j < i; j++) { if (NewBldVec[i] == NewBldVec[j]) { @@ -2001,7 +2001,7 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N, SDLoc dl(N); // If the inserted element is an UNDEF, just use the input vector. - if (InVal.getOpcode() == ISD::UNDEF) + if (InVal.isUndef()) return InVec; EVT VT = InVec.getValueType(); @@ -2022,7 +2022,7 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N, if (InVec.getOpcode() == ISD::BUILD_VECTOR) { Ops.append(InVec.getNode()->op_begin(), InVec.getNode()->op_end()); - } else if (InVec.getOpcode() == ISD::UNDEF) { + } else if (InVec.isUndef()) { unsigned NElts = VT.getVectorNumElements(); Ops.append(NElts, DAG.getUNDEF(InVal.getValueType())); } else { diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp index 34689bf0939..3cb9264431a 100644 --- a/llvm/lib/Target/ARM/ARMISelLowering.cpp +++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -5562,7 +5562,7 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, SDValue Value; for (unsigned i = 0; i < NumElts; ++i) { SDValue V = Op.getOperand(i); - if (V.getOpcode() == ISD::UNDEF) + if (V.isUndef()) continue; if (i > 0) isOnlyLowElement = false; @@ -5695,7 +5695,7 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, SDValue Vec = DAG.getUNDEF(VT); for (unsigned i = 0 ; i < NumElts; ++i) { SDValue V = Op.getOperand(i); - if (V.getOpcode() == ISD::UNDEF) + if (V.isUndef()) continue; SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i32); Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx); @@ -5741,7 +5741,7 @@ SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op, SmallVector<ShuffleSourceInfo, 2> Sources; for (unsigned i = 0; i < NumElts; ++i) { SDValue V = Op.getOperand(i); - if (V.getOpcode() == ISD::UNDEF) + if (V.isUndef()) continue; else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) { // A shuffle can only come from building a vector from various @@ -5868,7 +5868,7 @@ SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op, int BitsPerShuffleLane = ShuffleVT.getVectorElementType().getSizeInBits(); for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) { SDValue Entry = Op.getOperand(i); - if (Entry.getOpcode() == ISD::UNDEF) + if (Entry.isUndef()) continue; auto Src = std::find(Sources.begin(), Sources.end(), Entry.getOperand(0)); @@ -6042,7 +6042,7 @@ static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op, I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I) VTBLMask.push_back(DAG.getConstant(*I, DL, 
MVT::i32)); - if (V2.getNode()->getOpcode() == ISD::UNDEF) + if (V2.getNode()->isUndef()) return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1, DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8, VTBLMask)); @@ -6127,8 +6127,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) { if (isVREVMask(ShuffleMask, VT, 16)) return DAG.getNode(ARMISD::VREV16, dl, VT, V1); - if (V2->getOpcode() == ISD::UNDEF && - isSingletonVEXTMask(ShuffleMask, VT, Imm)) { + if (V2->isUndef() && isSingletonVEXTMask(ShuffleMask, VT, Imm)) { return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V1, DAG.getConstant(Imm, dl, MVT::i32)); } @@ -6163,8 +6162,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) { // -> // concat(VZIP(v1, v2):0, :1) // - if (V1->getOpcode() == ISD::CONCAT_VECTORS && - V2->getOpcode() == ISD::UNDEF) { + if (V1->getOpcode() == ISD::CONCAT_VECTORS && V2->isUndef()) { SDValue SubV1 = V1->getOperand(0); SDValue SubV2 = V1->getOperand(1); EVT SubVT = SubV1.getValueType(); @@ -9483,7 +9481,7 @@ PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { // Assume only bit cast to i32 will go away. if (Elt->getOperand(0).getValueType() == MVT::i32) ++NumOfBitCastedElts; - } else if (Elt.getOpcode() == ISD::UNDEF || isa<ConstantSDNode>(Elt)) + } else if (Elt.isUndef() || isa<ConstantSDNode>(Elt)) // Constants are statically casted, thus do not count them as // relevant operands. --NumOfRelevantElts; @@ -9510,7 +9508,7 @@ PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { SDLoc dl(N); for (unsigned Idx = 0 ; Idx < NumElts; ++Idx) { SDValue V = N->getOperand(Idx); - if (V.getOpcode() == ISD::UNDEF) + if (V.isUndef()) continue; if (V.getOpcode() == ISD::BITCAST && V->getOperand(0).getValueType() == MVT::i32) diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp index 3edd97a6b21..c219596811a 100644 --- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp +++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp @@ -2298,7 +2298,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) { SDLoc dl(Op); EVT VT = Op.getValueType(); - if (V2.getOpcode() == ISD::UNDEF) + if (V2.isUndef()) V2 = V1; if (SVN->isSplat()) { @@ -2438,9 +2438,9 @@ HexagonTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { SDValue V0 = BVN->getOperand(0); SDValue V1 = BVN->getOperand(1); - if (V0.getOpcode() == ISD::UNDEF) + if (V0.isUndef()) V0 = DAG.getConstant(0, dl, MVT::i32); - if (V1.getOpcode() == ISD::UNDEF) + if (V1.isUndef()) V1 = DAG.getConstant(0, dl, MVT::i32); ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(V0); @@ -2460,7 +2460,7 @@ HexagonTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { // Try to generate a S2_packhl to build v2i16 vectors. if (VT.getSimpleVT() == MVT::v2i16) { for (unsigned i = 0, e = NElts; i != e; ++i) { - if (BVN->getOperand(i).getOpcode() == ISD::UNDEF) + if (BVN->getOperand(i).isUndef()) continue; ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(BVN->getOperand(i)); // If the element isn't a constant, it is in a register: @@ -2488,7 +2488,7 @@ HexagonTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { // combine, const64, etc. are Big Endian. 
unsigned OpIdx = NElts - i - 1; SDValue Operand = BVN->getOperand(OpIdx); - if (Operand.getOpcode() == ISD::UNDEF) + if (Operand.isUndef()) continue; int64_t Val = 0; diff --git a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp index e040b782564..b1b6aba66f3 100644 --- a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp +++ b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp @@ -2311,7 +2311,7 @@ lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const { } static bool isConstantOrUndef(const SDValue Op) { - if (Op->getOpcode() == ISD::UNDEF) + if (Op->isUndef()) return true; if (isa<ConstantSDNode>(Op)) return true; diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp index d8fa2caa3c3..9edded8f746 100644 --- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp @@ -1516,7 +1516,7 @@ SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) { // See if all of the elements in the buildvector agree across. for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { - if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue; + if (N->getOperand(i).isUndef()) continue; // If the element isn't a constant, bail fully out. if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue(); @@ -1562,7 +1562,7 @@ SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) { // Check to see if this buildvec has a single non-undef value in its elements. for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { - if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue; + if (N->getOperand(i).isUndef()) continue; if (!OpVal.getNode()) OpVal = N->getOperand(i); else if (OpVal != N->getOperand(i)) @@ -6824,7 +6824,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, bool IsConst = true; for (unsigned i = 0; i < 4; ++i) { - if (BVN->getOperand(i).getOpcode() == ISD::UNDEF) continue; + if (BVN->getOperand(i).isUndef()) continue; if (!isa<ConstantSDNode>(BVN->getOperand(i))) { IsConst = false; break; @@ -6839,7 +6839,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, SmallVector<Constant*, 4> CV(4, NegOne); for (unsigned i = 0; i < 4; ++i) { - if (BVN->getOperand(i).getOpcode() == ISD::UNDEF) + if (BVN->getOperand(i).isUndef()) CV[i] = UndefValue::get(Type::getFloatTy(*DAG.getContext())); else if (isNullConstant(BVN->getOperand(i))) continue; @@ -6867,7 +6867,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, SmallVector<SDValue, 4> Stores; for (unsigned i = 0; i < 4; ++i) { - if (BVN->getOperand(i).getOpcode() == ISD::UNDEF) continue; + if (BVN->getOperand(i).isUndef()) continue; unsigned Offset = 4*i; SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); @@ -7175,7 +7175,7 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, if (VT.getVectorNumElements() != 4) return SDValue(); - if (V2.getOpcode() == ISD::UNDEF) V2 = V1; + if (V2.isUndef()) V2 = V1; int AlignIdx = PPC::isQVALIGNIShuffleMask(SVOp); if (AlignIdx != -1) { @@ -7213,7 +7213,7 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, // Cases that are handled by instructions that take permute immediates // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be // selected by the instruction selector. 
- if (V2.getOpcode() == ISD::UNDEF) { + if (V2.isUndef()) { if (PPC::isSplatShuffleMask(SVOp, 1) || PPC::isSplatShuffleMask(SVOp, 2) || PPC::isSplatShuffleMask(SVOp, 4) || @@ -7311,7 +7311,7 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant // vector that will get spilled to the constant pool. - if (V2.getOpcode() == ISD::UNDEF) V2 = V1; + if (V2.isUndef()) V2 = V1; // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except // that it is in input element units, not in bytes. Convert now. diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp index 4ce211fd5f5..37613126d6f 100644 --- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp +++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp @@ -2671,7 +2671,7 @@ static SDValue LowerF128Load(SDValue Op, SelectionDAG &DAG) { SDLoc dl(Op); LoadSDNode *LdNode = dyn_cast<LoadSDNode>(Op.getNode()); - assert(LdNode && LdNode->getOffset().getOpcode() == ISD::UNDEF + assert(LdNode && LdNode->getOffset().isUndef() && "Unexpected node type"); unsigned alignment = LdNode->getAlignment(); @@ -2732,7 +2732,7 @@ static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG) { SDLoc dl(Op); StoreSDNode *StNode = dyn_cast<StoreSDNode>(Op.getNode()); - assert(StNode && StNode->getOffset().getOpcode() == ISD::UNDEF + assert(StNode && StNode->getOffset().isUndef() && "Unexpected node type"); SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32); SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32); diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp index 0cafa21678a..29939730307 100644 --- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp +++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp @@ -3703,7 +3703,7 @@ void GeneralShuffle::add(SDValue Op, unsigned Elem) { } Op = Op.getOperand(unsigned(NewByte) / SystemZ::VectorBytes); Byte = unsigned(NewByte) % SystemZ::VectorBytes; - } else if (Op.getOpcode() == ISD::UNDEF) { + } else if (Op.isUndef()) { addUndef(); return; } else @@ -3822,7 +3822,7 @@ static SDValue buildScalarToVector(SelectionDAG &DAG, SDLoc DL, EVT VT, SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Value); return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Ops); } - if (Value.getOpcode() == ISD::UNDEF) + if (Value.isUndef()) return DAG.getUNDEF(VT); return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value); } @@ -3831,12 +3831,12 @@ static SDValue buildScalarToVector(SelectionDAG &DAG, SDLoc DL, EVT VT, // element 1. Used for cases in which replication is cheap. static SDValue buildMergeScalars(SelectionDAG &DAG, SDLoc DL, EVT VT, SDValue Op0, SDValue Op1) { - if (Op0.getOpcode() == ISD::UNDEF) { - if (Op1.getOpcode() == ISD::UNDEF) + if (Op0.isUndef()) { + if (Op1.isUndef()) return DAG.getUNDEF(VT); return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op1); } - if (Op1.getOpcode() == ISD::UNDEF) + if (Op1.isUndef()) return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0); return DAG.getNode(SystemZISD::MERGE_HIGH, DL, VT, buildScalarToVector(DAG, DL, VT, Op0), @@ -3847,13 +3847,13 @@ static SDValue buildMergeScalars(SelectionDAG &DAG, SDLoc DL, EVT VT, // vector for them. 
static SDValue joinDwords(SelectionDAG &DAG, SDLoc DL, SDValue Op0, SDValue Op1) { - if (Op0.getOpcode() == ISD::UNDEF && Op1.getOpcode() == ISD::UNDEF) + if (Op0.isUndef() && Op1.isUndef()) return DAG.getUNDEF(MVT::v2i64); // If one of the two inputs is undefined then replicate the other one, // in order to avoid using another register unnecessarily. - if (Op0.getOpcode() == ISD::UNDEF) + if (Op0.isUndef()) Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1); - else if (Op1.getOpcode() == ISD::UNDEF) + else if (Op1.isUndef()) Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0); else { Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0); @@ -3955,7 +3955,7 @@ static SDValue tryBuildVectorShuffle(SelectionDAG &DAG, unsigned Elem = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); GS.add(Op.getOperand(0), Elem); FoundOne = true; - } else if (Op.getOpcode() == ISD::UNDEF) { + } else if (Op.isUndef()) { GS.addUndef(); } else { GS.add(SDValue(), ResidueOps.size()); @@ -4034,9 +4034,9 @@ static SDValue buildVector(SelectionDAG &DAG, SDLoc DL, EVT VT, SDValue Op01 = buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]); SDValue Op23 = buildMergeScalars(DAG, DL, VT, Elems[2], Elems[3]); // Avoid unnecessary undefs by reusing the other operand. - if (Op01.getOpcode() == ISD::UNDEF) + if (Op01.isUndef()) Op01 = Op23; - else if (Op23.getOpcode() == ISD::UNDEF) + else if (Op23.isUndef()) Op23 = Op01; // Merging identical replications is a no-op. if (Op01.getOpcode() == SystemZISD::REPLICATE && Op01 == Op23) diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index 72a91241d8d..693940ffb1d 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -4585,7 +4585,7 @@ static SDValue ExtractSubVector(SDValue Vec, unsigned IdxVal, VT.getVectorNumElements()/Factor); // Extract from UNDEF is UNDEF. - if (Vec.getOpcode() == ISD::UNDEF) + if (Vec.isUndef()) return DAG.getUNDEF(ResultVT); // Extract the relevant vectorWidth bits. 
Generate an EXTRACT_SUBVECTOR @@ -4631,7 +4631,7 @@ static SDValue InsertSubVector(SDValue Result, SDValue Vec, assert((vectorWidth == 128 || vectorWidth == 256) && "Unsupported vector width"); // Inserting UNDEF is Result - if (Vec.getOpcode() == ISD::UNDEF) + if (Vec.isUndef()) return Result; EVT VT = Vec.getValueType(); EVT ElVT = VT.getVectorElementType(); @@ -4942,7 +4942,7 @@ static bool getTargetShuffleMaskIndices(SDValue MaskNode, for (int i = 0, e = MaskNode.getNumOperands(); i < e; ++i) { SDValue Op = MaskNode.getOperand(i); - if (Op->getOpcode() == ISD::UNDEF) { + if (Op->isUndef()) { RawMask.push_back((uint64_t)SM_SentinelUndef); continue; } @@ -5472,7 +5472,7 @@ static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG, std::bitset<4> Zeroable; for (int i=0; i < 4; ++i) { SDValue Elt = Op->getOperand(i); - Zeroable[i] = (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt)); + Zeroable[i] = (Elt.isUndef() || X86::isZeroNode(Elt)); } assert(Zeroable.size() - Zeroable.count() > 1 && "We expect at least two non-zero elements!"); @@ -6161,7 +6161,7 @@ X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const { int SplatIdx = -1; for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) { SDValue In = Op.getOperand(idx); - if (In.getOpcode() == ISD::UNDEF) + if (In.isUndef()) continue; if (!isa<ConstantSDNode>(In)) NonConstIdx.push_back(idx); @@ -6245,7 +6245,7 @@ static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode, SDValue Op = N->getOperand(i + BaseIdx); // Skip UNDEFs. - if (Op->getOpcode() == ISD::UNDEF) { + if (Op->isUndef()) { // Update the expected vector extract index. if (i * 2 == NumElts) ExpectedVExtractIdx = BaseIdx; @@ -6275,13 +6275,13 @@ static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode, unsigned I1 = cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue(); if (i * 2 < NumElts) { - if (V0.getOpcode() == ISD::UNDEF) { + if (V0.isUndef()) { V0 = Op0.getOperand(0); if (V0.getValueType() != VT) return false; } } else { - if (V1.getOpcode() == ISD::UNDEF) { + if (V1.isUndef()) { V1 = Op0.getOperand(0); if (V1.getValueType() != VT) return false; @@ -6439,12 +6439,12 @@ static SDValue LowerToAddSub(const BuildVectorSDNode *BV, SubFound = true; // Update InVec0 and InVec1. - if (InVec0.getOpcode() == ISD::UNDEF) { + if (InVec0.isUndef()) { InVec0 = Op0.getOperand(0); if (InVec0.getSimpleValueType() != VT) return SDValue(); } - if (InVec1.getOpcode() == ISD::UNDEF) { + if (InVec1.isUndef()) { InVec1 = Op1.getOperand(0); if (InVec1.getSimpleValueType() != VT) return SDValue(); @@ -6490,11 +6490,11 @@ static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV, // Count the number of UNDEF operands in the build_vector in input. 
for (unsigned i = 0, e = Half; i != e; ++i) - if (BV->getOperand(i)->getOpcode() == ISD::UNDEF) + if (BV->getOperand(i)->isUndef()) NumUndefsLO++; for (unsigned i = Half, e = NumElts; i != e; ++i) - if (BV->getOperand(i)->getOpcode() == ISD::UNDEF) + if (BV->getOperand(i)->isUndef()) NumUndefsHI++; // Early exit if this is either a build_vector of all UNDEFs or all the @@ -6529,18 +6529,14 @@ static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV, SDValue InVec2, InVec3; if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, Half, InVec0, InVec1) && isHorizontalBinOp(BV, ISD::FADD, DAG, Half, NumElts, InVec2, InVec3) && - ((InVec0.getOpcode() == ISD::UNDEF || - InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) && - ((InVec1.getOpcode() == ISD::UNDEF || - InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3)) + ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) && + ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3)) return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1); if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, Half, InVec0, InVec1) && isHorizontalBinOp(BV, ISD::FSUB, DAG, Half, NumElts, InVec2, InVec3) && - ((InVec0.getOpcode() == ISD::UNDEF || - InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) && - ((InVec1.getOpcode() == ISD::UNDEF || - InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3)) + ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) && + ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3)) return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1); } else if (VT == MVT::v8i32 || VT == MVT::v16i16) { // Try to match an AVX2 horizontal add/sub of signed integers. @@ -6550,17 +6546,13 @@ static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV, if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) && isHorizontalBinOp(BV, ISD::ADD, DAG, Half, NumElts, InVec2, InVec3) && - ((InVec0.getOpcode() == ISD::UNDEF || - InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) && - ((InVec1.getOpcode() == ISD::UNDEF || - InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3)) + ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) && + ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3)) X86Opcode = X86ISD::HADD; else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, Half, InVec0, InVec1) && isHorizontalBinOp(BV, ISD::SUB, DAG, Half, NumElts, InVec2, InVec3) && - ((InVec0.getOpcode() == ISD::UNDEF || - InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) && - ((InVec1.getOpcode() == ISD::UNDEF || - InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3)) + ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) && + ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3)) X86Opcode = X86ISD::HSUB; else CanFold = false; @@ -6679,7 +6671,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { SmallSet<SDValue, 8> Values; for (unsigned i = 0; i < NumElems; ++i) { SDValue Elt = Op.getOperand(i); - if (Elt.getOpcode() == ISD::UNDEF) + if (Elt.isUndef()) continue; Values.insert(Elt); if (Elt.getOpcode() != ISD::Constant && @@ -6924,7 +6916,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { Result = DAG.getUNDEF(VT); for (unsigned i = 1; i < NumElems; ++i) { - if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue; + if (Op.getOperand(i).isUndef()) continue; Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result, Op.getOperand(i), DAG.getIntPtrConstant(i, dl)); } @@ -6954,7 +6946,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, 
SelectionDAG &DAG) const { // right place, the one element (since it's the first round) being // inserted as undef can be dropped. This isn't safe for successive // rounds because they will permute elements within both vectors. - if (Ops[i+EltStride].getOpcode() == ISD::UNDEF && + if (Ops[i+EltStride].isUndef() && EltStride == NumElems/2) continue; @@ -7274,7 +7266,7 @@ static SmallBitVector computeZeroableShuffleElements(ArrayRef<int> Mask, SDValue Input = V.getOperand(M % Size); // The UNDEF opcode check really should be dead code here, but not quite // worth asserting on (it isn't invalid, just unexpected). - if (Input.getOpcode() == ISD::UNDEF || X86::isZeroNode(Input)) + if (Input.isUndef() || X86::isZeroNode(Input)) Zeroable[i] = true; } @@ -11969,8 +11961,8 @@ static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget &Subtarget, assert((VT.getSizeInBits() != 64 || Is1BitVector) && "Can't lower MMX shuffles"); - bool V1IsUndef = V1.getOpcode() == ISD::UNDEF; - bool V2IsUndef = V2.getOpcode() == ISD::UNDEF; + bool V1IsUndef = V1.isUndef(); + bool V2IsUndef = V2.isUndef(); if (V1IsUndef && V2IsUndef) return DAG.getUNDEF(VT); @@ -12441,7 +12433,7 @@ X86TargetLowering::InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const { if (IdxVal) EltInVec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec, DAG.getConstant(IdxVal, dl, MVT::i8)); - if (Vec.getOpcode() == ISD::UNDEF) + if (Vec.isUndef()) return EltInVec; return DAG.getNode(ISD::OR, dl, VecVT, Vec, EltInVec); } @@ -16751,7 +16743,7 @@ static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT, case X86ISD::VSHLI: for (unsigned i=0; i!=NumElts; ++i) { SDValue CurrentOp = SrcOp->getOperand(i); - if (CurrentOp->getOpcode() == ISD::UNDEF) { + if (CurrentOp->isUndef()) { Elts.push_back(CurrentOp); continue; } @@ -16763,7 +16755,7 @@ static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT, case X86ISD::VSRLI: for (unsigned i=0; i!=NumElts; ++i) { SDValue CurrentOp = SrcOp->getOperand(i); - if (CurrentOp->getOpcode() == ISD::UNDEF) { + if (CurrentOp->isUndef()) { Elts.push_back(CurrentOp); continue; } @@ -16775,7 +16767,7 @@ static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT, case X86ISD::VSRAI: for (unsigned i=0; i!=NumElts; ++i) { SDValue CurrentOp = SrcOp->getOperand(i); - if (CurrentOp->getOpcode() == ISD::UNDEF) { + if (CurrentOp->isUndef()) { Elts.push_back(CurrentOp); continue; } @@ -16932,7 +16924,7 @@ static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask, OpcodeSelect = X86ISD::SELECT; break; } - if (PreservedSrc.getOpcode() == ISD::UNDEF) + if (PreservedSrc.isUndef()) PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl); return DAG.getNode(OpcodeSelect, dl, VT, VMask, Op, PreservedSrc); } @@ -16962,7 +16954,7 @@ static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask, Op.getOpcode() == X86ISD::VFPCLASSS) return DAG.getNode(ISD::OR, dl, VT, Op, IMask); - if (PreservedSrc.getOpcode() == ISD::UNDEF) + if (PreservedSrc.isUndef()) PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl); return DAG.getNode(X86ISD::SELECT, dl, VT, IMask, Op, PreservedSrc); } @@ -17803,7 +17795,7 @@ static SDValue getGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG, SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other); SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32); SDValue Segment = DAG.getRegister(0, MVT::i32); - if (Src.getOpcode() == ISD::UNDEF) + if (Src.isUndef()) Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl); SDValue Ops[] = {Src, 
VMask, Base, Scale, Index, Disp, Segment, Chain}; SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops); @@ -19362,7 +19354,7 @@ static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG, // Check if this build_vector node is doing a splat. // If so, then set BaseShAmt equal to the splat value. BaseShAmt = BV->getSplatValue(); - if (BaseShAmt && BaseShAmt.getOpcode() == ISD::UNDEF) + if (BaseShAmt && BaseShAmt.isUndef()) BaseShAmt = SDValue(); } else { if (Amt.getOpcode() == ISD::EXTRACT_SUBVECTOR) @@ -19497,7 +19489,7 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget, for (unsigned i=0; i !=NumElems; ++i) { SDValue Op = Amt->getOperand(i); - if (Op->getOpcode() == ISD::UNDEF) { + if (Op->isUndef()) { Elts.push_back(Op); continue; } @@ -24469,8 +24461,7 @@ static SDValue combineTargetShuffle(SDValue N, SelectionDAG &DAG, auto Op0 = N.getOperand(0); auto Op1 = N.getOperand(1); - if (Op0.getOpcode() == ISD::UNDEF && - Op1.getNode()->getOpcode() == ISD::VECTOR_SHUFFLE) { + if (Op0.isUndef() && Op1.getNode()->getOpcode() == ISD::VECTOR_SHUFFLE) { ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op1.getNode())->getMask(); unsigned NumElts = VT.getVectorNumElements(); @@ -24813,8 +24804,7 @@ static SDValue combineShuffle(SDNode *N, SelectionDAG &DAG, if (!DCI.isBeforeLegalize() && DCI.isBeforeLegalizeOps() && N->getOpcode() == ISD::VECTOR_SHUFFLE && N->getOperand(0).getOpcode() == ISD::BITCAST && - N->getOperand(1).getOpcode() == ISD::UNDEF && - N->getOperand(0).hasOneUse()) { + N->getOperand(1).isUndef() && N->getOperand(0).hasOneUse()) { SDValue N0 = N->getOperand(0); SDValue N1 = N->getOperand(1); @@ -25209,7 +25199,7 @@ transformVSELECTtoBlendVECTOR_SHUFFLE(SDNode *N, SelectionDAG &DAG, SmallVector<int, 8> ShuffleMask(NumElems, -1); for (unsigned i = 0; i < NumElems; ++i) { // Be sure we emit undef where we can. - if (Cond.getOperand(i)->getOpcode() == ISD::UNDEF) + if (Cond.getOperand(i)->isUndef()) ShuffleMask[i] = -1; else ShuffleMask[i] = i + NumElems * ((MaskValue >> i) & 1); @@ -27315,7 +27305,7 @@ static int getOneTrueElt(SDValue V) { unsigned NumElts = BV->getValueType(0).getVectorNumElements(); for (unsigned i = 0; i < NumElts; ++i) { const SDValue &Op = BV->getOperand(i); - if (Op.getOpcode() == ISD::UNDEF) + if (Op.isUndef()) continue; auto *ConstNode = dyn_cast<ConstantSDNode>(Op); if (!ConstNode) |