Diffstat (limited to 'llvm/lib/Target')
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp         |  2
-rw-r--r--  llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp                 |  2
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonISelLowering.cpp         |  2
-rw-r--r--  llvm/lib/Target/Mips/MipsFastISel.cpp                   |  2
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp               |  5
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp             |  2
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp          |  6
-rw-r--r--  llvm/lib/Target/R600/AMDGPUISelDAGToDAG.cpp             |  2
-rw-r--r--  llvm/lib/Target/R600/AMDGPUInstructions.td              |  4
-rw-r--r--  llvm/lib/Target/R600/R600ISelLowering.cpp               |  4
-rw-r--r--  llvm/lib/Target/R600/R600TextureIntrinsicsReplacer.cpp  |  4
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp                 | 12
-rw-r--r--  llvm/lib/Target/X86/X86InstrFragmentsSIMD.td            | 28
13 files changed, 36 insertions, 39 deletions
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 0a47dcb725f..f75700d6d33 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -848,7 +848,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
   // MOV X0, WideImmediate
   // LDR X2, [BaseReg, X0]
   if (isa<ConstantSDNode>(RHS)) {
-    int64_t ImmOff = (int64_t)dyn_cast<ConstantSDNode>(RHS)->getZExtValue();
+    int64_t ImmOff = (int64_t)cast<ConstantSDNode>(RHS)->getZExtValue();
     unsigned Scale = Log2_32(Size);
     // Skip the immediate can be seleced by load/store addressing mode.
     // Also skip the immediate can be encoded by a single ADD (SUB is also
diff --git a/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp b/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp
index b91b0e1390d..b2599fe9637 100644
--- a/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp
+++ b/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp
@@ -132,7 +132,7 @@ SDNode *BPFDAGToDAGISel::Select(SDNode *Node) {
   }
 
   case ISD::FrameIndex: {
-    int FI = dyn_cast<FrameIndexSDNode>(Node)->getIndex();
+    int FI = cast<FrameIndexSDNode>(Node)->getIndex();
     EVT VT = Node->getValueType(0);
     SDValue TFI = CurDAG->getTargetFrameIndex(FI, VT);
     unsigned Opc = BPF::MOV_rr;
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index a2209ab187e..51b79cdef9b 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -2106,7 +2106,7 @@ HexagonTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
       // is Big Endian.
       unsigned OpIdx = NElts - i - 1;
       SDValue Operand = BVN->getOperand(OpIdx);
-      if (dyn_cast<ConstantSDNode>(Operand))
+      if (isa<ConstantSDNode>(Operand))
         // This operand is already in ConstVal.
         continue;
 
diff --git a/llvm/lib/Target/Mips/MipsFastISel.cpp b/llvm/lib/Target/Mips/MipsFastISel.cpp
index 7de00818337..e8e3d3d4b92 100644
--- a/llvm/lib/Target/Mips/MipsFastISel.cpp
+++ b/llvm/lib/Target/Mips/MipsFastISel.cpp
@@ -440,7 +440,7 @@ bool MipsFastISel::computeAddress(const Value *Obj, Address &Addr) {
 
 bool MipsFastISel::computeCallAddress(const Value *V, Address &Addr) {
   const GlobalValue *GV = dyn_cast<GlobalValue>(V);
-  if (GV && isa<Function>(GV) && dyn_cast<Function>(GV)->isIntrinsic())
+  if (GV && isa<Function>(GV) && cast<Function>(GV)->isIntrinsic())
     return false;
   if (!GV)
     return false;
diff --git a/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
index 477d3c7a546..22178f635b1 100644
--- a/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
@@ -1765,12 +1765,11 @@ void NVPTXAsmPrinter::bufferLEByte(const Constant *CPV, int Bytes,
   case Type::IntegerTyID: {
     const Type *ETy = CPV->getType();
     if (ETy == Type::getInt8Ty(CPV->getContext())) {
-      unsigned char c =
-          (unsigned char)(dyn_cast<ConstantInt>(CPV))->getZExtValue();
+      unsigned char c = (unsigned char)cast<ConstantInt>(CPV)->getZExtValue();
      ptr = &c;
      aggBuffer->addBytes(ptr, 1, Bytes);
    } else if (ETy == Type::getInt16Ty(CPV->getContext())) {
-      short int16 = (short)(dyn_cast<ConstantInt>(CPV))->getZExtValue();
+      short int16 = (short)cast<ConstantInt>(CPV)->getZExtValue();
      ptr = (unsigned char *)&int16;
      aggBuffer->addBytes(ptr, 2, Bytes);
    } else if (ETy == Type::getInt32Ty(CPV->getContext())) {
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index ff74e6e8ff3..8b0665708b9 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -3893,7 +3893,7 @@ static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
     const SDNode *left = N0.getOperand(0).getNode();
     const SDNode *right = N0.getOperand(1).getNode();
 
-    if (dyn_cast<ConstantSDNode>(left) || dyn_cast<ConstantSDNode>(right))
+    if (isa<ConstantSDNode>(left) || isa<ConstantSDNode>(right))
       opIsLive = true;
 
     if (!opIsLive)
diff --git a/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp b/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
index 32646ee1133..6ab0fadf9a3 100644
--- a/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
@@ -70,8 +70,8 @@ static void convertTransferToLoop(
 
   // srcAddr and dstAddr are expected to be pointer types,
   // so no check is made here.
-  unsigned srcAS = dyn_cast<PointerType>(srcAddr->getType())->getAddressSpace();
-  unsigned dstAS = dyn_cast<PointerType>(dstAddr->getType())->getAddressSpace();
+  unsigned srcAS = cast<PointerType>(srcAddr->getType())->getAddressSpace();
+  unsigned dstAS = cast<PointerType>(dstAddr->getType())->getAddressSpace();
 
   // Cast pointers to (char *)
   srcAddr = builder.CreateBitCast(srcAddr, Type::getInt8PtrTy(Context, srcAS));
@@ -108,7 +108,7 @@ static void convertMemSetToLoop(Instruction *splitAt, Value *dstAddr,
   origBB->getTerminator()->setSuccessor(0, loopBB);
   IRBuilder<> builder(origBB, origBB->getTerminator());
 
-  unsigned dstAS = dyn_cast<PointerType>(dstAddr->getType())->getAddressSpace();
+  unsigned dstAS = cast<PointerType>(dstAddr->getType())->getAddressSpace();
 
   // Cast pointer to the type of value getting stored
   dstAddr =
diff --git a/llvm/lib/Target/R600/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
index 7341cd97e61..def252a47b2 100644
--- a/llvm/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
+++ b/llvm/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
@@ -345,7 +345,7 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
     unsigned NOps = N->getNumOperands();
     for (unsigned i = 0; i < NOps; i++) {
       // XXX: Why is this here?
-      if (dyn_cast<RegisterSDNode>(N->getOperand(i))) {
+      if (isa<RegisterSDNode>(N->getOperand(i))) {
        IsRegSeq = false;
        break;
      }
diff --git a/llvm/lib/Target/R600/AMDGPUInstructions.td b/llvm/lib/Target/R600/AMDGPUInstructions.td
index 4d08201d55e..eeb7f3fcde5 100644
--- a/llvm/lib/Target/R600/AMDGPUInstructions.td
+++ b/llvm/lib/Target/R600/AMDGPUInstructions.td
@@ -358,7 +358,7 @@ def atomic_load_umax_local : local_binary_atomic_op<atomic_load_umax>;
 
 def mskor_global : PatFrag<(ops node:$val, node:$ptr),
                             (AMDGPUstore_mskor node:$val, node:$ptr), [{
-  return dyn_cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
+  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
 }]>;
 
 
@@ -389,7 +389,7 @@ def flat_store : PatFrag<(ops node:$val, node:$ptr),
 
 def mskor_flat : PatFrag<(ops node:$val, node:$ptr),
                           (AMDGPUstore_mskor node:$val, node:$ptr), [{
-  return dyn_cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::FLAT_ADDRESS;
+  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::FLAT_ADDRESS;
 }]>;
 
 class global_binary_atomic_op<SDNode atomic_op> : PatFrag<
diff --git a/llvm/lib/Target/R600/R600ISelLowering.cpp b/llvm/lib/Target/R600/R600ISelLowering.cpp
index a34e2dc8f5c..b6b7067f7e1 100644
--- a/llvm/lib/Target/R600/R600ISelLowering.cpp
+++ b/llvm/lib/Target/R600/R600ISelLowering.cpp
@@ -1811,7 +1811,7 @@ SDValue Swz[4], SelectionDAG &DAG) const {
 
   BuildVector = CompactSwizzlableVector(DAG, BuildVector, SwizzleRemap);
   for (unsigned i = 0; i < 4; i++) {
-    unsigned Idx = dyn_cast<ConstantSDNode>(Swz[i])->getZExtValue();
+    unsigned Idx = cast<ConstantSDNode>(Swz[i])->getZExtValue();
     if (SwizzleRemap.find(Idx) != SwizzleRemap.end())
       Swz[i] = DAG.getConstant(SwizzleRemap[Idx], MVT::i32);
   }
@@ -1819,7 +1819,7 @@ SDValue Swz[4], SelectionDAG &DAG) const {
   SwizzleRemap.clear();
   BuildVector = ReorganizeVector(DAG, BuildVector, SwizzleRemap);
   for (unsigned i = 0; i < 4; i++) {
-    unsigned Idx = dyn_cast<ConstantSDNode>(Swz[i])->getZExtValue();
+    unsigned Idx = cast<ConstantSDNode>(Swz[i])->getZExtValue();
     if (SwizzleRemap.find(Idx) != SwizzleRemap.end())
       Swz[i] = DAG.getConstant(SwizzleRemap[Idx], MVT::i32);
   }
diff --git a/llvm/lib/Target/R600/R600TextureIntrinsicsReplacer.cpp b/llvm/lib/Target/R600/R600TextureIntrinsicsReplacer.cpp
index 419ec8b3d7e..2fc7b02f673 100644
--- a/llvm/lib/Target/R600/R600TextureIntrinsicsReplacer.cpp
+++ b/llvm/lib/Target/R600/R600TextureIntrinsicsReplacer.cpp
@@ -162,7 +162,7 @@ class R600TextureIntrinsicsReplacer :
     Value *SamplerId = I.getArgOperand(2);
 
     unsigned TextureType =
-        dyn_cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
+        cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
 
     unsigned SrcSelect[4] = { 0, 1, 2, 3 };
     unsigned CT[4] = {1, 1, 1, 1};
@@ -186,7 +186,7 @@ class R600TextureIntrinsicsReplacer :
     Value *SamplerId = I.getArgOperand(5);
 
     unsigned TextureType =
-        dyn_cast<ConstantInt>(I.getArgOperand(6))->getZExtValue();
+        cast<ConstantInt>(I.getArgOperand(6))->getZExtValue();
 
     unsigned SrcSelect[4] = { 0, 1, 2, 3 };
     unsigned CT[4] = {1, 1, 1, 1};
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 2101724588b..4a031919395 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -11970,7 +11970,7 @@ static SDValue LowerZERO_EXTEND_AVX512(SDValue Op,
   // Now we have only mask extension
   assert(InVT.getVectorElementType() == MVT::i1);
   SDValue Cst = DAG.getTargetConstant(1, ExtVT.getScalarType());
-  const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
+  const Constant *C = cast<ConstantSDNode>(Cst)->getConstantIntValue();
   SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
   unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
   SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
@@ -12046,7 +12046,7 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
   }
 
   SDValue Cst = DAG.getTargetConstant(1, InVT.getVectorElementType());
-  const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
+  const Constant *C = cast<ConstantSDNode>(Cst)->getConstantIntValue();
   SDValue CP = DAG.getConstantPool(C, getPointerTy());
   unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
   SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
@@ -15287,10 +15287,8 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
   }
   case PREFETCH: {
     SDValue Hint = Op.getOperand(6);
-    unsigned HintVal;
-    if (dyn_cast<ConstantSDNode> (Hint) == nullptr ||
-        (HintVal = dyn_cast<ConstantSDNode> (Hint)->getZExtValue()) > 1)
-      llvm_unreachable("Wrong prefetch hint in intrinsic: should be 0 or 1");
+    unsigned HintVal = cast<ConstantSDNode>(Hint)->getZExtValue();
+    assert(HintVal < 2 && "Wrong prefetch hint in intrinsic: should be 0 or 1");
     unsigned Opcode = (HintVal ? IntrData->Opc1 : IntrData->Opc0);
     SDValue Chain = Op.getOperand(0);
     SDValue Mask = Op.getOperand(2);
@@ -24242,7 +24240,7 @@ TargetLowering::ConstraintWeight
     break;
   case 'G':
   case 'C':
-    if (dyn_cast<ConstantFP>(CallOperandVal)) {
+    if (isa<ConstantFP>(CallOperandVal)) {
       weight = CW_Constant;
     }
     break;
diff --git a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
index 0bdabdf30d0..b75a9f4b2d0 100644
--- a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
+++ b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
@@ -631,53 +631,53 @@ def vinsert256_insert : PatFrag<(ops node:$bigvec, node:$smallvec,
 
 def masked_load_aligned128 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                          (masked_load node:$src1, node:$src2, node:$src3), [{
-  if (dyn_cast<MaskedLoadSDNode>(N))
-    return cast<MaskedLoadSDNode>(N)->getAlignment() >= 16;
+  if (auto *Load = dyn_cast<MaskedLoadSDNode>(N))
+    return Load->getAlignment() >= 16;
   return false;
 }]>;
 
 def masked_load_aligned256 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                          (masked_load node:$src1, node:$src2, node:$src3), [{
-  if (dyn_cast<MaskedLoadSDNode>(N))
-    return cast<MaskedLoadSDNode>(N)->getAlignment() >= 32;
+  if (auto *Load = dyn_cast<MaskedLoadSDNode>(N))
+    return Load->getAlignment() >= 32;
   return false;
 }]>;
 
 def masked_load_aligned512 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                          (masked_load node:$src1, node:$src2, node:$src3), [{
-  if (dyn_cast<MaskedLoadSDNode>(N))
-    return cast<MaskedLoadSDNode>(N)->getAlignment() >= 64;
+  if (auto *Load = dyn_cast<MaskedLoadSDNode>(N))
+    return Load->getAlignment() >= 64;
   return false;
 }]>;
 
 def masked_load_unaligned : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                          (masked_load node:$src1, node:$src2, node:$src3), [{
-  return (dyn_cast<MaskedLoadSDNode>(N) != 0);
+  return isa<MaskedLoadSDNode>(N);
 }]>;
 
 def masked_store_aligned128 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                          (masked_store node:$src1, node:$src2, node:$src3), [{
-  if (dyn_cast<MaskedStoreSDNode>(N))
-    return cast<MaskedStoreSDNode>(N)->getAlignment() >= 16;
+  if (auto *Store = dyn_cast<MaskedStoreSDNode>(N))
+    return Store->getAlignment() >= 16;
   return false;
 }]>;
 
 def masked_store_aligned256 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                          (masked_store node:$src1, node:$src2, node:$src3), [{
-  if (dyn_cast<MaskedStoreSDNode>(N))
-    return cast<MaskedStoreSDNode>(N)->getAlignment() >= 32;
+  if (auto *Store = dyn_cast<MaskedStoreSDNode>(N))
+    return Store->getAlignment() >= 32;
   return false;
 }]>;
 
 def masked_store_aligned512 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                          (masked_store node:$src1, node:$src2, node:$src3), [{
-  if (dyn_cast<MaskedStoreSDNode>(N))
-    return cast<MaskedStoreSDNode>(N)->getAlignment() >= 64;
+  if (auto *Store = dyn_cast<MaskedStoreSDNode>(N))
+    return Store->getAlignment() >= 64;
   return false;
 }]>;
 
 def masked_store_unaligned : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                          (masked_store node:$src1, node:$src2, node:$src3), [{
-  return (dyn_cast<MaskedStoreSDNode>(N) != 0);
+  return isa<MaskedStoreSDNode>(N);
 }]>;
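
Every hunk above applies the same rule from the LLVM Programmer's Manual: dyn_cast<> returns null when the dynamic type does not match, so its result must be checked before use. Where the surrounding code has already established the type, cast<> (which asserts instead of returning null) is the right primitive; where only a yes/no answer is needed, isa<> is. What follows is a minimal sketch of the three operations against llvm/Support/Casting.h; the Shape/Circle hierarchy is hypothetical and not part of this patch.

#include "llvm/Support/Casting.h"

// Hypothetical hierarchy wired up for LLVM-style RTTI.
struct Shape {
  enum Kind { SK_Circle, SK_Square };
  Shape(Kind K) : TheKind(K) {}
  Kind getKind() const { return TheKind; }

private:
  const Kind TheKind;
};

struct Circle : Shape {
  Circle() : Shape(SK_Circle) {}
  // Hook consulted by llvm::isa<>, llvm::cast<>, and llvm::dyn_cast<>.
  static bool classof(const Shape *S) { return S->getKind() == SK_Circle; }
};

void demo(Shape *S) {
  // isa<>: a pure predicate; no pointer is produced that could go unchecked.
  if (llvm::isa<Circle>(S)) {
    // cast<>: asserts on mismatch and never returns null, so it is the
    // right tool once the type is already guaranteed, as it is here.
    Circle *C = llvm::cast<Circle>(S);
    (void)C;
  }
  // dyn_cast<>: returns null on mismatch, so the result must be tested.
  if (Circle *C = llvm::dyn_cast<Circle>(S))
    (void)C; // safe to use C only inside the check
}

With that distinction, the mechanical changes above fall into two groups: dyn_cast<> results that were immediately dereferenced become cast<>, and results that were only tested for null become isa<>.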