Diffstat (limited to 'mlir/lib')
24 files changed, 61 insertions, 61 deletions
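Every hunk below applies the same mechanical rewrite: free-function casts on operations (dyn_cast<OpTy>(op) for an Operation* or Operation&) are replaced with the member template form (op->dyn_cast<OpTy>() / op.dyn_cast<OpTy>(), and parent->template dyn_cast<T>() in dependent contexts). A minimal sketch of the two spellings follows, assuming an MLIR checkout matching this revision; the header paths and the choice of LoadOp are illustrative assumptions only, and the snippet does not build outside such a tree:

#include "mlir/IR/Operation.h"
#include "mlir/StandardOps/Ops.h"

static void inspect(mlir::Operation *opInst) {
  // Old spelling, removed by this change (free-function form):
  //   if (auto loadOp = dyn_cast<mlir::LoadOp>(opInst)) { ... }
  // New spelling, added by this change (member template form):
  if (auto loadOp = opInst->dyn_cast<mlir::LoadOp>()) {
    // The cast yields a typed op handle; the underlying Operation is unchanged.
    (void)loadOp.getMemRef();
  }
}

Both spellings return a null handle when the operation is not of the requested type, so the surrounding control flow in the hunks below is unaffected.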
diff --git a/mlir/lib/AffineOps/AffineOps.cpp b/mlir/lib/AffineOps/AffineOps.cpp
index 2dfed934ee0..51209da7385 100644
--- a/mlir/lib/AffineOps/AffineOps.cpp
+++ b/mlir/lib/AffineOps/AffineOps.cpp
@@ -61,11 +61,11 @@ bool mlir::isValidDim(Value *value) {
   if (op->getParentOp() == nullptr || op->isa<ConstantOp>())
     return true;
   // Affine apply operation is ok if all of its operands are ok.
-  if (auto applyOp = dyn_cast<AffineApplyOp>(op))
+  if (auto applyOp = op->dyn_cast<AffineApplyOp>())
     return applyOp.isValidDim();
   // The dim op is okay if its operand memref/tensor is defined at the top
   // level.
-  if (auto dimOp = dyn_cast<DimOp>(op))
+  if (auto dimOp = op->dyn_cast<DimOp>())
     return isTopLevelSymbol(dimOp.getOperand());
   return false;
 }
@@ -86,11 +86,11 @@ bool mlir::isValidSymbol(Value *value) {
   if (op->getParentOp() == nullptr || op->isa<ConstantOp>())
     return true;
   // Affine apply operation is ok if all of its operands are ok.
-  if (auto applyOp = dyn_cast<AffineApplyOp>(op))
+  if (auto applyOp = op->dyn_cast<AffineApplyOp>())
     return applyOp.isValidSymbol();
   // The dim op is okay if its operand memref/tensor is defined at the top
   // level.
-  if (auto dimOp = dyn_cast<DimOp>(op))
+  if (auto dimOp = op->dyn_cast<DimOp>())
     return isTopLevelSymbol(dimOp.getOperand());
   return false;
 }
diff --git a/mlir/lib/Analysis/LoopAnalysis.cpp b/mlir/lib/Analysis/LoopAnalysis.cpp
index 60f2b142986..78caa4c2625 100644
--- a/mlir/lib/Analysis/LoopAnalysis.cpp
+++ b/mlir/lib/Analysis/LoopAnalysis.cpp
@@ -320,8 +320,8 @@ isVectorizableLoopBodyWithOpCond(AffineForOp loop,
   loadAndStores.match(forOp, &loadAndStoresMatched);
   for (auto ls : loadAndStoresMatched) {
     auto *op = ls.getMatchedOperation();
-    auto load = dyn_cast<LoadOp>(op);
-    auto store = dyn_cast<StoreOp>(op);
+    auto load = op->dyn_cast<LoadOp>();
+    auto store = op->dyn_cast<StoreOp>();
     // Only scalar types are considered vectorizable, all load/store must be
     // vectorizable for a loop to qualify as vectorizable.
     // TODO(ntv): ponder whether we want to be more general here.
@@ -338,8 +338,8 @@ isVectorizableLoopBodyWithOpCond(AffineForOp loop,
 
 bool mlir::isVectorizableLoopBody(AffineForOp loop, int *memRefDim) {
   VectorizableOpFun fun([memRefDim](AffineForOp loop, Operation &op) {
-    auto load = dyn_cast<LoadOp>(op);
-    auto store = dyn_cast<StoreOp>(op);
+    auto load = op.dyn_cast<LoadOp>();
+    auto store = op.dyn_cast<StoreOp>();
     return load ? isContiguousAccess(loop.getInductionVar(), load, memRefDim)
                 : isContiguousAccess(loop.getInductionVar(), store, memRefDim);
   });
diff --git a/mlir/lib/Analysis/MemRefBoundCheck.cpp b/mlir/lib/Analysis/MemRefBoundCheck.cpp
index 4e23441d5a5..0fb88620fa1 100644
--- a/mlir/lib/Analysis/MemRefBoundCheck.cpp
+++ b/mlir/lib/Analysis/MemRefBoundCheck.cpp
@@ -48,9 +48,9 @@ FunctionPassBase *mlir::createMemRefBoundCheckPass() {
 
 void MemRefBoundCheck::runOnFunction() {
   getFunction().walk([](Operation *opInst) {
-    if (auto loadOp = dyn_cast<LoadOp>(opInst)) {
+    if (auto loadOp = opInst->dyn_cast<LoadOp>()) {
       boundCheckLoadOrStoreOp(loadOp);
-    } else if (auto storeOp = dyn_cast<StoreOp>(opInst)) {
+    } else if (auto storeOp = opInst->dyn_cast<StoreOp>()) {
       boundCheckLoadOrStoreOp(storeOp);
     }
     // TODO(bondhugula): do this for DMA ops as well.
diff --git a/mlir/lib/Analysis/SliceAnalysis.cpp b/mlir/lib/Analysis/SliceAnalysis.cpp
index 155a2bbbd1b..bce000a4c1f 100644
--- a/mlir/lib/Analysis/SliceAnalysis.cpp
+++ b/mlir/lib/Analysis/SliceAnalysis.cpp
@@ -50,7 +50,7 @@ static void getForwardSliceImpl(Operation *op,
     return;
   }
 
-  if (auto forOp = dyn_cast<AffineForOp>(op)) {
+  if (auto forOp = op->dyn_cast<AffineForOp>()) {
     for (auto &u : forOp.getInductionVar()->getUses()) {
       auto *ownerInst = u.getOwner();
       if (forwardSlice->count(ownerInst) == 0) {
diff --git a/mlir/lib/Analysis/Utils.cpp b/mlir/lib/Analysis/Utils.cpp
index 8d963e4739c..1eaab676567 100644
--- a/mlir/lib/Analysis/Utils.cpp
+++ b/mlir/lib/Analysis/Utils.cpp
@@ -44,7 +44,7 @@ void mlir::getLoopIVs(Operation &op, SmallVectorImpl<AffineForOp> *loops) {
   AffineForOp currAffineForOp;
   // Traverse up the hierarchy collecing all 'affine.for' operation while
   // skipping over 'affine.if' operations.
-  while (currOp && ((currAffineForOp = dyn_cast<AffineForOp>(currOp)) ||
+  while (currOp && ((currAffineForOp = currOp->dyn_cast<AffineForOp>()) ||
                     currOp->isa<AffineIfOp>())) {
     if (currAffineForOp)
       loops->push_back(currAffineForOp);
@@ -239,7 +239,7 @@ LogicalResult MemRefRegion::compute(Operation *op, unsigned loopDepth,
     assert(isValidSymbol(symbol));
     // Check if the symbol is a constant.
     if (auto *op = symbol->getDefiningOp()) {
-      if (auto constOp = dyn_cast<ConstantIndexOp>(op)) {
+      if (auto constOp = op->dyn_cast<ConstantIndexOp>()) {
         cst.setIdToConstant(*symbol, constOp.getValue());
       }
     }
@@ -467,7 +467,7 @@ static Operation *getInstAtPosition(ArrayRef<unsigned> positions,
     }
     if (level == positions.size() - 1)
       return &op;
-    if (auto childAffineForOp = dyn_cast<AffineForOp>(op))
+    if (auto childAffineForOp = op.dyn_cast<AffineForOp>())
       return getInstAtPosition(positions, level + 1,
                                childAffineForOp.getBody());
 
@@ -633,7 +633,7 @@ mlir::insertBackwardComputationSlice(Operation *srcOpInst, Operation *dstOpInst,
 // Constructs MemRefAccess populating it with the memref, its indices and
 // opinst from 'loadOrStoreOpInst'.
 MemRefAccess::MemRefAccess(Operation *loadOrStoreOpInst) {
-  if (auto loadOp = dyn_cast<LoadOp>(loadOrStoreOpInst)) {
+  if (auto loadOp = loadOrStoreOpInst->dyn_cast<LoadOp>()) {
     memref = loadOp.getMemRef();
     opInst = loadOrStoreOpInst;
     auto loadMemrefType = loadOp.getMemRefType();
@@ -643,7 +643,7 @@ MemRefAccess::MemRefAccess(Operation *loadOrStoreOpInst) {
     }
   } else {
     assert(loadOrStoreOpInst->isa<StoreOp>() && "load/store op expected");
-    auto storeOp = dyn_cast<StoreOp>(loadOrStoreOpInst);
+    auto storeOp = loadOrStoreOpInst->dyn_cast<StoreOp>();
     opInst = loadOrStoreOpInst;
     memref = storeOp.getMemRef();
     auto storeMemrefType = storeOp.getMemRefType();
@@ -750,7 +750,7 @@ Optional<int64_t> mlir::getMemoryFootprintBytes(AffineForOp forOp,
 void mlir::getSequentialLoops(
     AffineForOp forOp, llvm::SmallDenseSet<Value *, 8> *sequentialLoops) {
   forOp.getOperation()->walk([&](Operation *op) {
-    if (auto innerFor = dyn_cast<AffineForOp>(op))
+    if (auto innerFor = op->dyn_cast<AffineForOp>())
       if (!isLoopParallel(innerFor))
         sequentialLoops->insert(innerFor.getInductionVar());
   });
diff --git a/mlir/lib/Analysis/VectorAnalysis.cpp b/mlir/lib/Analysis/VectorAnalysis.cpp
index 8fecf058bfc..b45ac001be4 100644
--- a/mlir/lib/Analysis/VectorAnalysis.cpp
+++ b/mlir/lib/Analysis/VectorAnalysis.cpp
@@ -152,7 +152,7 @@ static SetVector<Operation *> getParentsOfType(Operation *op) {
   SetVector<Operation *> res;
   auto *current = op;
   while (auto *parent = current->getParentOp()) {
-    if (auto typedParent = dyn_cast<T>(parent)) {
+    if (auto typedParent = parent->template dyn_cast<T>()) {
       assert(res.count(parent) == 0 && "Already inserted");
       res.insert(parent);
     }
@@ -177,7 +177,7 @@ AffineMap mlir::makePermutationMap(
     }
   }
 
-  if (auto load = dyn_cast<LoadOp>(op)) {
+  if (auto load = op->dyn_cast<LoadOp>()) {
     return ::makePermutationMap(load.getIndices(), enclosingLoopToVectorDim);
   }
 
@@ -198,10 +198,10 @@ bool mlir::matcher::operatesOnSuperVectorsOf(Operation &op,
   /// do not have to special case. Maybe a trait, or just a method, unclear atm.
   bool mustDivide = false;
   VectorType superVectorType;
-  if (auto read = dyn_cast<VectorTransferReadOp>(op)) {
+  if (auto read = op.dyn_cast<VectorTransferReadOp>()) {
     superVectorType = read.getResultType();
     mustDivide = true;
-  } else if (auto write = dyn_cast<VectorTransferWriteOp>(op)) {
+  } else if (auto write = op.dyn_cast<VectorTransferWriteOp>()) {
     superVectorType = write.getVectorType();
     mustDivide = true;
   } else if (op.getNumResults() == 0) {
diff --git a/mlir/lib/EDSC/Builders.cpp b/mlir/lib/EDSC/Builders.cpp
index 2c9117736ae..610c8b66320 100644
--- a/mlir/lib/EDSC/Builders.cpp
+++ b/mlir/lib/EDSC/Builders.cpp
@@ -100,7 +100,7 @@ ValueHandle ValueHandle::create(StringRef name, ArrayRef<ValueHandle> operands,
   if (op->getNumResults() == 1) {
     return ValueHandle(op->getResult(0));
   }
-  if (auto f = dyn_cast<AffineForOp>(op)) {
+  if (auto f = op->dyn_cast<AffineForOp>()) {
     return ValueHandle(f.getInductionVar());
   }
   llvm_unreachable("unsupported operation, use an OperationHandle instead");
@@ -147,8 +147,8 @@ static llvm::Optional<ValueHandle> emitStaticFor(ArrayRef<ValueHandle> lbs,
   if (!lbDef || !ubDef)
     return llvm::Optional<ValueHandle>();
 
-  auto lbConst = dyn_cast<ConstantIndexOp>(lbDef);
-  auto ubConst = dyn_cast<ConstantIndexOp>(ubDef);
+  auto lbConst = lbDef->dyn_cast<ConstantIndexOp>();
+  auto ubConst = ubDef->dyn_cast<ConstantIndexOp>();
   if (!lbConst || !ubConst)
     return llvm::Optional<ValueHandle>();
 
diff --git a/mlir/lib/Linalg/Transforms/Tiling.cpp b/mlir/lib/Linalg/Transforms/Tiling.cpp
index 6e20542a818..434f7206e04 100644
--- a/mlir/lib/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Linalg/Transforms/Tiling.cpp
@@ -319,11 +319,11 @@ static LogicalResult tileLinalgOp(LinalgOp &op, ArrayRef<int64_t> tileSizes,
 // TODO(ntv) expose as a primitive for other passes.
 static LogicalResult tileLinalgOp(Operation *op, ArrayRef<int64_t> tileSizes,
                                   PerFunctionState &state) {
-  if (auto matmulOp = dyn_cast<MatmulOp>(op)) {
+  if (auto matmulOp = op->dyn_cast<MatmulOp>()) {
     return tileLinalgOp(matmulOp, tileSizes, state);
-  } else if (auto matvecOp = dyn_cast<MatvecOp>(op)) {
+  } else if (auto matvecOp = op->dyn_cast<MatvecOp>()) {
     return tileLinalgOp(matvecOp, tileSizes, state);
-  } else if (auto dotOp = dyn_cast<DotOp>(op)) {
+  } else if (auto dotOp = op->dyn_cast<DotOp>()) {
     return tileLinalgOp(dotOp, tileSizes, state);
   }
   return failure();
diff --git a/mlir/lib/Linalg/Utils/Utils.cpp b/mlir/lib/Linalg/Utils/Utils.cpp
index 98cf4b75b6a..4b77ece21dd 100644
--- a/mlir/lib/Linalg/Utils/Utils.cpp
+++ b/mlir/lib/Linalg/Utils/Utils.cpp
@@ -68,9 +68,9 @@ ValueHandle LoopNestRangeBuilder::LoopNestRangeBuilder::operator()(
 
 SmallVector<Value *, 8> mlir::getRanges(Operation *op) {
   SmallVector<Value *, 8> res;
-  if (auto view = dyn_cast<ViewOp>(op)) {
+  if (auto view = op->dyn_cast<ViewOp>()) {
     res.append(view.getIndexings().begin(), view.getIndexings().end());
-  } else if (auto slice = dyn_cast<SliceOp>(op)) {
+  } else if (auto slice = op->dyn_cast<SliceOp>()) {
     for (auto *i : slice.getIndexings())
       if (i->getType().isa<RangeType>())
         res.push_back(i);
@@ -100,7 +100,7 @@ SmallVector<Value *, 8> mlir::getRanges(Operation *op) {
 Value *mlir::createOrReturnView(FuncBuilder *b, Location loc,
                                 Operation *viewDefiningOp,
                                 ArrayRef<Value *> ranges) {
-  if (auto view = dyn_cast<ViewOp>(viewDefiningOp)) {
+  if (auto view = viewDefiningOp->dyn_cast<ViewOp>()) {
     auto indexings = view.getIndexings();
     if (std::equal(indexings.begin(), indexings.end(), ranges.begin()))
       return view.getResult();
diff --git a/mlir/lib/StandardOps/Ops.cpp b/mlir/lib/StandardOps/Ops.cpp
index bc68a78bd0a..05e3b13eb4c 100644
--- a/mlir/lib/StandardOps/Ops.cpp
+++ b/mlir/lib/StandardOps/Ops.cpp
@@ -134,7 +134,7 @@ struct MemRefCastFolder : public RewritePattern {
   void rewrite(Operation *op, PatternRewriter &rewriter) const override {
     for (unsigned i = 0, e = op->getNumOperands(); i != e; ++i)
       if (auto *memref = op->getOperand(i)->getDefiningOp())
-        if (auto cast = dyn_cast<MemRefCastOp>(memref))
+        if (auto cast = memref->dyn_cast<MemRefCastOp>())
          op->setOperand(i, cast.getOperand());
    rewriter.updatedRootInPlace(op);
   }
diff --git a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
index 597efc3ba37..8a9c649feb3 100644
--- a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
@@ -199,11 +199,11 @@ bool ModuleTranslation::convertOperation(Operation &opInst,
 
   // Emit branches. We need to look up the remapped blocks and ignore the block
   // arguments that were transformed into PHI nodes.
-  if (auto brOp = dyn_cast<LLVM::BrOp>(opInst)) {
+  if (auto brOp = opInst.dyn_cast<LLVM::BrOp>()) {
     builder.CreateBr(blockMapping[brOp.getSuccessor(0)]);
     return false;
   }
-  if (auto condbrOp = dyn_cast<LLVM::CondBrOp>(opInst)) {
+  if (auto condbrOp = opInst.dyn_cast<LLVM::CondBrOp>()) {
     builder.CreateCondBr(valueMapping.lookup(condbrOp.getOperand(0)),
                          blockMapping[condbrOp.getSuccessor(0)],
                          blockMapping[condbrOp.getSuccessor(1)]);
@@ -264,7 +264,7 @@ static Value *getPHISourceValue(Block *current, Block *pred,
 
   // For conditional branches, we need to check if the current block is reached
   // through the "true" or the "false" branch and take the relevant operands.
-  auto condBranchOp = dyn_cast<LLVM::CondBrOp>(terminator);
+  auto condBranchOp = terminator.dyn_cast<LLVM::CondBrOp>();
   assert(condBranchOp &&
          "only branch operations can be terminators of a block that "
          "has successors");
diff --git a/mlir/lib/Transforms/DmaGeneration.cpp b/mlir/lib/Transforms/DmaGeneration.cpp
index 937399cc703..10f47fe9be1 100644
--- a/mlir/lib/Transforms/DmaGeneration.cpp
+++ b/mlir/lib/Transforms/DmaGeneration.cpp
@@ -173,11 +173,11 @@ static void getMultiLevelStrides(const MemRefRegion &region,
 static bool getFullMemRefAsRegion(Operation *opInst, unsigned numParamLoopIVs,
                                   MemRefRegion *region) {
   unsigned rank;
-  if (auto loadOp = dyn_cast<LoadOp>(opInst)) {
+  if (auto loadOp = opInst->dyn_cast<LoadOp>()) {
     rank = loadOp.getMemRefType().getRank();
     region->memref = loadOp.getMemRef();
     region->setWrite(false);
-  } else if (auto storeOp = dyn_cast<StoreOp>(opInst)) {
+  } else if (auto storeOp = opInst->dyn_cast<StoreOp>()) {
     rank = storeOp.getMemRefType().getRank();
     region->memref = storeOp.getMemRef();
     region->setWrite(true);
@@ -483,7 +483,7 @@ bool DmaGeneration::runOnBlock(Block *block) {
   });
 
   for (auto it = curBegin; it != block->end(); ++it) {
-    if (auto forOp = dyn_cast<AffineForOp>(&*it)) {
+    if (auto forOp = it->dyn_cast<AffineForOp>()) {
       // Returns true if the footprint is known to exceed capacity.
       auto exceedsCapacity = [&](AffineForOp forOp) {
        Optional<int64_t> footprint =
@@ -607,10 +607,10 @@ uint64_t DmaGeneration::runOnBlock(Block::iterator begin, Block::iterator end) {
   // Walk this range of operations to gather all memory regions.
   block->walk(begin, end, [&](Operation *opInst) {
     // Gather regions to allocate to buffers in faster memory space.
-    if (auto loadOp = dyn_cast<LoadOp>(opInst)) {
+    if (auto loadOp = opInst->dyn_cast<LoadOp>()) {
       if (loadOp.getMemRefType().getMemorySpace() != slowMemorySpace)
         return;
-    } else if (auto storeOp = dyn_cast<StoreOp>(opInst)) {
+    } else if (auto storeOp = opInst->dyn_cast<StoreOp>()) {
       if (storeOp.getMemRefType().getMemorySpace() != slowMemorySpace)
         return;
     } else {
@@ -739,7 +739,7 @@ uint64_t DmaGeneration::runOnBlock(Block::iterator begin, Block::iterator end) {
   // For a range of operations, a note will be emitted at the caller.
   AffineForOp forOp;
   uint64_t sizeInKib = llvm::divideCeil(totalDmaBuffersSizeInBytes, 1024);
-  if (llvm::DebugFlag && (forOp = dyn_cast<AffineForOp>(&*begin))) {
+  if (llvm::DebugFlag && (forOp = begin->dyn_cast<AffineForOp>())) {
     forOp.emitRemark()
         << sizeInKib
         << " KiB of DMA buffers in fast memory space for this block\n";
diff --git a/mlir/lib/Transforms/LoopFusion.cpp b/mlir/lib/Transforms/LoopFusion.cpp
index 1c4a4d1f755..796d2164ad9 100644
--- a/mlir/lib/Transforms/LoopFusion.cpp
+++ b/mlir/lib/Transforms/LoopFusion.cpp
@@ -644,7 +644,7 @@ bool MemRefDependenceGraph::init(Function &f) {
 
   DenseMap<Operation *, unsigned> forToNodeMap;
   for (auto &op : f.front()) {
-    if (auto forOp = dyn_cast<AffineForOp>(op)) {
+    if (auto forOp = op.dyn_cast<AffineForOp>()) {
      // Create graph node 'id' to represent top-level 'forOp' and record
      // all loads and store accesses it contains.
      LoopNestStateCollector collector;
@@ -666,14 +666,14 @@ bool MemRefDependenceGraph::init(Function &f) {
       }
       forToNodeMap[&op] = node.id;
       nodes.insert({node.id, node});
-    } else if (auto loadOp = dyn_cast<LoadOp>(op)) {
+    } else if (auto loadOp = op.dyn_cast<LoadOp>()) {
       // Create graph node for top-level load op.
       Node node(nextNodeId++, &op);
       node.loads.push_back(&op);
       auto *memref = op.cast<LoadOp>().getMemRef();
       memrefAccesses[memref].insert(node.id);
       nodes.insert({node.id, node});
-    } else if (auto storeOp = dyn_cast<StoreOp>(op)) {
+    } else if (auto storeOp = op.dyn_cast<StoreOp>()) {
       // Create graph node for top-level store op.
       Node node(nextNodeId++, &op);
       node.stores.push_back(&op);
@@ -2125,7 +2125,7 @@ public:
     auto *fn = dstNode->op->getFunction();
     for (unsigned i = 0, e = fn->getNumArguments(); i != e; ++i) {
       for (auto &use : fn->getArgument(i)->getUses()) {
-        if (auto loadOp = dyn_cast<LoadOp>(use.getOwner())) {
+        if (auto loadOp = use.getOwner()->dyn_cast<LoadOp>()) {
           // Gather loops surrounding 'use'.
           SmallVector<AffineForOp, 4> loops;
           getLoopIVs(*use.getOwner(), &loops);
diff --git a/mlir/lib/Transforms/LoopTiling.cpp b/mlir/lib/Transforms/LoopTiling.cpp
index 28e13d89ada..ce42a5eba85 100644
--- a/mlir/lib/Transforms/LoopTiling.cpp
+++ b/mlir/lib/Transforms/LoopTiling.cpp
@@ -273,7 +273,7 @@ static void getTileableBands(Function &f,
 
   for (auto &block : f)
     for (auto &op : block)
-      if (auto forOp = dyn_cast<AffineForOp>(op))
+      if (auto forOp = op.dyn_cast<AffineForOp>())
         getMaximalPerfectLoopNest(forOp);
 }
 
diff --git a/mlir/lib/Transforms/LoopUnrollAndJam.cpp b/mlir/lib/Transforms/LoopUnrollAndJam.cpp
index 0a23295c8d9..366a7ede5eb 100644
--- a/mlir/lib/Transforms/LoopUnrollAndJam.cpp
+++ b/mlir/lib/Transforms/LoopUnrollAndJam.cpp
@@ -92,7 +92,7 @@ void LoopUnrollAndJam::runOnFunction() {
   // unroll-and-jammed by this pass. However, runOnAffineForOp can be called on
   // any for operation.
   auto &entryBlock = getFunction().front();
-  if (auto forOp = dyn_cast<AffineForOp>(entryBlock.front()))
+  if (auto forOp = entryBlock.front().dyn_cast<AffineForOp>())
     runOnAffineForOp(forOp);
 }
 
diff --git a/mlir/lib/Transforms/LowerAffine.cpp b/mlir/lib/Transforms/LowerAffine.cpp
index 1ffe5e3ddd7..dc389c8e37a 100644
--- a/mlir/lib/Transforms/LowerAffine.cpp
+++ b/mlir/lib/Transforms/LowerAffine.cpp
@@ -620,10 +620,10 @@ void LowerAffinePass::runOnFunction() {
   // Rewrite all of the ifs and fors. We walked the operations in postorders,
   // so we know that we will rewrite them in the reverse order.
   for (auto *op : llvm::reverse(instsToRewrite)) {
-    if (auto ifOp = dyn_cast<AffineIfOp>(op)) {
+    if (auto ifOp = op->dyn_cast<AffineIfOp>()) {
       if (lowerAffineIf(ifOp))
         return signalPassFailure();
-    } else if (auto forOp = dyn_cast<AffineForOp>(op)) {
+    } else if (auto forOp = op->dyn_cast<AffineForOp>()) {
       if (lowerAffineFor(forOp))
         return signalPassFailure();
     } else if (lowerAffineApply(op->cast<AffineApplyOp>())) {
diff --git a/mlir/lib/Transforms/MaterializeVectors.cpp b/mlir/lib/Transforms/MaterializeVectors.cpp
index 28dfb2278e0..2f06a9aa3bf 100644
--- a/mlir/lib/Transforms/MaterializeVectors.cpp
+++ b/mlir/lib/Transforms/MaterializeVectors.cpp
@@ -556,12 +556,12 @@ static bool instantiateMaterialization(Operation *op,
   if (op->getNumRegions() != 0)
     return op->emitError("NYI path Op with region"), true;
 
-  if (auto write = dyn_cast<VectorTransferWriteOp>(op)) {
+  if (auto write = op->dyn_cast<VectorTransferWriteOp>()) {
     auto *clone = instantiate(&b, write, state->hwVectorType,
                               state->hwVectorInstance, state->substitutionsMap);
     return clone == nullptr;
   }
-  if (auto read = dyn_cast<VectorTransferReadOp>(op)) {
+  if (auto read = op->dyn_cast<VectorTransferReadOp>()) {
     auto *clone = instantiate(&b, read, state->hwVectorType,
                               state->hwVectorInstance, state->substitutionsMap);
     if (!clone) {
diff --git a/mlir/lib/Transforms/MemRefDataFlowOpt.cpp b/mlir/lib/Transforms/MemRefDataFlowOpt.cpp
index 94df936c93f..a63d462c4a9 100644
--- a/mlir/lib/Transforms/MemRefDataFlowOpt.cpp
+++ b/mlir/lib/Transforms/MemRefDataFlowOpt.cpp
@@ -103,7 +103,7 @@ void MemRefDataFlowOpt::forwardStoreToLoad(LoadOp loadOp) {
   SmallVector<Operation *, 8> storeOps;
   unsigned minSurroundingLoops = getNestingDepth(*loadOpInst);
   for (auto &use : loadOp.getMemRef()->getUses()) {
-    auto storeOp = dyn_cast<StoreOp>(use.getOwner());
+    auto storeOp = use.getOwner()->dyn_cast<StoreOp>();
     if (!storeOp)
       continue;
     auto *storeOpInst = storeOp.getOperation();
diff --git a/mlir/lib/Transforms/PipelineDataTransfer.cpp b/mlir/lib/Transforms/PipelineDataTransfer.cpp
index 0da97f7d169..66fbf4a1306 100644
--- a/mlir/lib/Transforms/PipelineDataTransfer.cpp
+++ b/mlir/lib/Transforms/PipelineDataTransfer.cpp
@@ -181,7 +181,7 @@ static void findMatchingStartFinishInsts(
   // Collect outgoing DMA operations - needed to check for dependences below.
   SmallVector<DmaStartOp, 4> outgoingDmaOps;
   for (auto &op : *forOp.getBody()) {
-    auto dmaStartOp = dyn_cast<DmaStartOp>(op);
+    auto dmaStartOp = op.dyn_cast<DmaStartOp>();
     if (dmaStartOp && dmaStartOp.isSrcMemorySpaceFaster())
       outgoingDmaOps.push_back(dmaStartOp);
   }
@@ -193,7 +193,7 @@ static void findMatchingStartFinishInsts(
       dmaFinishInsts.push_back(&op);
       continue;
     }
-    auto dmaStartOp = dyn_cast<DmaStartOp>(op);
+    auto dmaStartOp = op.dyn_cast<DmaStartOp>();
     if (!dmaStartOp)
       continue;
 
diff --git a/mlir/lib/Transforms/TestConstantFold.cpp b/mlir/lib/Transforms/TestConstantFold.cpp
index ec1e971973e..0990d7a73f6 100644
--- a/mlir/lib/Transforms/TestConstantFold.cpp
+++ b/mlir/lib/Transforms/TestConstantFold.cpp
@@ -48,7 +48,7 @@ void TestConstantFold::foldOperation(Operation *op,
   }
   // If this op is a constant that are used and cannot be de-duplicated,
   // remember it for cleanup later.
-  else if (auto constant = dyn_cast<ConstantOp>(op)) {
+  else if (auto constant = op->dyn_cast<ConstantOp>()) {
     existingConstants.push_back(op);
   }
 }
diff --git a/mlir/lib/Transforms/Utils/ConstantFoldUtils.cpp b/mlir/lib/Transforms/Utils/ConstantFoldUtils.cpp
index b907840b27d..fc8209be872 100644
--- a/mlir/lib/Transforms/Utils/ConstantFoldUtils.cpp
+++ b/mlir/lib/Transforms/Utils/ConstantFoldUtils.cpp
@@ -40,7 +40,7 @@ bool ConstantFoldHelper::tryToConstantFold(
   // into the value it contains. We need to consider constants before the
   // constant folding logic to avoid re-creating the same constant later.
   // TODO: Extend to support dialect-specific constant ops.
-  if (auto constant = dyn_cast<ConstantOp>(op)) {
+  if (auto constant = op->dyn_cast<ConstantOp>()) {
     // If this constant is dead, update bookkeeping and signal the caller.
     if (constant.use_empty()) {
       notifyRemoval(op);
diff --git a/mlir/lib/Transforms/Utils/LoopUtils.cpp b/mlir/lib/Transforms/Utils/LoopUtils.cpp
index 7fbb48ecf99..a10e4a1ae49 100644
--- a/mlir/lib/Transforms/Utils/LoopUtils.cpp
+++ b/mlir/lib/Transforms/Utils/LoopUtils.cpp
@@ -363,7 +363,7 @@ void mlir::getPerfectlyNestedLoops(SmallVectorImpl<AffineForOp> &nestedLoops,
   nestedLoops.push_back(curr);
   auto *currBody = curr.getBody();
   while (currBody->begin() == std::prev(currBody->end(), 2) &&
-         (curr = dyn_cast<AffineForOp>(curr.getBody()->front()))) {
+         (curr = curr.getBody()->front().dyn_cast<AffineForOp>())) {
     nestedLoops.push_back(curr);
     currBody = curr.getBody();
   }
diff --git a/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp b/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
index b64dc53e037..753f7cf750f 100644
--- a/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
+++ b/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
@@ -234,7 +234,7 @@ void VectorizerTestPass::testComposeMaps(llvm::raw_ostream &outs) {
 static bool affineApplyOp(Operation &op) { return op.isa<AffineApplyOp>(); }
 
 static bool singleResultAffineApplyOpWithoutUses(Operation &op) {
-  auto app = dyn_cast<AffineApplyOp>(op);
+  auto app = op.dyn_cast<AffineApplyOp>();
   return app && app.use_empty();
 }
 
diff --git a/mlir/lib/Transforms/Vectorize.cpp b/mlir/lib/Transforms/Vectorize.cpp
index 9b8768a6445..025a6535a78 100644
--- a/mlir/lib/Transforms/Vectorize.cpp
+++ b/mlir/lib/Transforms/Vectorize.cpp
@@ -839,8 +839,8 @@ static LogicalResult vectorizeAffineForOp(AffineForOp loop, int64_t step,
   loadAndStores.match(loop.getOperation(), &loadAndStoresMatches);
   for (auto ls : loadAndStoresMatches) {
     auto *opInst = ls.getMatchedOperation();
-    auto load = dyn_cast<LoadOp>(opInst);
-    auto store = dyn_cast<StoreOp>(opInst);
+    auto load = opInst->dyn_cast<LoadOp>();
+    auto store = opInst->dyn_cast<StoreOp>();
     LLVM_DEBUG(opInst->print(dbgs()));
     LogicalResult result =
         load ? vectorizeRootOrTerminal(loop.getInductionVar(), load, state)
@@ -982,7 +982,7 @@ static Value *vectorizeOperand(Value *operand, Operation *op,
     return nullptr;
   }
   // 3. vectorize constant.
-  if (auto constant = dyn_cast<ConstantOp>(operand->getDefiningOp())) {
+  if (auto constant = operand->getDefiningOp()->dyn_cast<ConstantOp>()) {
     return vectorizeConstant(
         op, constant,
         VectorType::get(state->strategy->vectorSizes, operand->getType()));
@@ -1012,7 +1012,7 @@ static Operation *vectorizeOneOperation(Operation *opInst,
   assert(!opInst->isa<VectorTransferWriteOp>() &&
          "vector.transfer_write cannot be further vectorized");
 
-  if (auto store = dyn_cast<StoreOp>(opInst)) {
+  if (auto store = opInst->dyn_cast<StoreOp>()) {
     auto *memRef = store.getMemRef();
     auto *value = store.getValueToStore();
     auto *vectorValue = vectorizeOperand(value, opInst, state);

