Diffstat (limited to 'mlir/lib/Transforms')
-rw-r--r-- | mlir/lib/Transforms/AffineLoopInvariantCodeMotion.cpp    |  6
-rw-r--r-- | mlir/lib/Transforms/DialectConversion.cpp                | 32
-rw-r--r-- | mlir/lib/Transforms/LoopFusion.cpp                       | 22
-rw-r--r-- | mlir/lib/Transforms/LoopInvariantCodeMotion.cpp          |  2
-rw-r--r-- | mlir/lib/Transforms/LoopTiling.cpp                       |  2
-rw-r--r-- | mlir/lib/Transforms/LoopUnrollAndJam.cpp                 |  2
-rw-r--r-- | mlir/lib/Transforms/MemRefDataFlowOpt.cpp                |  8
-rw-r--r-- | mlir/lib/Transforms/PipelineDataTransfer.cpp             | 20
-rw-r--r-- | mlir/lib/Transforms/Utils/FoldUtils.cpp                  |  6
-rw-r--r-- | mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp |  8
-rw-r--r-- | mlir/lib/Transforms/Utils/InliningUtils.cpp              | 20
-rw-r--r-- | mlir/lib/Transforms/Utils/LoopFusionUtils.cpp            |  4
-rw-r--r-- | mlir/lib/Transforms/Utils/LoopUtils.cpp                  | 37
-rw-r--r-- | mlir/lib/Transforms/Utils/RegionUtils.cpp                |  6
-rw-r--r-- | mlir/lib/Transforms/Utils/Utils.cpp                      | 36
-rw-r--r-- | mlir/lib/Transforms/Vectorize.cpp                        | 23
16 files changed, 115 insertions, 119 deletions
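The hunks below apply one mechanical migration: mlir::Value is now a value-typed
handle, so its members (getType, getUsers, getDefiningOp, use_empty,
replaceAllUsesWith, ...) are reached with '.' instead of '->', while Operation*
keeps its pointer syntax. A minimal sketch of the resulting call-site pattern,
using only the Value/Operation APIs exercised in the hunks; the helper function
and its name are illustrative, not part of this commit:

    #include "mlir/IR/Operation.h"
    #include "mlir/IR/Value.h"

    using namespace mlir;

    // Erase the defining op of 'v' once 'v' has no remaining uses.
    static void eraseIfDead(Value v) {
      // Pre-commit spelling:  if (v->use_empty()) v->getDefiningOp()->erase();
      if (v.use_empty())                          // '.' on the Value handle
        if (Operation *def = v.getDefiningOp())   // null for block arguments
          def->erase();                           // Operation* still uses '->'
    }
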
diff --git a/mlir/lib/Transforms/AffineLoopInvariantCodeMotion.cpp b/mlir/lib/Transforms/AffineLoopInvariantCodeMotion.cpp index 24ec2d7c70b..f2c23013a0d 100644 --- a/mlir/lib/Transforms/AffineLoopInvariantCodeMotion.cpp +++ b/mlir/lib/Transforms/AffineLoopInvariantCodeMotion.cpp @@ -91,7 +91,7 @@ bool isOpLoopInvariant(Operation &op, Value indVar, Value memref = isa<AffineLoadOp>(op) ? cast<AffineLoadOp>(op).getMemRef() : cast<AffineStoreOp>(op).getMemRef(); - for (auto *user : memref->getUsers()) { + for (auto *user : memref.getUsers()) { // If this memref has a user that is a DMA, give up because these // operations write to this memref. if (isa<AffineDmaStartOp>(op) || isa<AffineDmaWaitOp>(op)) { @@ -122,10 +122,10 @@ bool isOpLoopInvariant(Operation &op, Value indVar, return false; } for (unsigned int i = 0; i < op.getNumOperands(); ++i) { - auto *operandSrc = op.getOperand(i)->getDefiningOp(); + auto *operandSrc = op.getOperand(i).getDefiningOp(); LLVM_DEBUG( - op.getOperand(i)->print(llvm::dbgs() << "\nIterating on operand\n")); + op.getOperand(i).print(llvm::dbgs() << "\nIterating on operand\n")); // If the loop IV is the operand, this op isn't loop invariant. if (indVar == op.getOperand(i)) { diff --git a/mlir/lib/Transforms/DialectConversion.cpp b/mlir/lib/Transforms/DialectConversion.cpp index 5f7fb7a68c9..3f202c2a95b 100644 --- a/mlir/lib/Transforms/DialectConversion.cpp +++ b/mlir/lib/Transforms/DialectConversion.cpp @@ -227,7 +227,7 @@ void ArgConverter::notifyOpRemoved(Operation *op) { // Drop all uses of the original arguments and delete the original block. Block *origBlock = it->second.origBlock; for (BlockArgument arg : origBlock->getArguments()) - arg->dropAllUses(); + arg.dropAllUses(); conversionInfo.erase(it); } } @@ -241,7 +241,7 @@ void ArgConverter::discardRewrites(Block *block) { // Drop all uses of the new block arguments and replace uses of the new block. for (int i = block->getNumArguments() - 1; i >= 0; --i) - block->getArgument(i)->dropAllUses(); + block->getArgument(i).dropAllUses(); block->replaceAllUsesWith(origBlock); // Move the operations back the original block and the delete the new block. @@ -269,17 +269,17 @@ void ArgConverter::applyRewrites(ConversionValueMapping &mapping) { // replace all uses. auto argReplacementValue = mapping.lookupOrDefault(origArg); if (argReplacementValue != origArg) { - origArg->replaceAllUsesWith(argReplacementValue); + origArg.replaceAllUsesWith(argReplacementValue); continue; } // If there are any dangling uses then replace the argument with one // generated by the type converter. This is necessary as the cast must // persist in the IR after conversion. - if (!origArg->use_empty()) { + if (!origArg.use_empty()) { rewriter.setInsertionPointToStart(newBlock); auto *newOp = typeConverter->materializeConversion( - rewriter, origArg->getType(), llvm::None, loc); - origArg->replaceAllUsesWith(newOp->getResult(0)); + rewriter, origArg.getType(), llvm::None, loc); + origArg.replaceAllUsesWith(newOp->getResult(0)); } continue; } @@ -290,7 +290,7 @@ void ArgConverter::applyRewrites(ConversionValueMapping &mapping) { // type are the same, otherwise it should force a conversion to be // materialized. 
if (argInfo->newArgSize == 1) { - origArg->replaceAllUsesWith( + origArg.replaceAllUsesWith( mapping.lookupOrDefault(newBlock->getArgument(argInfo->newArgIdx))); continue; } @@ -300,13 +300,13 @@ void ArgConverter::applyRewrites(ConversionValueMapping &mapping) { assert(argInfo->newArgSize > 1 && castValue && "expected 1->N mapping"); // If the argument is still used, replace it with the generated cast. - if (!origArg->use_empty()) - origArg->replaceAllUsesWith(mapping.lookupOrDefault(castValue)); + if (!origArg.use_empty()) + origArg.replaceAllUsesWith(mapping.lookupOrDefault(castValue)); // If all users of the cast were removed, we can drop it. Otherwise, keep // the operation alive and let the user handle any remaining usages. - if (castValue->use_empty()) - castValue->getDefiningOp()->erase(); + if (castValue.use_empty()) + castValue.getDefiningOp()->erase(); } } } @@ -372,7 +372,7 @@ Block *ArgConverter::applySignatureConversion( // to pack the new values. auto replArgs = newArgs.slice(inputMap->inputNo, inputMap->size); Operation *cast = typeConverter->materializeConversion( - rewriter, origArg->getType(), replArgs, loc); + rewriter, origArg.getType(), replArgs, loc); assert(cast->getNumResults() == 1 && cast->getNumOperands() == replArgs.size()); mapping.map(origArg, cast->getResult(0)); @@ -709,7 +709,7 @@ void ConversionPatternRewriterImpl::applyRewrites() { for (auto &repl : replacements) { for (unsigned i = 0, e = repl.newValues.size(); i != e; ++i) { if (auto newValue = repl.newValues[i]) - repl.op->getResult(i)->replaceAllUsesWith( + repl.op->getResult(i).replaceAllUsesWith( mapping.lookupOrDefault(newValue)); } @@ -863,8 +863,8 @@ Block *ConversionPatternRewriter::applySignatureConversion( void ConversionPatternRewriter::replaceUsesOfBlockArgument(BlockArgument from, Value to) { - for (auto &u : from->getUses()) { - if (u.getOwner() == to->getDefiningOp()) + for (auto &u : from.getUses()) { + if (u.getOwner() == to.getDefiningOp()) continue; u.getOwner()->replaceUsesOfWith(from, to); } @@ -1665,7 +1665,7 @@ auto TypeConverter::convertBlockSignature(Block *block) -> Optional<SignatureConversion> { SignatureConversion conversion(block->getNumArguments()); for (unsigned i = 0, e = block->getNumArguments(); i != e; ++i) - if (failed(convertSignatureArg(i, block->getArgument(i)->getType(), + if (failed(convertSignatureArg(i, block->getArgument(i).getType(), conversion))) return llvm::None; return conversion; diff --git a/mlir/lib/Transforms/LoopFusion.cpp b/mlir/lib/Transforms/LoopFusion.cpp index fcfc1d7ae52..970907c81fe 100644 --- a/mlir/lib/Transforms/LoopFusion.cpp +++ b/mlir/lib/Transforms/LoopFusion.cpp @@ -301,12 +301,12 @@ public: Node *node = getNode(id); for (auto *storeOpInst : node->stores) { auto memref = cast<AffineStoreOp>(storeOpInst).getMemRef(); - auto *op = memref->getDefiningOp(); + auto *op = memref.getDefiningOp(); // Return true if 'memref' is a block argument. if (!op) return true; // Return true if any use of 'memref' escapes the function. 
- for (auto *user : memref->getUsers()) + for (auto *user : memref.getUsers()) if (!isMemRefDereferencingOp(*user)) return true; } @@ -390,7 +390,7 @@ public: if (!hasEdge(srcId, dstId, value)) { outEdges[srcId].push_back({dstId, value}); inEdges[dstId].push_back({srcId, value}); - if (value->getType().isa<MemRefType>()) + if (value.getType().isa<MemRefType>()) memrefEdgeCount[value]++; } } @@ -399,7 +399,7 @@ public: void removeEdge(unsigned srcId, unsigned dstId, Value value) { assert(inEdges.count(dstId) > 0); assert(outEdges.count(srcId) > 0); - if (value->getType().isa<MemRefType>()) { + if (value.getType().isa<MemRefType>()) { assert(memrefEdgeCount.count(value) > 0); memrefEdgeCount[value]--; } @@ -634,7 +634,7 @@ public: const std::function<void(Edge)> &callback) { for (auto &edge : edges) { // Skip if 'edge' is not a memref dependence edge. - if (!edge.value->getType().isa<MemRefType>()) + if (!edge.value.getType().isa<MemRefType>()) continue; assert(nodes.count(edge.id) > 0); // Skip if 'edge.id' is not a loop nest. @@ -735,7 +735,7 @@ bool MemRefDependenceGraph::init(FuncOp f) { continue; auto *opInst = node.op; for (auto value : opInst->getResults()) { - for (auto *user : value->getUsers()) { + for (auto *user : value.getUsers()) { SmallVector<AffineForOp, 4> loops; getLoopIVs(*user, &loops); if (loops.empty()) @@ -896,7 +896,7 @@ static Value createPrivateMemRef(AffineForOp forOp, Operation *srcStoreOpInst, OpBuilder top(forInst->getParentOfType<FuncOp>().getBody()); // Create new memref type based on slice bounds. auto oldMemRef = cast<AffineStoreOp>(srcStoreOpInst).getMemRef(); - auto oldMemRefType = oldMemRef->getType().cast<MemRefType>(); + auto oldMemRefType = oldMemRef.getType().cast<MemRefType>(); unsigned rank = oldMemRefType.getRank(); // Compute MemRefRegion for 'srcStoreOpInst' at depth 'dstLoopDepth'. @@ -1650,7 +1650,7 @@ public: visitedMemrefs.insert(newMemRef); // Create new node in dependence graph for 'newMemRef' alloc op. unsigned newMemRefNodeId = - mdg->addNode(newMemRef->getDefiningOp()); + mdg->addNode(newMemRef.getDefiningOp()); // Add edge from 'newMemRef' node to dstNode. mdg->addEdge(newMemRefNodeId, dstId, newMemRef); } @@ -1830,7 +1830,7 @@ public: // Search for siblings which load the same memref function argument. auto fn = dstNode->op->getParentOfType<FuncOp>(); for (unsigned i = 0, e = fn.getNumArguments(); i != e; ++i) { - for (auto *user : fn.getArgument(i)->getUsers()) { + for (auto *user : fn.getArgument(i).getUsers()) { if (auto loadOp = dyn_cast<AffineLoadOp>(user)) { // Gather loops surrounding 'use'. SmallVector<AffineForOp, 4> loops; @@ -1943,10 +1943,10 @@ public: continue; auto memref = pair.first; // Skip if there exist other uses (return operation or function calls). - if (!memref->use_empty()) + if (!memref.use_empty()) continue; // Use list expected to match the dep graph info. - auto *op = memref->getDefiningOp(); + auto *op = memref.getDefiningOp(); if (isa_and_nonnull<AllocOp>(op)) op->erase(); } diff --git a/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp b/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp index fb3d0c0b45c..8f48e41a60c 100644 --- a/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp +++ b/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp @@ -84,7 +84,7 @@ static LogicalResult moveLoopInvariantCode(LoopLikeOpInterface looplike, // Helper to check whether an operation is loop invariant wrt. SSA properties. 
auto isDefinedOutsideOfBody = [&](Value value) { - auto definingOp = value->getDefiningOp(); + auto definingOp = value.getDefiningOp(); return (definingOp && !!willBeMovedSet.count(definingOp)) || looplike.isDefinedOutsideOfLoop(value); }; diff --git a/mlir/lib/Transforms/LoopTiling.cpp b/mlir/lib/Transforms/LoopTiling.cpp index d3dc81760fc..8755b459b51 100644 --- a/mlir/lib/Transforms/LoopTiling.cpp +++ b/mlir/lib/Transforms/LoopTiling.cpp @@ -241,7 +241,7 @@ LogicalResult mlir::tileCodeGen(MutableArrayRef<AffineForOp> band, constructTiledIndexSetHyperRect(origLoops, newLoops, tileSizes); // In this case, the point loop IVs just replace the original ones. for (unsigned i = 0; i < width; i++) { - origLoopIVs[i]->replaceAllUsesWith(newLoops[i + width].getInductionVar()); + origLoopIVs[i].replaceAllUsesWith(newLoops[i + width].getInductionVar()); } // Erase the old loop nest. diff --git a/mlir/lib/Transforms/LoopUnrollAndJam.cpp b/mlir/lib/Transforms/LoopUnrollAndJam.cpp index 6c74d545497..a0c42f21a01 100644 --- a/mlir/lib/Transforms/LoopUnrollAndJam.cpp +++ b/mlir/lib/Transforms/LoopUnrollAndJam.cpp @@ -211,7 +211,7 @@ LogicalResult mlir::loopUnrollJamByFactor(AffineForOp forOp, // If the induction variable is used, create a remapping to the value for // this unrolled instance. - if (!forOpIV->use_empty()) { + if (!forOpIV.use_empty()) { // iv' = iv + i, i = 1 to unrollJamFactor-1. auto d0 = builder.getAffineDimExpr(0); auto bumpMap = AffineMap::get(1, 0, {d0 + i * step}); diff --git a/mlir/lib/Transforms/MemRefDataFlowOpt.cpp b/mlir/lib/Transforms/MemRefDataFlowOpt.cpp index e2514e12cc7..4ac95822947 100644 --- a/mlir/lib/Transforms/MemRefDataFlowOpt.cpp +++ b/mlir/lib/Transforms/MemRefDataFlowOpt.cpp @@ -93,7 +93,7 @@ void MemRefDataFlowOpt::forwardStoreToLoad(AffineLoadOp loadOp) { // all store ops. SmallVector<Operation *, 8> storeOps; unsigned minSurroundingLoops = getNestingDepth(*loadOpInst); - for (auto *user : loadOp.getMemRef()->getUsers()) { + for (auto *user : loadOp.getMemRef().getUsers()) { auto storeOp = dyn_cast<AffineStoreOp>(user); if (!storeOp) continue; @@ -206,18 +206,18 @@ void MemRefDataFlowOpt::runOnFunction() { // to do this as well, but we'll do it here since we collected these anyway. for (auto memref : memrefsToErase) { // If the memref hasn't been alloc'ed in this function, skip. - Operation *defInst = memref->getDefiningOp(); + Operation *defInst = memref.getDefiningOp(); if (!defInst || !isa<AllocOp>(defInst)) // TODO(mlir-team): if the memref was returned by a 'call' operation, we // could still erase it if the call had no side-effects. continue; - if (llvm::any_of(memref->getUsers(), [&](Operation *ownerInst) { + if (llvm::any_of(memref.getUsers(), [&](Operation *ownerInst) { return (!isa<AffineStoreOp>(ownerInst) && !isa<DeallocOp>(ownerInst)); })) continue; // Erase all stores, the dealloc, and the alloc on the memref. 
- for (auto *user : llvm::make_early_inc_range(memref->getUsers())) + for (auto *user : llvm::make_early_inc_range(memref.getUsers())) user->erase(); defInst->erase(); } diff --git a/mlir/lib/Transforms/PipelineDataTransfer.cpp b/mlir/lib/Transforms/PipelineDataTransfer.cpp index dce02737064..40511f8f39f 100644 --- a/mlir/lib/Transforms/PipelineDataTransfer.cpp +++ b/mlir/lib/Transforms/PipelineDataTransfer.cpp @@ -78,7 +78,7 @@ static bool doubleBuffer(Value oldMemRef, AffineForOp forOp) { return newMemRefType; }; - auto oldMemRefType = oldMemRef->getType().cast<MemRefType>(); + auto oldMemRefType = oldMemRef.getType().cast<MemRefType>(); auto newMemRefType = doubleShape(oldMemRefType); // The double buffer is allocated right before 'forInst'. @@ -205,7 +205,7 @@ static void findMatchingStartFinishInsts( // We only double buffer if the buffer is not live out of loop. auto memref = dmaStartOp.getOperand(dmaStartOp.getFasterMemPos()); bool escapingUses = false; - for (auto *user : memref->getUsers()) { + for (auto *user : memref.getUsers()) { // We can double buffer regardless of dealloc's outside the loop. if (isa<DeallocOp>(user)) continue; @@ -277,11 +277,11 @@ void PipelineDataTransfer::runOnAffineForOp(AffineForOp forOp) { // order to create the double buffer above.) // '-canonicalize' does this in a more general way, but we'll anyway do the // simple/common case so that the output / test cases looks clear. - if (auto *allocInst = oldMemRef->getDefiningOp()) { - if (oldMemRef->use_empty()) { + if (auto *allocInst = oldMemRef.getDefiningOp()) { + if (oldMemRef.use_empty()) { allocInst->erase(); - } else if (oldMemRef->hasOneUse()) { - if (auto dealloc = dyn_cast<DeallocOp>(*oldMemRef->user_begin())) { + } else if (oldMemRef.hasOneUse()) { + if (auto dealloc = dyn_cast<DeallocOp>(*oldMemRef.user_begin())) { dealloc.erase(); allocInst->erase(); } @@ -300,11 +300,11 @@ void PipelineDataTransfer::runOnAffineForOp(AffineForOp forOp) { } // If the old tag has no uses or a single dealloc use, remove it. // (canonicalization handles more complex cases). - if (auto *tagAllocInst = oldTagMemRef->getDefiningOp()) { - if (oldTagMemRef->use_empty()) { + if (auto *tagAllocInst = oldTagMemRef.getDefiningOp()) { + if (oldTagMemRef.use_empty()) { tagAllocInst->erase(); - } else if (oldTagMemRef->hasOneUse()) { - if (auto dealloc = dyn_cast<DeallocOp>(*oldTagMemRef->user_begin())) { + } else if (oldTagMemRef.hasOneUse()) { + if (auto dealloc = dyn_cast<DeallocOp>(*oldTagMemRef.user_begin())) { dealloc.erase(); tagAllocInst->erase(); } diff --git a/mlir/lib/Transforms/Utils/FoldUtils.cpp b/mlir/lib/Transforms/Utils/FoldUtils.cpp index 719c6fac731..a96545c0b24 100644 --- a/mlir/lib/Transforms/Utils/FoldUtils.cpp +++ b/mlir/lib/Transforms/Utils/FoldUtils.cpp @@ -97,7 +97,7 @@ LogicalResult OperationFolder::tryToFold( // Otherwise, replace all of the result values and erase the operation. for (unsigned i = 0, e = results.size(); i != e; ++i) - op->getResult(i)->replaceAllUsesWith(results[i]); + op->getResult(i).replaceAllUsesWith(results[i]); op->erase(); return success(); } @@ -120,7 +120,7 @@ void OperationFolder::notifyRemoval(Operation *op) { auto &uniquedConstants = foldScopes[getInsertionRegion(interfaces, op)]; // Erase all of the references to this operation. 
- auto type = op->getResult(0)->getType(); + auto type = op->getResult(0).getType(); for (auto *dialect : it->second) uniquedConstants.erase(std::make_tuple(dialect, constValue, type)); referencedDialects.erase(it); @@ -182,7 +182,7 @@ LogicalResult OperationFolder::tryToFold( Attribute attrRepl = foldResults[i].get<Attribute>(); if (auto *constOp = tryGetOrCreateConstant(uniquedConstants, dialect, builder, attrRepl, - res->getType(), op->getLoc())) { + res.getType(), op->getLoc())) { results.push_back(constOp->getResult(0)); continue; } diff --git a/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp b/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp index 1eb9c57639a..24e1f5eabd7 100644 --- a/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp +++ b/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp @@ -99,7 +99,7 @@ protected: // before the root is changed. void notifyRootReplaced(Operation *op) override { for (auto result : op->getResults()) - for (auto *user : result->getUsers()) + for (auto *user : result.getUsers()) addToWorklist(user); } @@ -115,9 +115,9 @@ private: // TODO(riverriddle) This is based on the fact that zero use operations // may be deleted, and that single use values often have more // canonicalization opportunities. - if (!operand->use_empty() && !operand->hasOneUse()) + if (!operand.use_empty() && !operand.hasOneUse()) continue; - if (auto *defInst = operand->getDefiningOp()) + if (auto *defInst = operand.getDefiningOp()) addToWorklist(defInst); } } @@ -181,7 +181,7 @@ bool GreedyPatternRewriteDriver::simplify(MutableArrayRef<Region> regions, // Add all the users of the result to the worklist so we make sure // to revisit them. for (auto result : op->getResults()) - for (auto *operand : result->getUsers()) + for (auto *operand : result.getUsers()) addToWorklist(operand); notifyOperationRemoved(op); diff --git a/mlir/lib/Transforms/Utils/InliningUtils.cpp b/mlir/lib/Transforms/Utils/InliningUtils.cpp index 1ac286c67fb..64591209dce 100644 --- a/mlir/lib/Transforms/Utils/InliningUtils.cpp +++ b/mlir/lib/Transforms/Utils/InliningUtils.cpp @@ -199,8 +199,8 @@ LogicalResult mlir::inlineRegion(InlinerInterface &interface, Region *src, // Otherwise, there were multiple blocks inlined. Add arguments to the post // insertion block to represent the results to replace. for (Value resultToRepl : resultsToReplace) { - resultToRepl->replaceAllUsesWith( - postInsertBlock->addArgument(resultToRepl->getType())); + resultToRepl.replaceAllUsesWith( + postInsertBlock->addArgument(resultToRepl.getType())); } /// Handle the terminators for each of the new blocks. @@ -238,7 +238,7 @@ LogicalResult mlir::inlineRegion(InlinerInterface &interface, Region *src, // Verify that the types of the provided values match the function argument // types. BlockArgument regionArg = entryBlock->getArgument(i); - if (inlinedOperands[i]->getType() != regionArg->getType()) + if (inlinedOperands[i].getType() != regionArg.getType()) return failure(); mapper.map(regionArg, inlinedOperands[i]); } @@ -302,7 +302,7 @@ LogicalResult mlir::inlineCall(InlinerInterface &interface, // Functor used to cleanup generated state on failure. 
auto cleanupState = [&] { for (auto *op : castOps) { - op->getResult(0)->replaceAllUsesWith(op->getOperand(0)); + op->getResult(0).replaceAllUsesWith(op->getOperand(0)); op->erase(); } return failure(); @@ -321,8 +321,8 @@ LogicalResult mlir::inlineCall(InlinerInterface &interface, // If the call operand doesn't match the expected region argument, try to // generate a cast. - Type regionArgType = regionArg->getType(); - if (operand->getType() != regionArgType) { + Type regionArgType = regionArg.getType(); + if (operand.getType() != regionArgType) { if (!(operand = materializeConversion(callInterface, castOps, castBuilder, operand, regionArgType, castLoc))) return cleanupState(); @@ -334,18 +334,18 @@ LogicalResult mlir::inlineCall(InlinerInterface &interface, castBuilder.setInsertionPointAfter(call); for (unsigned i = 0, e = callResults.size(); i != e; ++i) { Value callResult = callResults[i]; - if (callResult->getType() == callableResultTypes[i]) + if (callResult.getType() == callableResultTypes[i]) continue; // Generate a conversion that will produce the original type, so that the IR // is still valid after the original call gets replaced. Value castResult = materializeConversion(callInterface, castOps, castBuilder, callResult, - callResult->getType(), castLoc); + callResult.getType(), castLoc); if (!castResult) return cleanupState(); - callResult->replaceAllUsesWith(castResult); - castResult->getDefiningOp()->replaceUsesOfWith(castResult, callResult); + callResult.replaceAllUsesWith(castResult); + castResult.getDefiningOp()->replaceUsesOfWith(castResult, callResult); } // Attempt to inline the call. diff --git a/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp b/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp index b0d9fdf5fd8..21603113ec9 100644 --- a/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp +++ b/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp @@ -113,7 +113,7 @@ static Operation *getLastDependentOpInRange(Operation *opA, Operation *opB) { return WalkResult::advance(); } for (auto value : op->getResults()) { - for (auto user : value->getUsers()) { + for (auto user : value.getUsers()) { SmallVector<AffineForOp, 4> loops; // Check if any loop in loop nest surrounding 'user' is 'opB'. getLoopIVs(*user, &loops); @@ -447,7 +447,7 @@ bool mlir::getFusionComputeCost(AffineForOp srcForOp, LoopNestStats &srcStats, // Subtract out any load users of 'storeMemrefs' nested below // 'insertPointParent'. for (auto value : storeMemrefs) { - for (auto *user : value->getUsers()) { + for (auto *user : value.getUsers()) { if (auto loadOp = dyn_cast<AffineLoadOp>(user)) { SmallVector<AffineForOp, 4> loops; // Check if any loop in loop nest surrounding 'user' is diff --git a/mlir/lib/Transforms/Utils/LoopUtils.cpp b/mlir/lib/Transforms/Utils/LoopUtils.cpp index 0fece54132a..9d7e7cddb05 100644 --- a/mlir/lib/Transforms/Utils/LoopUtils.cpp +++ b/mlir/lib/Transforms/Utils/LoopUtils.cpp @@ -97,9 +97,8 @@ void mlir::getCleanupLoopLowerBound(AffineForOp forOp, unsigned unrollFactor, canonicalizeMapAndOperands(map, operands); // Remove any affine.apply's that became dead from the simplification above. for (auto v : bumpValues) { - if (v->use_empty()) { - v->getDefiningOp()->erase(); - } + if (v.use_empty()) + v.getDefiningOp()->erase(); } if (lb.use_empty()) lb.erase(); @@ -120,23 +119,23 @@ LogicalResult mlir::promoteIfSingleIteration(AffineForOp forOp) { // Replaces all IV uses to its single iteration value. 
auto iv = forOp.getInductionVar(); Operation *op = forOp.getOperation(); - if (!iv->use_empty()) { + if (!iv.use_empty()) { if (forOp.hasConstantLowerBound()) { OpBuilder topBuilder(op->getParentOfType<FuncOp>().getBody()); auto constOp = topBuilder.create<ConstantIndexOp>( forOp.getLoc(), forOp.getConstantLowerBound()); - iv->replaceAllUsesWith(constOp); + iv.replaceAllUsesWith(constOp); } else { AffineBound lb = forOp.getLowerBound(); SmallVector<Value, 4> lbOperands(lb.operand_begin(), lb.operand_end()); OpBuilder builder(op->getBlock(), Block::iterator(op)); if (lb.getMap() == builder.getDimIdentityMap()) { // No need of generating an affine.apply. - iv->replaceAllUsesWith(lbOperands[0]); + iv.replaceAllUsesWith(lbOperands[0]); } else { auto affineApplyOp = builder.create<AffineApplyOp>( op->getLoc(), lb.getMap(), lbOperands); - iv->replaceAllUsesWith(affineApplyOp); + iv.replaceAllUsesWith(affineApplyOp); } } } @@ -192,7 +191,7 @@ generateLoop(AffineMap lbMap, AffineMap ubMap, // remapped to results of cloned operations, and their IV used remapped. // Generate the remapping if the shift is not zero: remappedIV = newIV - // shift. - if (!srcIV->use_empty() && shift != 0) { + if (!srcIV.use_empty() && shift != 0) { auto ivRemap = bodyBuilder.create<AffineApplyOp>( srcForInst.getLoc(), bodyBuilder.getSingleDimShiftAffineMap( @@ -474,7 +473,7 @@ LogicalResult mlir::loopUnrollByFactor(AffineForOp forOp, // If the induction variable is used, create a remapping to the value for // this unrolled instance. - if (!forOpIV->use_empty()) { + if (!forOpIV.use_empty()) { // iv' = iv + 1/2/3...unrollFactor-1; auto d0 = builder.getAffineDimExpr(0); auto bumpMap = AffineMap::get(1, 0, {d0 + i * step}); @@ -835,7 +834,7 @@ Loops mlir::tilePerfectlyNested(loop::ForOp rootForOp, ArrayRef<Value> sizes) { static Value ceilDivPositive(OpBuilder &builder, Location loc, Value dividend, int64_t divisor) { assert(divisor > 0 && "expected positive divisor"); - assert(dividend->getType().isIndex() && "expected index-typed value"); + assert(dividend.getType().isIndex() && "expected index-typed value"); Value divisorMinusOneCst = builder.create<ConstantIndexOp>(loc, divisor - 1); Value divisorCst = builder.create<ConstantIndexOp>(loc, divisor); @@ -849,7 +848,7 @@ static Value ceilDivPositive(OpBuilder &builder, Location loc, Value dividend, // where divis is rounding-to-zero division. static Value ceilDivPositive(OpBuilder &builder, Location loc, Value dividend, Value divisor) { - assert(dividend->getType().isIndex() && "expected index-typed value"); + assert(dividend.getType().isIndex() && "expected index-typed value"); Value cstOne = builder.create<ConstantIndexOp>(loc, 1); Value divisorMinusOne = builder.create<SubIOp>(loc, divisor, cstOne); @@ -968,7 +967,7 @@ TileLoops mlir::extractFixedOuterLoops(loop::ForOp rootForOp, static void replaceAllUsesExcept(Value orig, Value replacement, const SmallPtrSetImpl<Operation *> &exceptions) { - for (auto &use : llvm::make_early_inc_range(orig->getUses())) { + for (auto &use : llvm::make_early_inc_range(orig.getUses())) { if (exceptions.count(use.getOwner()) == 0) use.set(replacement); } @@ -992,12 +991,12 @@ static void normalizeLoop(loop::ForOp loop, loop::ForOp outer, // a constant one step. 
bool isZeroBased = false; if (auto ubCst = - dyn_cast_or_null<ConstantIndexOp>(loop.lowerBound()->getDefiningOp())) + dyn_cast_or_null<ConstantIndexOp>(loop.lowerBound().getDefiningOp())) isZeroBased = ubCst.getValue() == 0; bool isStepOne = false; if (auto stepCst = - dyn_cast_or_null<ConstantIndexOp>(loop.step()->getDefiningOp())) + dyn_cast_or_null<ConstantIndexOp>(loop.step().getDefiningOp())) isStepOne = stepCst.getValue() == 1; if (isZeroBased && isStepOne) @@ -1034,8 +1033,8 @@ static void normalizeLoop(loop::ForOp loop, loop::ForOp outer, Value shifted = isZeroBased ? scaled : builder.create<AddIOp>(loc, scaled, lb); - SmallPtrSet<Operation *, 2> preserve{scaled->getDefiningOp(), - shifted->getDefiningOp()}; + SmallPtrSet<Operation *, 2> preserve{scaled.getDefiningOp(), + shifted.getDefiningOp()}; replaceAllUsesExcept(loop.getInductionVar(), shifted, preserve); } @@ -1175,7 +1174,7 @@ static void getMultiLevelStrides(const MemRefRegion ®ion, int64_t numEltPerStride = 1; int64_t stride = 1; for (int d = bufferShape.size() - 1; d >= 1; d--) { - int64_t dimSize = region.memref->getType().cast<MemRefType>().getDimSize(d); + int64_t dimSize = region.memref.getType().cast<MemRefType>().getDimSize(d); stride *= dimSize; numEltPerStride *= bufferShape[d]; // A stride is needed only if the region has a shorter extent than the @@ -1295,7 +1294,7 @@ static LogicalResult generateCopy( auto loc = region.loc; auto memref = region.memref; - auto memRefType = memref->getType().cast<MemRefType>(); + auto memRefType = memref.getType().cast<MemRefType>(); auto layoutMaps = memRefType.getAffineMaps(); if (layoutMaps.size() > 1 || @@ -1560,7 +1559,7 @@ static bool getFullMemRefAsRegion(Operation *opInst, unsigned numParamLoopIVs, assert(false && "expected load or store op"); return false; } - auto memRefType = region->memref->getType().cast<MemRefType>(); + auto memRefType = region->memref.getType().cast<MemRefType>(); if (!memRefType.hasStaticShape()) return false; diff --git a/mlir/lib/Transforms/Utils/RegionUtils.cpp b/mlir/lib/Transforms/Utils/RegionUtils.cpp index ca26074f288..197f608f82c 100644 --- a/mlir/lib/Transforms/Utils/RegionUtils.cpp +++ b/mlir/lib/Transforms/Utils/RegionUtils.cpp @@ -20,7 +20,7 @@ using namespace mlir; void mlir::replaceAllUsesInRegionWith(Value orig, Value replacement, Region ®ion) { - for (auto &use : llvm::make_early_inc_range(orig->getUses())) { + for (auto &use : llvm::make_early_inc_range(orig.getUses())) { if (region.isAncestor(use.getOwner()->getParentRegion())) use.set(replacement); } @@ -42,7 +42,7 @@ void mlir::visitUsedValuesDefinedAbove( region.walk([callback, &properAncestors](Operation *op) { for (OpOperand &operand : op->getOpOperands()) // Callback on values defined in a proper ancestor of region. 
- if (properAncestors.count(operand.get()->getParentRegion())) + if (properAncestors.count(operand.get().getParentRegion())) callback(&operand); }); } @@ -180,7 +180,7 @@ static bool isUseSpeciallyKnownDead(OpOperand &use, LiveMap &liveMap) { } static void processValue(Value value, LiveMap &liveMap) { - bool provedLive = llvm::any_of(value->getUses(), [&](OpOperand &use) { + bool provedLive = llvm::any_of(value.getUses(), [&](OpOperand &use) { if (isUseSpeciallyKnownDead(use, liveMap)) return false; return liveMap.wasProvenLive(use.getOwner()); diff --git a/mlir/lib/Transforms/Utils/Utils.cpp b/mlir/lib/Transforms/Utils/Utils.cpp index a6629183dee..8dbccd1a72d 100644 --- a/mlir/lib/Transforms/Utils/Utils.cpp +++ b/mlir/lib/Transforms/Utils/Utils.cpp @@ -52,9 +52,9 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value oldMemRef, Value newMemRef, AffineMap indexRemap, ArrayRef<Value> extraOperands, ArrayRef<Value> symbolOperands) { - unsigned newMemRefRank = newMemRef->getType().cast<MemRefType>().getRank(); + unsigned newMemRefRank = newMemRef.getType().cast<MemRefType>().getRank(); (void)newMemRefRank; // unused in opt mode - unsigned oldMemRefRank = oldMemRef->getType().cast<MemRefType>().getRank(); + unsigned oldMemRefRank = oldMemRef.getType().cast<MemRefType>().getRank(); (void)oldMemRefRank; // unused in opt mode if (indexRemap) { assert(indexRemap.getNumSymbols() == symbolOperands.size() && @@ -67,8 +67,8 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value oldMemRef, Value newMemRef, } // Assert same elemental type. - assert(oldMemRef->getType().cast<MemRefType>().getElementType() == - newMemRef->getType().cast<MemRefType>().getElementType()); + assert(oldMemRef.getType().cast<MemRefType>().getElementType() == + newMemRef.getType().cast<MemRefType>().getElementType()); if (!isMemRefDereferencingOp(*op)) // Failure: memref used in a non-dereferencing context (potentially @@ -152,7 +152,7 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value oldMemRef, Value newMemRef, // Prepend 'extraIndices' in 'newMapOperands'. for (auto extraIndex : extraIndices) { - assert(extraIndex->getDefiningOp()->getNumResults() == 1 && + assert(extraIndex.getDefiningOp()->getNumResults() == 1 && "single result op's expected to generate these indices"); assert((isValidDim(extraIndex) || isValidSymbol(extraIndex)) && "invalid memory op index"); @@ -171,8 +171,8 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value oldMemRef, Value newMemRef, canonicalizeMapAndOperands(&newMap, &newMapOperands); // Remove any affine.apply's that became dead as a result of composition. for (auto value : affineApplyOps) - if (value->use_empty()) - value->getDefiningOp()->erase(); + if (value.use_empty()) + value.getDefiningOp()->erase(); // Construct the new operation using this memref. OperationState state(op->getLoc(), op->getName()); @@ -195,7 +195,7 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value oldMemRef, Value newMemRef, // Result types don't change. Both memref's are of the same elemental type. state.types.reserve(op->getNumResults()); for (auto result : op->getResults()) - state.types.push_back(result->getType()); + state.types.push_back(result.getType()); // Add attribute for 'newMap', other Attributes do not change. 
auto newMapAttr = AffineMapAttr::get(newMap); @@ -222,9 +222,9 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value oldMemRef, Value newMemRef, ArrayRef<Value> symbolOperands, Operation *domInstFilter, Operation *postDomInstFilter) { - unsigned newMemRefRank = newMemRef->getType().cast<MemRefType>().getRank(); + unsigned newMemRefRank = newMemRef.getType().cast<MemRefType>().getRank(); (void)newMemRefRank; // unused in opt mode - unsigned oldMemRefRank = oldMemRef->getType().cast<MemRefType>().getRank(); + unsigned oldMemRefRank = oldMemRef.getType().cast<MemRefType>().getRank(); (void)oldMemRefRank; if (indexRemap) { assert(indexRemap.getNumSymbols() == symbolOperands.size() && @@ -237,8 +237,8 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value oldMemRef, Value newMemRef, } // Assert same elemental type. - assert(oldMemRef->getType().cast<MemRefType>().getElementType() == - newMemRef->getType().cast<MemRefType>().getElementType()); + assert(oldMemRef.getType().cast<MemRefType>().getElementType() == + newMemRef.getType().cast<MemRefType>().getElementType()); std::unique_ptr<DominanceInfo> domInfo; std::unique_ptr<PostDominanceInfo> postDomInfo; @@ -254,7 +254,7 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value oldMemRef, Value newMemRef, // DenseSet since an operation could potentially have multiple uses of a // memref (although rare), and the replacement later is going to erase ops. DenseSet<Operation *> opsToReplace; - for (auto *op : oldMemRef->getUsers()) { + for (auto *op : oldMemRef.getUsers()) { // Skip this use if it's not dominated by domInstFilter. if (domInstFilter && !domInfo->dominates(domInstFilter, op)) continue; @@ -325,7 +325,7 @@ void mlir::createAffineComputationSlice( SmallVector<Value, 4> subOperands; subOperands.reserve(opInst->getNumOperands()); for (auto operand : opInst->getOperands()) - if (isa_and_nonnull<AffineApplyOp>(operand->getDefiningOp())) + if (isa_and_nonnull<AffineApplyOp>(operand.getDefiningOp())) subOperands.push_back(operand); // Gather sequence of AffineApplyOps reachable from 'subOperands'. @@ -340,7 +340,7 @@ void mlir::createAffineComputationSlice( bool localized = true; for (auto *op : affineApplyOps) { for (auto result : op->getResults()) { - for (auto *user : result->getUsers()) { + for (auto *user : result.getUsers()) { if (user != opInst) { localized = false; break; @@ -461,9 +461,9 @@ LogicalResult mlir::normalizeMemRef(AllocOp allocOp) { } // Replace any uses of the original alloc op and erase it. All remaining uses // have to be dealloc's; RAMUW above would've failed otherwise. 
- assert(std::all_of(oldMemRef->user_begin(), oldMemRef->user_end(), - [](Operation *op) { return isa<DeallocOp>(op); })); - oldMemRef->replaceAllUsesWith(newAlloc); + assert(llvm::all_of(oldMemRef.getUsers(), + [](Operation *op) { return isa<DeallocOp>(op); })); + oldMemRef.replaceAllUsesWith(newAlloc); allocOp.erase(); return success(); } diff --git a/mlir/lib/Transforms/Vectorize.cpp b/mlir/lib/Transforms/Vectorize.cpp index 6b2b3e1ee7e..fb10ed94062 100644 --- a/mlir/lib/Transforms/Vectorize.cpp +++ b/mlir/lib/Transforms/Vectorize.cpp @@ -797,7 +797,7 @@ template <typename LoadOrStoreOpPointer> static LogicalResult vectorizeRootOrTerminal(Value iv, LoadOrStoreOpPointer memoryOp, VectorizationState *state) { - auto memRefType = memoryOp.getMemRef()->getType().template cast<MemRefType>(); + auto memRefType = memoryOp.getMemRef().getType().template cast<MemRefType>(); auto elementType = memRefType.getElementType(); // TODO(ntv): ponder whether we want to further vectorize a vector value. @@ -981,10 +981,9 @@ static Value vectorizeConstant(Operation *op, ConstantOp constant, Type type) { /// TODO(ntv): handle more complex cases. static Value vectorizeOperand(Value operand, Operation *op, VectorizationState *state) { - LLVM_DEBUG(dbgs() << "\n[early-vect]vectorize operand: "); - LLVM_DEBUG(operand->print(dbgs())); + LLVM_DEBUG(dbgs() << "\n[early-vect]vectorize operand: " << operand); // 1. If this value has already been vectorized this round, we are done. - if (state->vectorizedSet.count(operand->getDefiningOp()) > 0) { + if (state->vectorizedSet.count(operand.getDefiningOp()) > 0) { LLVM_DEBUG(dbgs() << " -> already vector operand"); return operand; } @@ -995,24 +994,22 @@ static Value vectorizeOperand(Value operand, Operation *op, auto it = state->replacementMap.find(operand); if (it != state->replacementMap.end()) { auto res = it->second; - LLVM_DEBUG(dbgs() << "-> delayed replacement by: "); - LLVM_DEBUG(res->print(dbgs())); + LLVM_DEBUG(dbgs() << "-> delayed replacement by: " << res); return res; } // 2. TODO(ntv): broadcast needed. - if (operand->getType().isa<VectorType>()) { + if (operand.getType().isa<VectorType>()) { LLVM_DEBUG(dbgs() << "-> non-vectorizable"); return nullptr; } // 3. vectorize constant. - if (auto constant = dyn_cast<ConstantOp>(operand->getDefiningOp())) { + if (auto constant = dyn_cast<ConstantOp>(operand.getDefiningOp())) { return vectorizeConstant( op, constant, - VectorType::get(state->strategy->vectorSizes, operand->getType())); + VectorType::get(state->strategy->vectorSizes, operand.getType())); } // 4. currently non-vectorizable. - LLVM_DEBUG(dbgs() << "-> non-vectorizable"); - LLVM_DEBUG(operand->print(dbgs())); + LLVM_DEBUG(dbgs() << "-> non-vectorizable: " << operand); return nullptr; } @@ -1073,7 +1070,7 @@ static Operation *vectorizeOneOperation(Operation *opInst, SmallVector<Type, 8> vectorTypes; for (auto v : opInst->getResults()) { vectorTypes.push_back( - VectorType::get(state->strategy->vectorSizes, v->getType())); + VectorType::get(state->strategy->vectorSizes, v.getType())); } SmallVector<Value, 8> vectorOperands; for (auto v : opInst->getOperands()) { @@ -1179,7 +1176,7 @@ static LogicalResult vectorizeRootMatch(NestedMatch m, auto clonedLoop = cast<AffineForOp>(builder.clone(*loopInst)); struct Guard { LogicalResult failure() { - loop.getInductionVar()->replaceAllUsesWith(clonedLoop.getInductionVar()); + loop.getInductionVar().replaceAllUsesWith(clonedLoop.getInductionVar()); loop.erase(); return mlir::failure(); } |
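Beyond the pointer-to-value syntax change, the last two files also move to range
helpers and stream printing: the normalizeMemRef assertion in Utils.cpp now uses
llvm::all_of over getUsers() instead of std::all_of over user_begin()/user_end(),
and Vectorize.cpp streams a Value straight into the debug output instead of
calling print. A short sketch of both idioms, assuming the header paths of this
revision; the helper functions are illustrative, not part of the commit:

    #define DEBUG_TYPE "value-sketch"

    #include "llvm/ADT/STLExtras.h"            // llvm::all_of over ranges
    #include "llvm/Support/Debug.h"
    #include "mlir/Dialect/StandardOps/Ops.h"  // DeallocOp (include path assumed)
    #include "mlir/IR/Value.h"

    using namespace mlir;

    // True if every user of 'memref' is a dealloc, mirroring the rewritten
    // assertion in normalizeMemRef.
    static bool onlyDeallocUsers(Value memref) {
      return llvm::all_of(memref.getUsers(),
                          [](Operation *op) { return isa<DeallocOp>(op); });
    }

    // Value provides operator<< on raw_ostream, so it can be streamed directly.
    static void dumpValue(Value v) {
      LLVM_DEBUG(llvm::dbgs() << "\n[sketch] value: " << v);
    }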