Diffstat (limited to 'mlir/lib/Transforms')
 mlir/lib/Transforms/DialectConversion.cpp        | 13
 mlir/lib/Transforms/Inliner.cpp                  |  8
 mlir/lib/Transforms/LoopInvariantCodeMotion.cpp  |  2
 mlir/lib/Transforms/Utils/FoldUtils.cpp          |  7
 mlir/lib/Transforms/Utils/InliningUtils.cpp      | 11
 mlir/lib/Transforms/Utils/LoopUtils.cpp          | 16
 mlir/lib/Transforms/Utils/RegionUtils.cpp        | 12
 mlir/lib/Transforms/Vectorize.cpp                | 14
 mlir/lib/Transforms/ViewOpGraph.cpp              | 80
 mlir/lib/Transforms/ViewRegionGraph.cpp          | 21
 10 files changed, 88 insertions(+), 96 deletions(-)
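
The `llvm::` qualifiers dropped below are redundant because MLIR re-exports the commonly used LLVM ADT/Support types into the `mlir` namespace through its forwarding header (mlir/include/mlir/Support/LLVM.h); inside `namespace mlir` the unqualified names resolve to the same types, so the change is purely cosmetic. A minimal sketch of that forwarding, limited to the types touched in this diff (the real header forwards many more names, and the exact include set shown here is an assumption):

// Abridged sketch of the mlir/Support/LLVM.h forwarding pattern: it pulls
// selected llvm:: types into namespace mlir so code in that namespace can
// name them unqualified.
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"        // function_ref
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/raw_ostream.h"

namespace mlir {
// With these aliases in scope, e.g. SmallVector<unsigned, 4> written inside
// namespace mlir is the same type as llvm::SmallVector<unsigned, 4>.
using llvm::ArrayRef;
using llvm::DenseSet;
using llvm::function_ref;
using llvm::iterator_range;
using llvm::MutableArrayRef;
using llvm::Optional;
using llvm::raw_ostream;
using llvm::SmallPtrSet;
using llvm::SmallVector;
using llvm::SmallVectorImpl;
using llvm::Twine;
} // namespace mlir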
diff --git a/mlir/lib/Transforms/DialectConversion.cpp b/mlir/lib/Transforms/DialectConversion.cpp
index 4b4575a5e50..37c918fe9be 100644
--- a/mlir/lib/Transforms/DialectConversion.cpp
+++ b/mlir/lib/Transforms/DialectConversion.cpp
@@ -35,7 +35,7 @@ using namespace mlir::detail;
/// If 'target' is nonnull, operations that are recursively legal have their
/// regions pre-filtered to avoid considering them for legalization.
static LogicalResult
-computeConversionSet(llvm::iterator_range<Region::iterator> region,
+computeConversionSet(iterator_range<Region::iterator> region,
Location regionLoc, std::vector<Operation *> &toConvert,
ConversionTarget *target = nullptr) {
if (llvm::empty(region))
@@ -537,9 +537,8 @@ struct ConversionPatternRewriterImpl {
Region::iterator before);
/// Notifies that the blocks of a region were cloned into another.
- void
- notifyRegionWasClonedBefore(llvm::iterator_range<Region::iterator> &blocks,
- Location origRegionLoc);
+ void notifyRegionWasClonedBefore(iterator_range<Region::iterator> &blocks,
+ Location origRegionLoc);
/// Remap the given operands to those with potentially different types.
void remapValues(Operation::operand_range operands,
@@ -742,7 +741,7 @@ void ConversionPatternRewriterImpl::notifyRegionIsBeingInlinedBefore(
}
void ConversionPatternRewriterImpl::notifyRegionWasClonedBefore(
- llvm::iterator_range<Region::iterator> &blocks, Location origRegionLoc) {
+ iterator_range<Region::iterator> &blocks, Location origRegionLoc) {
for (Block &block : blocks)
blockActions.push_back(BlockAction::getCreate(&block));
@@ -986,7 +985,7 @@ private:
void computeLegalizationGraphBenefit();
/// The current set of patterns that have been applied.
- llvm::SmallPtrSet<RewritePattern *, 8> appliedPatterns;
+ SmallPtrSet<RewritePattern *, 8> appliedPatterns;
/// The set of legality information for operations transitively supported by
/// the target.
@@ -1572,7 +1571,7 @@ void mlir::populateFuncOpTypeConversionPattern(
/// 'convertSignatureArg' for each argument. This function should return a valid
/// conversion for the signature on success, None otherwise.
auto TypeConverter::convertBlockSignature(Block *block)
- -> llvm::Optional<SignatureConversion> {
+ -> Optional<SignatureConversion> {
SignatureConversion conversion(block->getNumArguments());
for (unsigned i = 0, e = block->getNumArguments(); i != e; ++i)
if (failed(convertSignatureArg(i, block->getArgument(i)->getType(),
diff --git a/mlir/lib/Transforms/Inliner.cpp b/mlir/lib/Transforms/Inliner.cpp
index dbb5381ed70..9948a429616 100644
--- a/mlir/lib/Transforms/Inliner.cpp
+++ b/mlir/lib/Transforms/Inliner.cpp
@@ -79,11 +79,11 @@ struct ResolvedCall {
/// Collect all of the callable operations within the given range of blocks. If
/// `traverseNestedCGNodes` is true, this will also collect call operations
/// inside of nested callgraph nodes.
-static void collectCallOps(llvm::iterator_range<Region::iterator> blocks,
+static void collectCallOps(iterator_range<Region::iterator> blocks,
CallGraph &cg, SmallVectorImpl<ResolvedCall> &calls,
bool traverseNestedCGNodes) {
SmallVector<Block *, 8> worklist;
- auto addToWorklist = [&](llvm::iterator_range<Region::iterator> blocks) {
+ auto addToWorklist = [&](iterator_range<Region::iterator> blocks) {
for (Block &block : blocks)
worklist.push_back(&block);
};
@@ -120,8 +120,8 @@ struct Inliner : public InlinerInterface {
/// Process a set of blocks that have been inlined. This callback is invoked
/// *before* inlined terminator operations have been processed.
- void processInlinedBlocks(
- llvm::iterator_range<Region::iterator> inlinedBlocks) final {
+ void
+ processInlinedBlocks(iterator_range<Region::iterator> inlinedBlocks) final {
collectCallOps(inlinedBlocks, cg, calls, /*traverseNestedCGNodes=*/true);
}
diff --git a/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp b/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp
index 738524aa6ec..4932494a04b 100644
--- a/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp
+++ b/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp
@@ -50,7 +50,7 @@ public:
// - the op has no side-effects. If sideEffecting is Never, sideeffects of this
// op and its nested ops are ignored.
static bool canBeHoisted(Operation *op,
- llvm::function_ref<bool(Value *)> definedOutside,
+ function_ref<bool(Value *)> definedOutside,
SideEffecting sideEffecting,
SideEffectsInterface &interface) {
// Check that dependencies are defined outside of loop.
diff --git a/mlir/lib/Transforms/Utils/FoldUtils.cpp b/mlir/lib/Transforms/Utils/FoldUtils.cpp
index 5faca1296a8..d4b7caae527 100644
--- a/mlir/lib/Transforms/Utils/FoldUtils.cpp
+++ b/mlir/lib/Transforms/Utils/FoldUtils.cpp
@@ -82,9 +82,8 @@ static Operation *materializeConstant(Dialect *dialect, OpBuilder &builder,
//===----------------------------------------------------------------------===//
LogicalResult OperationFolder::tryToFold(
- Operation *op,
- llvm::function_ref<void(Operation *)> processGeneratedConstants,
- llvm::function_ref<void(Operation *)> preReplaceAction) {
+ Operation *op, function_ref<void(Operation *)> processGeneratedConstants,
+ function_ref<void(Operation *)> preReplaceAction) {
// If this is a unique'd constant, return failure as we know that it has
// already been folded.
if (referencedDialects.count(op))
@@ -140,7 +139,7 @@ void OperationFolder::notifyRemoval(Operation *op) {
/// `results` with the results of the folding.
LogicalResult OperationFolder::tryToFold(
Operation *op, SmallVectorImpl<Value *> &results,
- llvm::function_ref<void(Operation *)> processGeneratedConstants) {
+ function_ref<void(Operation *)> processGeneratedConstants) {
SmallVector<Attribute, 8> operandConstants;
SmallVector<OpFoldResult, 8> foldResults;
diff --git a/mlir/lib/Transforms/Utils/InliningUtils.cpp b/mlir/lib/Transforms/Utils/InliningUtils.cpp
index fd08c53b0dc..e8e6ae03338 100644
--- a/mlir/lib/Transforms/Utils/InliningUtils.cpp
+++ b/mlir/lib/Transforms/Utils/InliningUtils.cpp
@@ -35,7 +35,7 @@ using namespace mlir;
/// Remap locations from the inlined blocks with CallSiteLoc locations with the
/// provided caller location.
static void
-remapInlinedLocations(llvm::iterator_range<Region::iterator> inlinedBlocks,
+remapInlinedLocations(iterator_range<Region::iterator> inlinedBlocks,
Location callerLoc) {
DenseMap<Location, Location> mappedLocations;
auto remapOpLoc = [&](Operation *op) {
@@ -50,9 +50,8 @@ remapInlinedLocations(llvm::iterator_range<Region::iterator> inlinedBlocks,
block.walk(remapOpLoc);
}
-static void
-remapInlinedOperands(llvm::iterator_range<Region::iterator> inlinedBlocks,
- BlockAndValueMapping &mapper) {
+static void remapInlinedOperands(iterator_range<Region::iterator> inlinedBlocks,
+ BlockAndValueMapping &mapper) {
auto remapOperands = [&](Operation *op) {
for (auto &operand : op->getOpOperands())
if (auto *mappedOp = mapper.lookupOrNull(operand.get()))
@@ -133,7 +132,7 @@ LogicalResult mlir::inlineRegion(InlinerInterface &interface, Region *src,
Operation *inlinePoint,
BlockAndValueMapping &mapper,
ArrayRef<Value *> resultsToReplace,
- llvm::Optional<Location> inlineLoc,
+ Optional<Location> inlineLoc,
bool shouldCloneInlinedRegion) {
// We expect the region to have at least one block.
if (src->empty())
@@ -226,7 +225,7 @@ LogicalResult mlir::inlineRegion(InlinerInterface &interface, Region *src,
Operation *inlinePoint,
ArrayRef<Value *> inlinedOperands,
ArrayRef<Value *> resultsToReplace,
- llvm::Optional<Location> inlineLoc,
+ Optional<Location> inlineLoc,
bool shouldCloneInlinedRegion) {
// We expect the region to have at least one block.
if (src->empty())
diff --git a/mlir/lib/Transforms/Utils/LoopUtils.cpp b/mlir/lib/Transforms/Utils/LoopUtils.cpp
index 50248b01359..419df8d2705 100644
--- a/mlir/lib/Transforms/Utils/LoopUtils.cpp
+++ b/mlir/lib/Transforms/Utils/LoopUtils.cpp
@@ -532,11 +532,11 @@ void mlir::interchangeLoops(AffineForOp forOpA, AffineForOp forOpB) {
// desired loop interchange would violate dependences by making the
// dependence component lexicographically negative.
static bool checkLoopInterchangeDependences(
- const std::vector<llvm::SmallVector<DependenceComponent, 2>> &depCompsVec,
+ const std::vector<SmallVector<DependenceComponent, 2>> &depCompsVec,
ArrayRef<AffineForOp> loops, ArrayRef<unsigned> loopPermMap) {
// Invert permutation map.
unsigned maxLoopDepth = loops.size();
- llvm::SmallVector<unsigned, 4> loopPermMapInv;
+ SmallVector<unsigned, 4> loopPermMapInv;
loopPermMapInv.resize(maxLoopDepth);
for (unsigned i = 0; i < maxLoopDepth; ++i)
loopPermMapInv[loopPermMap[i]] = i;
@@ -547,7 +547,7 @@ static bool checkLoopInterchangeDependences(
// Example 1: [-1, 1][0, 0]
// Example 2: [0, 0][-1, 1]
for (unsigned i = 0, e = depCompsVec.size(); i < e; ++i) {
- const llvm::SmallVector<DependenceComponent, 2> &depComps = depCompsVec[i];
+ const SmallVector<DependenceComponent, 2> &depComps = depCompsVec[i];
assert(depComps.size() >= maxLoopDepth);
// Check if the first non-zero dependence component is positive.
// This iterates through loops in the desired order.
@@ -572,7 +572,7 @@ bool mlir::isValidLoopInterchangePermutation(ArrayRef<AffineForOp> loops,
// rooted at 'loops[0]', at loop depths in range [1, maxLoopDepth].
assert(loopPermMap.size() == loops.size());
unsigned maxLoopDepth = loops.size();
- std::vector<llvm::SmallVector<DependenceComponent, 2>> depCompsVec;
+ std::vector<SmallVector<DependenceComponent, 2>> depCompsVec;
getDependenceComponents(loops[0], maxLoopDepth, &depCompsVec);
return checkLoopInterchangeDependences(depCompsVec, loops, loopPermMap);
}
@@ -608,13 +608,13 @@ AffineForOp mlir::sinkSequentialLoops(AffineForOp forOp) {
// Gather dependence components for dependences between all ops in loop nest
// rooted at 'loops[0]', at loop depths in range [1, maxLoopDepth].
unsigned maxLoopDepth = loops.size();
- std::vector<llvm::SmallVector<DependenceComponent, 2>> depCompsVec;
+ std::vector<SmallVector<DependenceComponent, 2>> depCompsVec;
getDependenceComponents(loops[0], maxLoopDepth, &depCompsVec);
// Mark loops as either parallel or sequential.
- llvm::SmallVector<bool, 8> isParallelLoop(maxLoopDepth, true);
+ SmallVector<bool, 8> isParallelLoop(maxLoopDepth, true);
for (unsigned i = 0, e = depCompsVec.size(); i < e; ++i) {
- llvm::SmallVector<DependenceComponent, 2> &depComps = depCompsVec[i];
+ SmallVector<DependenceComponent, 2> &depComps = depCompsVec[i];
assert(depComps.size() >= maxLoopDepth);
for (unsigned j = 0; j < maxLoopDepth; ++j) {
DependenceComponent &depComp = depComps[j];
@@ -632,7 +632,7 @@ AffineForOp mlir::sinkSequentialLoops(AffineForOp forOp) {
// Compute permutation of loops that sinks sequential loops (and thus raises
// parallel loops) while preserving relative order.
- llvm::SmallVector<unsigned, 4> loopPermMap(maxLoopDepth);
+ SmallVector<unsigned, 4> loopPermMap(maxLoopDepth);
unsigned nextSequentialLoop = numParallelLoops;
unsigned nextParallelLoop = 0;
for (unsigned i = 0; i < maxLoopDepth; ++i) {
diff --git a/mlir/lib/Transforms/Utils/RegionUtils.cpp b/mlir/lib/Transforms/Utils/RegionUtils.cpp
index ba77ceacf28..b91b189b381 100644
--- a/mlir/lib/Transforms/Utils/RegionUtils.cpp
+++ b/mlir/lib/Transforms/Utils/RegionUtils.cpp
@@ -36,14 +36,13 @@ void mlir::replaceAllUsesInRegionWith(Value *orig, Value *replacement,
}
void mlir::visitUsedValuesDefinedAbove(
- Region &region, Region &limit,
- llvm::function_ref<void(OpOperand *)> callback) {
+ Region &region, Region &limit, function_ref<void(OpOperand *)> callback) {
assert(limit.isAncestor(&region) &&
"expected isolation limit to be an ancestor of the given region");
// Collect proper ancestors of `limit` upfront to avoid traversing the region
// tree for every value.
- llvm::SmallPtrSet<Region *, 4> properAncestors;
+ SmallPtrSet<Region *, 4> properAncestors;
for (auto *reg = limit.getParentRegion(); reg != nullptr;
reg = reg->getParentRegion()) {
properAncestors.insert(reg);
@@ -58,8 +57,7 @@ void mlir::visitUsedValuesDefinedAbove(
}
void mlir::visitUsedValuesDefinedAbove(
- llvm::MutableArrayRef<Region> regions,
- llvm::function_ref<void(OpOperand *)> callback) {
+ MutableArrayRef<Region> regions, function_ref<void(OpOperand *)> callback) {
for (Region &region : regions)
visitUsedValuesDefinedAbove(region, region, callback);
}
@@ -71,7 +69,7 @@ void mlir::getUsedValuesDefinedAbove(Region &region, Region &limit,
});
}
-void mlir::getUsedValuesDefinedAbove(llvm::MutableArrayRef<Region> regions,
+void mlir::getUsedValuesDefinedAbove(MutableArrayRef<Region> regions,
llvm::SetVector<Value *> &values) {
for (Region &region : regions)
getUsedValuesDefinedAbove(region, region, values);
@@ -352,7 +350,7 @@ static LogicalResult runRegionDCE(MutableArrayRef<Region> regions) {
/// includes transformations like unreachable block elimination, dead argument
/// elimination, as well as some other DCE. This function returns success if any
/// of the regions were simplified, failure otherwise.
-LogicalResult mlir::simplifyRegions(llvm::MutableArrayRef<Region> regions) {
+LogicalResult mlir::simplifyRegions(MutableArrayRef<Region> regions) {
LogicalResult eliminatedBlocks = eraseUnreachableBlocks(regions);
LogicalResult eliminatedOpsOrArgs = runRegionDCE(regions);
return success(succeeded(eliminatedBlocks) || succeeded(eliminatedOpsOrArgs));
diff --git a/mlir/lib/Transforms/Vectorize.cpp b/mlir/lib/Transforms/Vectorize.cpp
index 036e53435ae..e3212d54e42 100644
--- a/mlir/lib/Transforms/Vectorize.cpp
+++ b/mlir/lib/Transforms/Vectorize.cpp
@@ -557,7 +557,7 @@ static llvm::cl::list<int> clFastestVaryingPattern(
/// Forward declaration.
static FilterFunctionType
-isVectorizableLoopPtrFactory(const llvm::DenseSet<Operation *> &parallelLoops,
+isVectorizableLoopPtrFactory(const DenseSet<Operation *> &parallelLoops,
int fastestVaryingMemRefDimension);
/// Creates a vectorization pattern from the command line arguments.
@@ -565,7 +565,7 @@ isVectorizableLoopPtrFactory(const llvm::DenseSet<Operation *> &parallelLoops,
/// If the command line argument requests a pattern of higher order, returns an
/// empty pattern list which will conservatively result in no vectorization.
static std::vector<NestedPattern>
-makePatterns(const llvm::DenseSet<Operation *> &parallelLoops, int vectorRank,
+makePatterns(const DenseSet<Operation *> &parallelLoops, int vectorRank,
ArrayRef<int64_t> fastestVaryingPattern) {
using matcher::For;
int64_t d0 = fastestVaryingPattern.empty() ? -1 : fastestVaryingPattern[0];
@@ -842,8 +842,8 @@ static LogicalResult vectorizeRootOrTerminal(Value *iv,
map(makePtrDynCaster<Value>(), indices),
AffineMapAttr::get(permutationMap),
// TODO(b/144455320) add a proper padding value, not just 0.0 : f32
- state->folder->create<ConstantFloatOp>(
- b, opInst->getLoc(), llvm::APFloat(0.0f), b.getF32Type()));
+ state->folder->create<ConstantFloatOp>(b, opInst->getLoc(),
+ APFloat(0.0f), b.getF32Type()));
state->registerReplacement(opInst, transfer.getOperation());
} else {
state->registerTerminal(opInst);
@@ -889,7 +889,7 @@ static LogicalResult vectorizeAffineForOp(AffineForOp loop, int64_t step,
/// loop whose underlying load/store accesses are either invariant or all
// varying along the `fastestVaryingMemRefDimension`.
static FilterFunctionType
-isVectorizableLoopPtrFactory(const llvm::DenseSet<Operation *> &parallelLoops,
+isVectorizableLoopPtrFactory(const DenseSet<Operation *> &parallelLoops,
int fastestVaryingMemRefDimension) {
return [&parallelLoops, fastestVaryingMemRefDimension](Operation &forOp) {
auto loop = cast<AffineForOp>(forOp);
@@ -1255,7 +1255,7 @@ void Vectorize::runOnFunction() {
// Thread-safe RAII local context, BumpPtrAllocator freed on exit.
NestedPatternContext mlContext;
- llvm::DenseSet<Operation *> parallelLoops;
+ DenseSet<Operation *> parallelLoops;
f.walk([&parallelLoops](AffineForOp loop) {
if (isLoopParallel(loop))
parallelLoops.insert(loop);
@@ -1293,7 +1293,7 @@ void Vectorize::runOnFunction() {
}
std::unique_ptr<OpPassBase<FuncOp>>
-mlir::createVectorizePass(llvm::ArrayRef<int64_t> virtualVectorSize) {
+mlir::createVectorizePass(ArrayRef<int64_t> virtualVectorSize) {
return std::make_unique<Vectorize>(virtualVectorSize);
}
diff --git a/mlir/lib/Transforms/ViewOpGraph.cpp b/mlir/lib/Transforms/ViewOpGraph.cpp
index 503a82bf82b..591562d0245 100644
--- a/mlir/lib/Transforms/ViewOpGraph.cpp
+++ b/mlir/lib/Transforms/ViewOpGraph.cpp
@@ -28,15 +28,17 @@ static llvm::cl::opt<int> elideIfLarger(
llvm::cl::desc("Upper limit to emit elements attribute rather than elide"),
llvm::cl::init(16));
+using namespace mlir;
+
namespace llvm {
// Specialize GraphTraits to treat Block as a graph of Operations as nodes and
// uses as edges.
-template <> struct GraphTraits<mlir::Block *> {
- using GraphType = mlir::Block *;
- using NodeRef = mlir::Operation *;
+template <> struct GraphTraits<Block *> {
+ using GraphType = Block *;
+ using NodeRef = Operation *;
- using ChildIteratorType = mlir::UseIterator;
+ using ChildIteratorType = UseIterator;
static ChildIteratorType child_begin(NodeRef n) {
return ChildIteratorType(n);
}
@@ -46,49 +48,46 @@ template <> struct GraphTraits<mlir::Block *> {
// Operation's destructor is private so use Operation* instead and use
// mapped iterator.
- static mlir::Operation *AddressOf(mlir::Operation &op) { return &op; }
- using nodes_iterator =
- mapped_iterator<mlir::Block::iterator, decltype(&AddressOf)>;
- static nodes_iterator nodes_begin(mlir::Block *b) {
+ static Operation *AddressOf(Operation &op) { return &op; }
+ using nodes_iterator = mapped_iterator<Block::iterator, decltype(&AddressOf)>;
+ static nodes_iterator nodes_begin(Block *b) {
return nodes_iterator(b->begin(), &AddressOf);
}
- static nodes_iterator nodes_end(mlir::Block *b) {
+ static nodes_iterator nodes_end(Block *b) {
return nodes_iterator(b->end(), &AddressOf);
}
};
// Specialize DOTGraphTraits to produce more readable output.
-template <>
-struct DOTGraphTraits<mlir::Block *> : public DefaultDOTGraphTraits {
+template <> struct DOTGraphTraits<Block *> : public DefaultDOTGraphTraits {
using DefaultDOTGraphTraits::DefaultDOTGraphTraits;
- static std::string getNodeLabel(mlir::Operation *op, mlir::Block *);
+ static std::string getNodeLabel(Operation *op, Block *);
};
-std::string DOTGraphTraits<mlir::Block *>::getNodeLabel(mlir::Operation *op,
- mlir::Block *b) {
+std::string DOTGraphTraits<Block *>::getNodeLabel(Operation *op, Block *b) {
// Reuse the print output for the node labels.
std::string ostr;
raw_string_ostream os(ostr);
os << op->getName() << "\n";
- if (!op->getLoc().isa<mlir::UnknownLoc>()) {
+ if (!op->getLoc().isa<UnknownLoc>()) {
os << op->getLoc() << "\n";
}
// Print resultant types
- mlir::interleaveComma(op->getResultTypes(), os);
+ interleaveComma(op->getResultTypes(), os);
os << "\n";
for (auto attr : op->getAttrs()) {
os << '\n' << attr.first << ": ";
// Always emit splat attributes.
- if (attr.second.isa<mlir::SplatElementsAttr>()) {
+ if (attr.second.isa<SplatElementsAttr>()) {
attr.second.print(os);
continue;
}
// Elide "big" elements attributes.
- auto elements = attr.second.dyn_cast<mlir::ElementsAttr>();
+ auto elements = attr.second.dyn_cast<ElementsAttr>();
if (elements && elements.getNumElements() > elideIfLarger) {
os << std::string(elements.getType().getRank(), '[') << "..."
<< std::string(elements.getType().getRank(), ']') << " : "
@@ -96,7 +95,7 @@ std::string DOTGraphTraits<mlir::Block *>::getNodeLabel(mlir::Operation *op,
continue;
}
- auto array = attr.second.dyn_cast<mlir::ArrayAttr>();
+ auto array = attr.second.dyn_cast<ArrayAttr>();
if (array && static_cast<int64_t>(array.size()) > elideIfLarger) {
os << "[...]";
continue;
@@ -114,14 +113,14 @@ namespace {
// PrintOpPass is simple pass to write graph per function.
// Note: this is a module pass only to avoid interleaving on the same ostream
// due to multi-threading over functions.
-struct PrintOpPass : public mlir::ModulePass<PrintOpPass> {
- explicit PrintOpPass(llvm::raw_ostream &os = llvm::errs(),
- bool short_names = false, const llvm::Twine &title = "")
+struct PrintOpPass : public ModulePass<PrintOpPass> {
+ explicit PrintOpPass(raw_ostream &os = llvm::errs(), bool short_names = false,
+ const Twine &title = "")
: os(os), title(title.str()), short_names(short_names) {}
- std::string getOpName(mlir::Operation &op) {
- auto symbolAttr = op.getAttrOfType<mlir::StringAttr>(
- mlir::SymbolTable::getSymbolAttrName());
+ std::string getOpName(Operation &op) {
+ auto symbolAttr =
+ op.getAttrOfType<StringAttr>(SymbolTable::getSymbolAttrName());
if (symbolAttr)
return symbolAttr.getValue();
++unnamedOpCtr;
@@ -129,22 +128,22 @@ struct PrintOpPass : public mlir::ModulePass<PrintOpPass> {
}
// Print all the ops in a module.
- void processModule(mlir::ModuleOp module) {
- for (mlir::Operation &op : module) {
+ void processModule(ModuleOp module) {
+ for (Operation &op : module) {
// Modules may actually be nested, recurse on nesting.
- if (auto nestedModule = llvm::dyn_cast<mlir::ModuleOp>(op)) {
+ if (auto nestedModule = dyn_cast<ModuleOp>(op)) {
processModule(nestedModule);
continue;
}
auto opName = getOpName(op);
- for (mlir::Region &region : op.getRegions()) {
+ for (Region &region : op.getRegions()) {
for (auto indexed_block : llvm::enumerate(region)) {
// Suffix block number if there are more than 1 block.
auto blockName = region.getBlocks().size() == 1
? ""
: ("__" + llvm::utostr(indexed_block.index()));
llvm::WriteGraph(os, &indexed_block.value(), short_names,
- llvm::Twine(title) + opName + blockName);
+ Twine(title) + opName + blockName);
}
}
}
@@ -153,29 +152,28 @@ struct PrintOpPass : public mlir::ModulePass<PrintOpPass> {
void runOnModule() override { processModule(getModule()); }
private:
- llvm::raw_ostream &os;
+ raw_ostream &os;
std::string title;
int unnamedOpCtr = 0;
bool short_names;
};
} // namespace
-void mlir::viewGraph(mlir::Block &block, const llvm::Twine &name,
- bool shortNames, const llvm::Twine &title,
- llvm::GraphProgram::Name program) {
+void mlir::viewGraph(Block &block, const Twine &name, bool shortNames,
+ const Twine &title, llvm::GraphProgram::Name program) {
llvm::ViewGraph(&block, name, shortNames, title, program);
}
-llvm::raw_ostream &mlir::writeGraph(llvm::raw_ostream &os, mlir::Block &block,
- bool shortNames, const llvm::Twine &title) {
+raw_ostream &mlir::writeGraph(raw_ostream &os, Block &block, bool shortNames,
+ const Twine &title) {
return llvm::WriteGraph(os, &block, shortNames, title);
}
-std::unique_ptr<mlir::OpPassBase<mlir::ModuleOp>>
-mlir::createPrintOpGraphPass(llvm::raw_ostream &os, bool shortNames,
- const llvm::Twine &title) {
+std::unique_ptr<OpPassBase<ModuleOp>>
+mlir::createPrintOpGraphPass(raw_ostream &os, bool shortNames,
+ const Twine &title) {
return std::make_unique<PrintOpPass>(os, shortNames, title);
}
-static mlir::PassRegistration<PrintOpPass> pass("print-op-graph",
- "Print op graph per region");
+static PassRegistration<PrintOpPass> pass("print-op-graph",
+ "Print op graph per region");
diff --git a/mlir/lib/Transforms/ViewRegionGraph.cpp b/mlir/lib/Transforms/ViewRegionGraph.cpp
index 57c2f31e6a4..db55415d62e 100644
--- a/mlir/lib/Transforms/ViewRegionGraph.cpp
+++ b/mlir/lib/Transforms/ViewRegionGraph.cpp
@@ -53,41 +53,40 @@ std::string DOTGraphTraits<Region *>::getNodeLabel(Block *Block, Region *) {
} // end namespace llvm
-void mlir::viewGraph(Region &region, const llvm::Twine &name, bool shortNames,
- const llvm::Twine &title,
- llvm::GraphProgram::Name program) {
+void mlir::viewGraph(Region &region, const Twine &name, bool shortNames,
+ const Twine &title, llvm::GraphProgram::Name program) {
llvm::ViewGraph(&region, name, shortNames, title, program);
}
-llvm::raw_ostream &mlir::writeGraph(llvm::raw_ostream &os, Region &region,
- bool shortNames, const llvm::Twine &title) {
+raw_ostream &mlir::writeGraph(raw_ostream &os, Region &region, bool shortNames,
+ const Twine &title) {
return llvm::WriteGraph(os, &region, shortNames, title);
}
-void mlir::Region::viewGraph(const llvm::Twine &regionName) {
+void mlir::Region::viewGraph(const Twine &regionName) {
::mlir::viewGraph(*this, regionName);
}
void mlir::Region::viewGraph() { viewGraph("region"); }
namespace {
struct PrintCFGPass : public FunctionPass<PrintCFGPass> {
- PrintCFGPass(llvm::raw_ostream &os = llvm::errs(), bool shortNames = false,
- const llvm::Twine &title = "")
+ PrintCFGPass(raw_ostream &os = llvm::errs(), bool shortNames = false,
+ const Twine &title = "")
: os(os), shortNames(shortNames), title(title.str()) {}
void runOnFunction() override {
mlir::writeGraph(os, getFunction().getBody(), shortNames, title);
}
private:
- llvm::raw_ostream &os;
+ raw_ostream &os;
bool shortNames;
std::string title;
};
} // namespace
std::unique_ptr<mlir::OpPassBase<mlir::FuncOp>>
-mlir::createPrintCFGGraphPass(llvm::raw_ostream &os, bool shortNames,
- const llvm::Twine &title) {
+mlir::createPrintCFGGraphPass(raw_ostream &os, bool shortNames,
+ const Twine &title) {
return std::make_unique<PrintCFGPass>(os, shortNames, title);
}