summary | refs | log | tree | commit | diff | stats
path: root/mlir/examples
diff options
context:
space:
mode:
author: River Riddle <riverriddle@google.com> 2019-10-24 15:00:36 -0700
committer: A. Unique TensorFlower <gardener@tensorflow.org> 2019-10-24 15:01:09 -0700
commit: 2b61b7979eb59df579a3a4bf6fe768ddf2a556f4 (patch)
tree: ecbca8b29c18ac250c144271dae96ed3859d64af /mlir/examples
parent: ef43b5653830641766997c10699682f90f1c24da (diff)
download: bcm5719-llvm-2b61b7979eb59df579a3a4bf6fe768ddf2a556f4.tar.gz
bcm5719-llvm-2b61b7979eb59df579a3a4bf6fe768ddf2a556f4.zip
Convert the Canonicalize and CSE passes to generic Operation Passes.
This allows them to be used on other non-function operations, or even on other function-like operations. The algorithms are already generic, so this is simply a change to the derived pass type. The majority of this change is ensuring that the nesting of these passes remains the same, as the pass manager will no longer auto-nest them.

PiperOrigin-RevId: 276573038
Diffstat (limited to 'mlir/examples')
-rw-r--r--mlir/examples/toy/Ch3/toyc.cpp2
-rw-r--r--mlir/examples/toy/Ch4/toyc.cpp7
-rw-r--r--mlir/examples/toy/Ch5/toyc.cpp17
-rw-r--r--mlir/examples/toy/Ch6/toyc.cpp17
4 files changed, 25 insertions, 18 deletions
diff --git a/mlir/examples/toy/Ch3/toyc.cpp b/mlir/examples/toy/Ch3/toyc.cpp
index 72e1c6f1966..7e62e13432c 100644
--- a/mlir/examples/toy/Ch3/toyc.cpp
+++ b/mlir/examples/toy/Ch3/toyc.cpp
@@ -124,7 +124,7 @@ int dumpMLIR() {
applyPassManagerCLOptions(pm);
// Add a run of the canonicalizer to optimize the mlir module.
- pm.addPass(mlir::createCanonicalizerPass());
+ pm.addNestedPass<mlir::FuncOp>(mlir::createCanonicalizerPass());
if (mlir::failed(pm.run(*module)))
return 4;
}
diff --git a/mlir/examples/toy/Ch4/toyc.cpp b/mlir/examples/toy/Ch4/toyc.cpp
index 3dd079d7798..d8e04d6ee89 100644
--- a/mlir/examples/toy/Ch4/toyc.cpp
+++ b/mlir/examples/toy/Ch4/toyc.cpp
@@ -130,9 +130,10 @@ int dumpMLIR() {
// Now that there is only one function, we can infer the shapes of each of
// the operations.
- pm.addPass(mlir::toy::createShapeInferencePass());
- pm.addPass(mlir::createCanonicalizerPass());
- pm.addPass(mlir::createCSEPass());
+ mlir::OpPassManager &optPM = pm.nest<mlir::FuncOp>();
+ optPM.addPass(mlir::toy::createShapeInferencePass());
+ optPM.addPass(mlir::createCanonicalizerPass());
+ optPM.addPass(mlir::createCSEPass());
if (mlir::failed(pm.run(*module)))
return 4;
diff --git a/mlir/examples/toy/Ch5/toyc.cpp b/mlir/examples/toy/Ch5/toyc.cpp
index 9b47b43812f..54cbdf1405f 100644
--- a/mlir/examples/toy/Ch5/toyc.cpp
+++ b/mlir/examples/toy/Ch5/toyc.cpp
@@ -135,21 +135,24 @@ int dumpMLIR() {
// Now that there is only one function, we can infer the shapes of each of
// the operations.
- pm.addPass(mlir::toy::createShapeInferencePass());
- pm.addPass(mlir::createCanonicalizerPass());
- pm.addPass(mlir::createCSEPass());
+ mlir::OpPassManager &optPM = pm.nest<mlir::FuncOp>();
+ optPM.addPass(mlir::toy::createShapeInferencePass());
+ optPM.addPass(mlir::createCanonicalizerPass());
+ optPM.addPass(mlir::createCSEPass());
}
if (isLoweringToAffine) {
// Partially lower the toy dialect with a few cleanups afterwards.
pm.addPass(mlir::toy::createLowerToAffinePass());
- pm.addPass(mlir::createCanonicalizerPass());
- pm.addPass(mlir::createCSEPass());
+
+ mlir::OpPassManager &optPM = pm.nest<mlir::FuncOp>();
+ optPM.addPass(mlir::createCanonicalizerPass());
+ optPM.addPass(mlir::createCSEPass());
// Add optimizations if enabled.
if (EnableOpt) {
- pm.addPass(mlir::createLoopFusionPass());
- pm.addPass(mlir::createMemRefDataFlowOptPass());
+ optPM.addPass(mlir::createLoopFusionPass());
+ optPM.addPass(mlir::createMemRefDataFlowOptPass());
}
}
diff --git a/mlir/examples/toy/Ch6/toyc.cpp b/mlir/examples/toy/Ch6/toyc.cpp
index 018dd0f8236..b7aed1b440c 100644
--- a/mlir/examples/toy/Ch6/toyc.cpp
+++ b/mlir/examples/toy/Ch6/toyc.cpp
@@ -149,21 +149,24 @@ int loadAndProcessMLIR(mlir::MLIRContext &context,
// Now that there is only one function, we can infer the shapes of each of
// the operations.
- pm.addPass(mlir::toy::createShapeInferencePass());
- pm.addPass(mlir::createCanonicalizerPass());
- pm.addPass(mlir::createCSEPass());
+ mlir::OpPassManager &optPM = pm.nest<mlir::FuncOp>();
+ optPM.addPass(mlir::toy::createShapeInferencePass());
+ optPM.addPass(mlir::createCanonicalizerPass());
+ optPM.addPass(mlir::createCSEPass());
}
if (isLoweringToAffine) {
// Partially lower the toy dialect with a few cleanups afterwards.
pm.addPass(mlir::toy::createLowerToAffinePass());
- pm.addPass(mlir::createCanonicalizerPass());
- pm.addPass(mlir::createCSEPass());
+
+ mlir::OpPassManager &optPM = pm.nest<mlir::FuncOp>();
+ optPM.addPass(mlir::createCanonicalizerPass());
+ optPM.addPass(mlir::createCSEPass());
// Add optimizations if enabled.
if (EnableOpt) {
- pm.addPass(mlir::createLoopFusionPass());
- pm.addPass(mlir::createMemRefDataFlowOptPass());
+ optPM.addPass(mlir::createLoopFusionPass());
+ optPM.addPass(mlir::createMemRefDataFlowOptPass());
}
}
OpenPOWER on IntegriCloud