path: root/mlir/examples/Linalg/Linalg3/lib/Transforms.cpp
author    River Riddle <riverriddle@google.com>    2019-07-01 10:29:09 -0700
committer jpienaar <jpienaar@google.com>           2019-07-01 11:39:00 -0700
commit    54cd6a7e97a226738e2c85b86559918dd9e3cd5d (patch)
tree      affa803347d6695be575137d1ad55a055a8021e3 /mlir/examples/Linalg/Linalg3/lib/Transforms.cpp
parent    84bd67fc4fd116e80f7a66bfadfe9a7fd6fd5e82 (diff)
NFC: Refactor Function to be value typed.
Move the data members out of Function and into a new impl storage class, 'FunctionStorage'. This allows Function to become value typed, which will greatly simplify the transition of Function to FuncOp (given that FuncOp is also value typed).

PiperOrigin-RevId: 255983022
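As a rough illustration of the pattern the commit describes, a value-typed handle just wraps a pointer to the impl storage, so it is cheap to copy and pass by value. The sketch below is hypothetical and only mirrors the shape of the change; it is not MLIR's actual Function/FunctionStorage definition.

// Minimal sketch of the value-typed pattern, assuming a hypothetical
// FunctionStorage impl class; not MLIR's actual API.
class FunctionStorage;  // would own the function's data members (name, blocks, ...)

class Function {
public:
  explicit Function(FunctionStorage *impl = nullptr) : impl(impl) {}

  // Copying a Function only copies the impl pointer, so it can be passed
  // and returned by value, like FuncOp.
  explicit operator bool() const { return impl != nullptr; }
  bool operator==(Function other) const { return impl == other.impl; }
  bool operator!=(Function other) const { return impl != other.impl; }

private:
  FunctionStorage *impl;  // all state lives in the storage object
};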
Diffstat (limited to 'mlir/examples/Linalg/Linalg3/lib/Transforms.cpp')
-rw-r--r--  mlir/examples/Linalg/Linalg3/lib/Transforms.cpp  |  12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/mlir/examples/Linalg/Linalg3/lib/Transforms.cpp b/mlir/examples/Linalg/Linalg3/lib/Transforms.cpp
index d5c8641acbe..7b9e5ffee96 100644
--- a/mlir/examples/Linalg/Linalg3/lib/Transforms.cpp
+++ b/mlir/examples/Linalg/Linalg3/lib/Transforms.cpp
@@ -35,8 +35,8 @@ using namespace mlir::edsc::intrinsics;
using namespace linalg;
using namespace linalg::intrinsics;
-void linalg::composeSliceOps(mlir::Function *f) {
- f->walk<SliceOp>([](SliceOp sliceOp) {
+void linalg::composeSliceOps(mlir::Function f) {
+ f.walk<SliceOp>([](SliceOp sliceOp) {
auto *sliceResult = sliceOp.getResult();
auto viewOp = emitAndReturnFullyComposedView(sliceResult);
sliceResult->replaceAllUsesWith(viewOp.getResult());
@@ -44,8 +44,8 @@ void linalg::composeSliceOps(mlir::Function *f) {
});
}
-void linalg::lowerToFinerGrainedTensorContraction(mlir::Function *f) {
- f->walk([](Operation *op) {
+void linalg::lowerToFinerGrainedTensorContraction(mlir::Function f) {
+ f.walk([](Operation *op) {
if (auto matmulOp = dyn_cast<linalg::MatmulOp>(op)) {
matmulOp.writeAsFinerGrainTensorContraction();
} else if (auto matvecOp = dyn_cast<linalg::MatvecOp>(op)) {
@@ -211,8 +211,8 @@ linalg::writeAsLoops(Operation *op) {
return llvm::None;
}
-void linalg::lowerToLoops(mlir::Function *f) {
- f->walk([](Operation *op) {
+void linalg::lowerToLoops(mlir::Function f) {
+ f.walk([](Operation *op) {
if (writeAsLoops(op))
op->erase();
});
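For callers, the practical effect of this diff is that functions are now passed by value and members are accessed with '.' instead of '->'. The snippet below is an illustrative, hypothetical call site, not code from the patch.

// Hypothetical call site, assuming a value-typed mlir::Function `f` is in hand.
void runLinalgLowering(mlir::Function f) {
  // Before this patch these helpers took mlir::Function* and used f->walk(...).
  linalg::composeSliceOps(f);
  linalg::lowerToFinerGrainedTensorContraction(f);
  linalg::lowerToLoops(f);
}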