diff options
| author | Nicolas Vasilache <ntv@google.com> | 2019-09-20 09:25:52 -0700 |
|---|---|---|
| committer | A. Unique TensorFlower <gardener@tensorflow.org> | 2019-09-20 09:26:21 -0700 |
| commit | a00b5682776555ed378d67ae2569d12c4e9b112d (patch) | |
| tree | f6dd9797e64d89b640ba044ee3fae809ef6ecf20 /mlir/test/lib/Transforms | |
| parent | 5002e98b92bbf300b41b7a7af6492d7c1bd662f4 (diff) | |
| download | bcm5719-llvm-a00b5682776555ed378d67ae2569d12c4e9b112d.tar.gz bcm5719-llvm-a00b5682776555ed378d67ae2569d12c4e9b112d.zip | |
Add utility to extract strides from layout map in MemRefType.
The RFC for unifying Linalg and Affine compilation passes into an end-to-end flow discusses the notion of a strided MemRef (https://groups.google.com/a/tensorflow.org/forum/#!topic/mlir/MaL8m2nXuio).
This CL adds helper functions to extract strides from the layout map which in turn will allow converting between a strided form of the type and a layout map.
For now strides are only computed on a single affine map with a single result (i.e. the closed subset of linearization maps that are compatible with striding semantics). This restriction will be reevaluated / lifted in the future based on concrete use cases.
PiperOrigin-RevId: 270284686
Diffstat (limited to 'mlir/test/lib/Transforms')
| -rw-r--r-- | mlir/test/lib/Transforms/TestMemRefStrideCalculation.cpp | 63 |
1 files changed, 63 insertions, 0 deletions
//===- TestMemRefStrideCalculation.cpp - Pass to test strides computation--===//
//
// Copyright 2019 The MLIR Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================

#include "mlir/Dialect/StandardOps/Ops.h"
#include "mlir/IR/StandardTypes.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/Passes.h"

using namespace mlir;

namespace {
/// Test pass that walks each AllocOp in a function and prints the
/// stride/offset decomposition of its MemRefType, as computed by
/// MemRefType::getStrides. Used to exercise strided-form extraction from
/// layout maps.
struct TestMemRefStrideCalculation
    : public FunctionPass<struct TestMemRefStrideCalculation> {
  void runOnFunction() override;
};
} // end anonymous namespace

// Traverse AllocOp and compute strides of each MemRefType independently.
+void TestMemRefStrideCalculation::runOnFunction() { + getFunction().walk([&](AllocOp allocOp) { + auto memrefType = allocOp.getResult()->getType().cast<MemRefType>(); + SmallVector<int64_t, 4> strideVector; + if (failed(memrefType.getStrides(strideVector))) { + llvm::outs() << "MemRefType " << memrefType << " cannot be converted to " + << "strided form\n"; + return; + } + ArrayRef<int64_t> strides(strideVector); + auto offset = strides.back(); + strides = strides.drop_back(); + llvm::outs() << "MemRefType offset: "; + if (offset == MemRefType::kDynamicStride) + llvm::outs() << "?"; + else + llvm::outs() << offset; + llvm::outs() << " strides: "; + interleaveComma(strides, llvm::outs(), [&](int64_t v) { + if (v == MemRefType::kDynamicStride) + llvm::outs() << "?"; + else + llvm::outs() << v; + }); + llvm::outs() << "\n"; + }); +} + +static PassRegistration<TestMemRefStrideCalculation> + pass("test-memref-stride-calculation", "Test operation constant folding"); |

