diff options
| author | Uday Bondhugula <bondhugula@google.com> | 2018-10-30 17:43:06 -0700 |
|---|---|---|
| committer | jpienaar <jpienaar@google.com> | 2019-03-29 13:46:08 -0700 |
| commit | 8201e19e3dc63e0c3edd0fb38f498158a8f67568 (patch) | |
| tree | 4f2dcd6c4a9b1066fae98758957d502344e518b6 /mlir/tools/mlir-opt/mlir-opt.cpp | |
| parent | 4c465a181db49c436f62da303e8fdd3ed317fee7 (diff) | |
| download | bcm5719-llvm-8201e19e3dc63e0c3edd0fb38f498158a8f67568.tar.gz bcm5719-llvm-8201e19e3dc63e0c3edd0fb38f498158a8f67568.zip | |
Introduce memref bound checking.
Introduce analysis to check memref accesses (in MLFunctions) for out of bound
ones. It works as follows:
$ mlir-opt -memref-bound-check test/Transforms/memref-bound-check.mlir
/tmp/single.mlir:10:12: error: 'load' op memref out of upper bound access along dimension #1
%x = load %A[%idx#0, %idx#1] : memref<9 x 9 x i32>
^
/tmp/single.mlir:10:12: error: 'load' op memref out of lower bound access along dimension #1
%x = load %A[%idx#0, %idx#1] : memref<9 x 9 x i32>
^
/tmp/single.mlir:10:12: error: 'load' op memref out of upper bound access along dimension #2
%x = load %A[%idx#0, %idx#1] : memref<9 x 9 x i32>
^
/tmp/single.mlir:10:12: error: 'load' op memref out of lower bound access along dimension #2
%x = load %A[%idx#0, %idx#1] : memref<9 x 9 x i32>
^
/tmp/single.mlir:12:12: error: 'load' op memref out of upper bound access along dimension #1
%y = load %B[%idy] : memref<128 x i32>
^
/tmp/single.mlir:12:12: error: 'load' op memref out of lower bound access along dimension #1
%y = load %B[%idy] : memref<128 x i32>
^
#map0 = (d0, d1) -> (d0, d1)
#map1 = (d0, d1) -> (d0 * 128 - d1)
mlfunc @test() {
%0 = alloc() : memref<9x9xi32>
%1 = alloc() : memref<128xi32>
for %i0 = -1 to 9 {
for %i1 = -1 to 9 {
%2 = affine_apply #map0(%i0, %i1)
%3 = load %0[%2#0, %2#1] : memref<9x9xi32>
%4 = affine_apply #map1(%i0, %i1)
%5 = load %1[%4] : memref<128xi32>
}
}
return
}
- Improves productivity while manually / semi-automatically developing MLIR for
testing / prototyping; also provides an indirect way to catch errors in
transformations.
- This pass is an easy way to test the underlying affine analysis
machinery including low level routines.
Some code (in getMemoryRegion()) borrowed from @andydavis cl/218263256.
While on this:
- create mlir/Analysis/Passes.h; move Pass.h up from mlir/Transforms/ to mlir/
- fix a bug in AffineAnalysis.cpp::toAffineExpr
TODO: extend to non-constant loop bounds (straightforward). Will transparently
work for all accesses once floordiv, mod, ceildiv are supported in the
AffineMap -> FlatAffineConstraints conversion.
PiperOrigin-RevId: 219397961
Diffstat (limited to 'mlir/tools/mlir-opt/mlir-opt.cpp')
| -rw-r--r-- | mlir/tools/mlir-opt/mlir-opt.cpp | 19 |
1 files changed, 13 insertions, 6 deletions
diff --git a/mlir/tools/mlir-opt/mlir-opt.cpp b/mlir/tools/mlir-opt/mlir-opt.cpp index 38d72c6c652..94487fcc307 100644 --- a/mlir/tools/mlir-opt/mlir-opt.cpp +++ b/mlir/tools/mlir-opt/mlir-opt.cpp @@ -21,6 +21,7 @@ // //===----------------------------------------------------------------------===// +#include "mlir/Analysis/Passes.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/CFGFunction.h" #include "mlir/IR/Location.h" @@ -28,10 +29,10 @@ #include "mlir/IR/MLIRContext.h" #include "mlir/IR/Module.h" #include "mlir/Parser.h" +#include "mlir/Pass.h" #include "mlir/TensorFlow/ControlFlowOps.h" #include "mlir/TensorFlow/Passes.h" #include "mlir/Transforms/CFGFunctionViewGraph.h" -#include "mlir/Transforms/Pass.h" #include "mlir/Transforms/Passes.h" #include "mlir/XLA/Passes.h" #include "llvm/Support/CommandLine.h" @@ -70,13 +71,14 @@ enum Passes { ComposeAffineMaps, ConstantFold, ConvertToCFG, - Vectorize, + MemRefBoundCheck, LoopUnroll, LoopUnrollAndJam, PipelineDataTransfer, PrintCFGGraph, SimplifyAffineStructures, TFRaiseControlFlow, + Vectorize, XLALower, }; @@ -90,8 +92,8 @@ static cl::list<Passes> passList( "Constant fold operations in functions"), clEnumValN(ConvertToCFG, "convert-to-cfg", "Convert all ML functions in the module to CFG ones"), - clEnumValN(Vectorize, "vectorize", - "Vectorize to a target independent n-D vector abstraction."), + clEnumValN(MemRefBoundCheck, "memref-bound-check", + "Convert all ML functions in the module to CFG ones"), clEnumValN(LoopUnroll, "loop-unroll", "Unroll loops"), clEnumValN(LoopUnrollAndJam, "loop-unroll-jam", "Unroll and jam loops"), clEnumValN(PipelineDataTransfer, "pipeline-data-transfer", @@ -103,6 +105,8 @@ static cl::list<Passes> passList( "Simplify affine expressions"), clEnumValN(TFRaiseControlFlow, "tf-raise-control-flow", "Dynamic TensorFlow Switch/Match nodes to a CFG"), + clEnumValN(Vectorize, "vectorize", + "Vectorize to a target independent n-D vector abstraction."), clEnumValN(XLALower, "xla-lower", 
"Lower to XLA dialect"))); enum OptResult { OptSuccess, OptFailure }; @@ -191,8 +195,8 @@ static OptResult performActions(SourceMgr &sourceMgr, MLIRContext *context) { case ConvertToCFG: pass = createConvertToCFGPass(); break; - case Vectorize: - pass = createVectorizePass(); + case MemRefBoundCheck: + pass = createMemRefBoundCheckPass(); break; case LoopUnroll: pass = createLoopUnrollPass(); @@ -212,6 +216,9 @@ static OptResult performActions(SourceMgr &sourceMgr, MLIRContext *context) { case TFRaiseControlFlow: pass = createRaiseTFControlFlowPass(); break; + case Vectorize: + pass = createVectorizePass(); + break; case XLALower: pass = createXLALowerPass(); break; |

