From 8201e19e3dc63e0c3edd0fb38f498158a8f67568 Mon Sep 17 00:00:00 2001
From: Uday Bondhugula
Date: Tue, 30 Oct 2018 17:43:06 -0700
Subject: Introduce memref bound checking.

Introduce analysis to check memref accesses (in MLFunctions) for out-of-bound
ones. It works as follows:

$ mlir-opt -memref-bound-check test/Transforms/memref-bound-check.mlir

/tmp/single.mlir:10:12: error: 'load' op memref out of upper bound access along dimension #1
  %x = load %A[%idx#0, %idx#1] : memref<9 x 9 x i32>
       ^
/tmp/single.mlir:10:12: error: 'load' op memref out of lower bound access along dimension #1
  %x = load %A[%idx#0, %idx#1] : memref<9 x 9 x i32>
       ^
/tmp/single.mlir:10:12: error: 'load' op memref out of upper bound access along dimension #2
  %x = load %A[%idx#0, %idx#1] : memref<9 x 9 x i32>
       ^
/tmp/single.mlir:10:12: error: 'load' op memref out of lower bound access along dimension #2
  %x = load %A[%idx#0, %idx#1] : memref<9 x 9 x i32>
       ^
/tmp/single.mlir:12:12: error: 'load' op memref out of upper bound access along dimension #1
  %y = load %B[%idy] : memref<128 x i32>
       ^
/tmp/single.mlir:12:12: error: 'load' op memref out of lower bound access along dimension #1
  %y = load %B[%idy] : memref<128 x i32>
       ^

#map0 = (d0, d1) -> (d0, d1)
#map1 = (d0, d1) -> (d0 * 128 - d1)
mlfunc @test() {
  %0 = alloc() : memref<9x9xi32>
  %1 = alloc() : memref<128xi32>
  for %i0 = -1 to 9 {
    for %i1 = -1 to 9 {
      %2 = affine_apply #map0(%i0, %i1)
      %3 = load %0[%2#0, %2#1] : memref<9x9xi32>
      %4 = affine_apply #map1(%i0, %i1)
      %5 = load %1[%4] : memref<128xi32>
    }
  }
  return
}

- Improves productivity while manually / semi-automatically developing MLIR for
  testing / prototyping; it also provides an indirect way to catch errors in
  transformations.

- This pass is an easy way to test the underlying affine analysis machinery,
  including low-level routines.

Some code (in getMemoryRegion()) borrowed from @andydavis cl/218263256.

While on this:
- create mlir/Analysis/Passes.h; move Pass.h up from mlir/Transforms/ to mlir/
- fix a bug in AffineAnalysis.cpp::toAffineExpr

TODO: extend to non-constant loop bounds (straightforward). Will transparently
work for all accesses once floordiv, mod, and ceildiv are supported in the
AffineMap -> FlatAffineConstraints conversion.
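For intuition only, here is a minimal standalone sketch of the constant-bound
case (hypothetical names; the actual pass flattens the access's affine map
into a FlatAffineConstraints system rather than using interval arithmetic):
over a rectangular iteration domain, an affine index expression attains its
extreme values at the interval endpoints, and each memref dimension's index
must stay within [0, dimSize - 1].

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

// One loop induction variable with constant (inclusive) bounds.
struct Range {
  int64_t lb, ub;
};

// Checks whether the affine index expression coeffs[0]*iv0 + ... + constant
// can fall outside [0, dimSize - 1] over the given ranges, printing which
// bound can be violated. Returns true if any violation is possible.
static bool checkAccessDim(const std::vector<int64_t> &coeffs, int64_t constant,
                           const std::vector<Range> &ranges, int64_t dimSize) {
  int64_t minVal = constant, maxVal = constant;
  for (unsigned i = 0; i < coeffs.size(); ++i) {
    int64_t atLb = coeffs[i] * ranges[i].lb, atUb = coeffs[i] * ranges[i].ub;
    minVal += std::min(atLb, atUb);
    maxVal += std::max(atLb, atUb);
  }
  bool violated = false;
  if (maxVal > dimSize - 1) {
    std::printf("out of upper bound access (max index %lld, dim size %lld)\n",
                (long long)maxVal, (long long)dimSize);
    violated = true;
  }
  if (minVal < 0) {
    std::printf("out of lower bound access (min index %lld)\n",
                (long long)minVal);
    violated = true;
  }
  return violated;
}

int main() {
  // Mirrors the second load above: index (d0 * 128 - d1) into memref<128xi32>,
  // with d0 and d1 assumed to range over [-1, 8]; both bounds are violated.
  checkAccessDim(/*coeffs=*/{128, -1}, /*constant=*/0,
                 /*ranges=*/{{-1, 8}, {-1, 8}}, /*dimSize=*/128);
  return 0;
}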
PiperOrigin-RevId: 219397961
---
 mlir/lib/Analysis/AffineStructures.cpp | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/mlir/lib/Analysis/AffineStructures.cpp b/mlir/lib/Analysis/AffineStructures.cpp
index de7c5db3e07..52c15bb9237 100644
--- a/mlir/lib/Analysis/AffineStructures.cpp
+++ b/mlir/lib/Analysis/AffineStructures.cpp
@@ -493,6 +493,22 @@ FlatAffineConstraints::FlatAffineConstraints(IntegerSet set)
   }
 }
 
+void FlatAffineConstraints::reset(unsigned numReservedInequalities,
+                                  unsigned numReservedEqualities,
+                                  unsigned newNumReservedCols,
+                                  unsigned newNumDims, unsigned newNumSymbols,
+                                  unsigned newNumLocals) {
+  assert(newNumReservedCols >= 1 && "minimum 1 column");
+  numReservedCols = newNumReservedCols;
+  numDims = newNumDims;
+  numSymbols = newNumSymbols;
+  numIds = numDims + numSymbols + newNumLocals;
+  equalities.clear();
+  inequalities.clear();
+  equalities.reserve(newNumReservedCols * numReservedEqualities);
+  inequalities.reserve(newNumReservedCols * numReservedInequalities);
+}
+
 /// Adds a dimensional identifier. The added column is initialized to
 /// zero.
 void FlatAffineConstraints::addDimId(unsigned pos) {
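As a usage illustration only (the helper below is hypothetical and not part of
this change; only the reset() signature above comes from the patch), a caller
such as the memref region computation could reuse one constraint system per
access by resetting its dimensionality instead of constructing a fresh object:

#include "mlir/Analysis/AffineStructures.h"

using namespace mlir;

// Hypothetical helper: prepare 'cst' for an access into a memref of rank
// 'rank' with 'numSymbols' symbolic operands. Reserves two inequalities per
// dimension (a lower and an upper bound) and one column per identifier plus
// the constant-term column required by the assert in reset().
static void resetForAccess(FlatAffineConstraints &cst, unsigned rank,
                           unsigned numSymbols) {
  cst.reset(/*numReservedInequalities=*/2 * rank,
            /*numReservedEqualities=*/0,
            /*newNumReservedCols=*/rank + numSymbols + 1,
            /*newNumDims=*/rank, /*newNumSymbols=*/numSymbols,
            /*newNumLocals=*/0);
}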