| Field | Value | Date |
|---|---|---|
| author | Andy Davis <andydavis@google.com> | 2019-11-07 10:19:54 -0800 |
| committer | A. Unique TensorFlower <gardener@tensorflow.org> | 2019-11-07 10:20:23 -0800 |
| commit | 8f00b4494d0434eedd8c80917d17d33d395ea65d (patch) | |
| tree | a530978d2909a8ae8e1d42c51d164be720f917ba | |
| parent | 6b4e30b7c80782a2e1616c739b8a598ed72b725d (diff) | |
Swap the operand order in the std.view operation so that the offset appears before the dynamic sizes in the operand list.

PiperOrigin-RevId: 279114236
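In concrete syntax, the change looks like this (adapted from the ViewOp documentation updated below; `%offset_1024` and `%size0` are the doc's illustrative names):

```mlir
// Before: dynamic sizes first, then the dynamic offset.
%2 = view %0[%size0][%offset_1024]
  : memref<2048xi8> to memref<?x4xf32, (d0, d1)[s0] -> (d0 * 4 + d1 + s0)>

// After: the dynamic offset first, then the dynamic sizes.
%2 = view %0[%offset_1024][%size0]
  : memref<2048xi8> to memref<?x4xf32, (d0, d1)[s0] -> (d0 * 4 + d1 + s0)>
```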
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | mlir/include/mlir/Dialect/StandardOps/Ops.td | 35 |
| -rw-r--r-- | mlir/lib/Dialect/StandardOps/Ops.cpp | 72 |
| -rw-r--r-- | mlir/test/Dialect/Linalg/promote.mlir | 12 |
| -rw-r--r-- | mlir/test/IR/core-ops.mlir | 22 |
| -rw-r--r-- | mlir/test/IR/invalid-ops.mlir | 10 |
| -rw-r--r-- | mlir/test/Transforms/canonicalize.mlir | 25 |
6 files changed, 95 insertions, 81 deletions
```diff
diff --git a/mlir/include/mlir/Dialect/StandardOps/Ops.td b/mlir/include/mlir/Dialect/StandardOps/Ops.td
index 4dd22bab2d9..710ffe1b63d 100644
--- a/mlir/include/mlir/Dialect/StandardOps/Ops.td
+++ b/mlir/include/mlir/Dialect/StandardOps/Ops.td
@@ -1139,34 +1139,34 @@ def ViewOp : Std_Op<"view"> {
     The "view" operation converts a 1-D memref with i8 element type,
     to an N-D memref with arbitrary element type. In addition, the ViewOp
     supports the following arguments:
-    *) A dynamic size operand must be specified for each dynamic dimension
-       in the resulting view memref type.
     *) A single dynamic offset operand can be specified which represents a
        a dynamic offset within the base 1-D memref at which to create the
        resulting memref view.
+    *) A dynamic size operand must be specified for each dynamic dimension
+       in the resulting view memref type.
 
     // Allocate a flat 1D/i8 memref.
     %0 = alloc() : memref<2048xi8>
 
-    // ViewOp with static sizes and offset.
+    // ViewOp with static offset and sizes.
     %1 = view %0[][] : memref<2048xi8> to memref<64x4xf32>
 
-    // ViewOp with one dynamic size and a dynamic offset.
-    %2 = view %0[%size0][%offset_1024]
+    // ViewOp with dynamic offset and one dynamic size.
+    %2 = view %0[%offset_1024][%size0]
       : memref<2048xi8> to memref<?x4xf32, (d0, d1)[s0] -> (d0 * 4 + d1 + s0)
 
     // ViewOp creating 3D shape where two of the dim sizes are dynamic.
+    // *) The dynamic offset specified in the ViewOp is applied to the
+    //    base 1-D memref, and is represented by the symbol 's0' in the
+    //    layout map of the ViewOp result memref type.
     // *) The dynamic size for the second dimension induces a dynamic
     //    stride for the first dimension, which is represented by the
-    //    symbol 's0' in the layout map of the ViewOp result memref type.
+    //    symbol 's1' in the layout map of the ViewOp result memref type.
     //    Note that this dynamic stride will be computed from the view
     //    shape and dynamic sizes.
-    // *) The dynamic offset specified in the ViewOp is applied to the
-    //    base 1-D memref, and is represented by the symbol 's1' in the
-    //    layout map of the ViewOp result memref type.
-    %3 = view %0[%size0, %size1][%offset_1024]
+    %3 = view %0[%offset_1024][%size0, %size1]
       : memref<2048xi8> to
         memref<?x?x4xf32,
-          (d0, d1, d2)[s0, s1] -> (d0 * s0 + d1 * 4 + d2 + s1)
+          (d0, d1, d2)[s0, s1] -> (d0 * s1 + d1 * 4 + d2 + s0)
   }];
 
   let arguments = (ins MemRefRankOf<[I8], [1]>:$source,
@@ -1179,17 +1179,16 @@ def ViewOp : Std_Op<"view"> {
 
     /// Returns the dynamic offset for this shape cast operation if specified.
     /// Returns nullptr if no dynamic offset was specified.
-    Value *getDynamicOffset() {
-      unsigned offsetPos = 1 + getType().getNumDynamicDims();
-      if (offsetPos >= getNumOperands())
-        return nullptr;
-      return getOperand(offsetPos);
+    Value *getDynamicOffset();
+
+    /// Returns the starting operand list position of the dynamic size operands.
+    unsigned getDynamicSizesOperandStart() {
+      return getDynamicOffset() == nullptr ? 1 : 2;
     }
 
     /// Returns the dynamic sizes for this shape cast operation.
     operand_range getDynamicSizes() {
-      return {operand_begin() + 1,
-              operand_begin() + 1 + getType().getNumDynamicDims()};
+      return {operand_begin() + getDynamicSizesOperandStart(), operand_end()};
     }
   }];
```
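To make the accessor logic above concrete, here is a sketch of the two operand layouts the new code distinguishes (value names are illustrative, not from the patch):

```mlir
// The result layout map has a dynamic offset (symbol s0), so the operand
// list is [source, offset, sizes...]: getDynamicOffset() returns %off and
// getDynamicSizesOperandStart() returns 2.
%a = view %buf[%off][%sz]
  : memref<2048xi8> to memref<?x4xf32, (d0, d1)[s0] -> (d0 * 4 + d1 + s0)>

// The offset in the map is static, so the operand list is [source, sizes...]:
// getDynamicOffset() returns nullptr and getDynamicSizesOperandStart()
// returns 1.
%b = view %buf[][%sz]
  : memref<2048xi8> to memref<?x4xf32, (d0, d1) -> (d0 * 4 + d1)>
```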
```diff
diff --git a/mlir/lib/Dialect/StandardOps/Ops.cpp b/mlir/lib/Dialect/StandardOps/Ops.cpp
index 60002649a21..9fc6f320e1d 100644
--- a/mlir/lib/Dialect/StandardOps/Ops.cpp
+++ b/mlir/lib/Dialect/StandardOps/Ops.cpp
@@ -2346,29 +2346,40 @@ static ParseResult parseViewOp(OpAsmParser &parser, OperationState &result) {
   Type srcType, dstType;
   return failure(
       parser.parseOperand(srcInfo) ||
-      parser.parseOperandList(sizesInfo, OpAsmParser::Delimiter::Square) ||
       parser.parseOperandList(offsetInfo, OpAsmParser::Delimiter::Square) ||
+      parser.parseOperandList(sizesInfo, OpAsmParser::Delimiter::Square) ||
       parser.parseOptionalAttrDict(result.attributes) ||
       parser.parseColonType(srcType) ||
       parser.resolveOperand(srcInfo, srcType, result.operands) ||
-      parser.resolveOperands(sizesInfo, indexType, result.operands) ||
       parser.resolveOperands(offsetInfo, indexType, result.operands) ||
+      parser.resolveOperands(sizesInfo, indexType, result.operands) ||
       parser.parseKeywordType("to", dstType) ||
       parser.addTypeToList(dstType, result.types));
 }
 
 static void print(OpAsmPrinter &p, ViewOp op) {
   p << op.getOperationName() << ' ' << *op.getOperand(0) << '[';
-  p.printOperands(op.getDynamicSizes());
-  p << "][";
   auto *dynamicOffset = op.getDynamicOffset();
   if (dynamicOffset != nullptr)
     p.printOperand(dynamicOffset);
+  p << "][";
+  p.printOperands(op.getDynamicSizes());
   p << ']';
   p.printOptionalAttrDict(op.getAttrs());
   p << " : " << op.getOperand(0)->getType() << " to " << op.getType();
 }
 
+Value *ViewOp::getDynamicOffset() {
+  int64_t offset;
+  llvm::SmallVector<int64_t, 4> strides;
+  auto result =
+      succeeded(mlir::getStridesAndOffset(getType(), strides, offset));
+  assert(result);
+  if (result && offset == MemRefType::getDynamicStrideOrOffset())
+    return getOperand(1);
+  return nullptr;
+}
+
 static LogicalResult verify(ViewOp op) {
   auto baseType = op.getOperand(0)->getType().cast<MemRefType>();
   auto viewType = op.getResult()->getType().cast<MemRefType>();
@@ -2438,13 +2449,37 @@ struct ViewOpShapeFolder : public OpRewritePattern<ViewOp> {
       return matchFailure();
     auto map = memrefType.getAffineMaps()[0];
 
+    // Get offset from old memref view type 'memRefType'.
+    int64_t oldOffset;
+    llvm::SmallVector<int64_t, 4> oldStrides;
+    if (failed(getStridesAndOffset(memrefType, oldStrides, oldOffset)))
+      return matchFailure();
+
+    SmallVector<Value *, 4> newOperands;
+    SmallVector<Value *, 4> droppedOperands;
+
+    // Fold dynamic offset operand if it is produced by a constant.
+    auto *dynamicOffset = viewOp.getDynamicOffset();
+    int64_t newOffset = oldOffset;
+    unsigned dynamicOffsetOperandCount = 0;
+    if (dynamicOffset != nullptr) {
+      auto *defOp = dynamicOffset->getDefiningOp();
+      if (auto constantIndexOp = dyn_cast_or_null<ConstantIndexOp>(defOp)) {
+        // Dynamic offset will be folded into the map.
+        newOffset = constantIndexOp.getValue();
+        droppedOperands.push_back(dynamicOffset);
+      } else {
+        // Unable to fold dynamic offset. Add it to 'newOperands' list.
+        newOperands.push_back(dynamicOffset);
+        dynamicOffsetOperandCount = 1;
+      }
+    }
+
     // Fold any dynamic dim operands which are produced by a constant.
     SmallVector<int64_t, 4> newShapeConstants;
     newShapeConstants.reserve(memrefType.getRank());
-    SmallVector<Value *, 4> newOperands;
-    SmallVector<Value *, 4> droppedOperands;
 
-    unsigned dynamicDimPos = 1;
+    unsigned dynamicDimPos = viewOp.getDynamicSizesOperandStart();
     unsigned rank = memrefType.getRank();
     for (unsigned dim = 0, e = rank; dim < e; ++dim) {
       int64_t dimSize = memrefType.getDimSize(dim);
@@ -2467,29 +2502,6 @@ struct ViewOpShapeFolder : public OpRewritePattern<ViewOp> {
       dynamicDimPos++;
     }
 
-    // Get offset from old memref view type 'memRefType'.
-    int64_t oldOffset;
-    llvm::SmallVector<int64_t, 4> oldStrides;
-    if (failed(getStridesAndOffset(memrefType, oldStrides, oldOffset)))
-      return matchFailure();
-
-    // Fold dynamic offset operand if it is produced by a constant.
-    auto *dynamicOffset = viewOp.getDynamicOffset();
-    int64_t newOffset = oldOffset;
-    unsigned dynamicOffsetOperandCount = 0;
-    if (dynamicOffset != nullptr) {
-      auto *defOp = dynamicOffset->getDefiningOp();
-      if (auto constantIndexOp = dyn_cast_or_null<ConstantIndexOp>(defOp)) {
-        // Dynamic offset will be folded into the map.
-        newOffset = constantIndexOp.getValue();
-        droppedOperands.push_back(dynamicOffset);
-      } else {
-        // Unable to fold dynamic offset. Add it to 'newOperands' list.
-        newOperands.push_back(dynamicOffset);
-        dynamicOffsetOperandCount = 1;
-      }
-    }
-
     // Compute new strides based on 'newShapeConstants'.
     SmallVector<int64_t, 4> newStrides(rank);
     newStrides[rank - 1] = 1;
```

```diff
diff --git a/mlir/test/Dialect/Linalg/promote.mlir b/mlir/test/Dialect/Linalg/promote.mlir
index 51f8b35012e..db33e044998 100644
--- a/mlir/test/Dialect/Linalg/promote.mlir
+++ b/mlir/test/Dialect/Linalg/promote.mlir
@@ -51,18 +51,18 @@ module {
 // CHECK: %[[vC:.*]] = linalg.subview {{.*}} : memref<?x?xf32, #[[strided2D]]>
 ///
 // CHECK: %[[tmpA:.*]] = alloc() : memref<32xi8>
-// CHECK: %[[fullA:.*]] = std.view %[[tmpA]][{{.*}}][] : memref<32xi8> to memref<?x?xf32>
-// DYNAMIC: std.view %{{.*}}[{{.*}}][] : memref<?xi8> to memref<?x?xf32>
+// CHECK: %[[fullA:.*]] = std.view %[[tmpA]][][{{.*}}] : memref<32xi8> to memref<?x?xf32>
+// DYNAMIC: std.view %{{.*}}[][{{.*}}] : memref<?xi8> to memref<?x?xf32>
 // CHECK: %[[partialA:.*]] = linalg.slice %[[fullA]][%{{.*}}, %{{.*}}] : memref<?x?xf32>, !linalg.range, !linalg.range, memref<?x?xf32, #[[strided2DnoOffset]]>
 ///
 // CHECK: %[[tmpB:.*]] = alloc() : memref<48xi8>
-// CHECK: %[[fullB:.*]] = std.view %[[tmpB]][{{.*}}][] : memref<48xi8> to memref<?x?xf32>
-// DYNAMIC: std.view %{{.*}}[{{.*}}][] : memref<?xi8> to memref<?x?xf32>
+// CHECK: %[[fullB:.*]] = std.view %[[tmpB]][][{{.*}}] : memref<48xi8> to memref<?x?xf32>
+// DYNAMIC: std.view %{{.*}}[][{{.*}}] : memref<?xi8> to memref<?x?xf32>
 // CHECK: %[[partialB:.*]] = linalg.slice %[[fullB]][%{{.*}}, %{{.*}}] : memref<?x?xf32>, !linalg.range, !linalg.range, memref<?x?xf32, #[[strided2DnoOffset]]>
 ///
 // CHECK: %[[tmpC:.*]] = alloc() : memref<24xi8>
-// CHECK: %[[fullC:.*]] = std.view %[[tmpC]][{{.*}}][] : memref<24xi8> to memref<?x?xf32>
-// DYNAMIC: std.view %{{.*}}[{{.*}}][] : memref<?xi8> to memref<?x?xf32>
+// CHECK: %[[fullC:.*]] = std.view %[[tmpC]][][{{.*}}] : memref<24xi8> to memref<?x?xf32>
+// DYNAMIC: std.view %{{.*}}[][{{.*}}] : memref<?xi8> to memref<?x?xf32>
 // CHECK: %[[partialC:.*]] = linalg.slice %[[fullC]][%{{.*}}, %{{.*}}] : memref<?x?xf32>, !linalg.range, !linalg.range, memref<?x?xf32, #[[strided2DnoOffset]]>
 
 // CHECK: linalg.fill(%[[fullA]], {{.*}}) : memref<?x?xf32>, f32
```
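Note that `getDynamicOffset()` now decides from the result *type*: it runs `getStridesAndOffset` and only treats operand 1 as an offset when the layout map's offset is dynamic. The promotion tests above exercise the static-offset case; a minimal standalone version of that pattern (the names `%M` and `%N` are illustrative):

```mlir
%tmp = alloc() : memref<32xi8>
// Empty offset brackets: the offset is statically 0, so both trailing
// operands bind to the two dynamic dimensions of the result type.
%full = view %tmp[][%M, %N] : memref<32xi8> to memref<?x?xf32>
```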
```diff
diff --git a/mlir/test/IR/core-ops.mlir b/mlir/test/IR/core-ops.mlir
index bbabe60d12d..252a13df102 100644
--- a/mlir/test/IR/core-ops.mlir
+++ b/mlir/test/IR/core-ops.mlir
@@ -12,7 +12,7 @@
 // CHECK-DAG: #[[map_proj_d0d1_d1d0:map[0-9]+]] = (d0, d1) -> (d1, d0)
 
 // CHECK-DAG: #[[VIEW_MAP1:map[0-9]+]] = (d0, d1) -> (d0 * 4 + d1)
-// CHECK-DAG: #[[VIEW_MAP2:map[0-9]+]] = (d0, d1)[s0, s1] -> (d0 * s0 + d1 + s1)
+// CHECK-DAG: #[[VIEW_MAP2:map[0-9]+]] = (d0, d1)[s0, s1] -> (d0 * s1 + d1 + s0)
 // CHECK-DAG: #[[VIEW_MAP3:map[0-9]+]] = (d0, d1)[s0] -> (d0 * s0 + d1)
 
 // CHECK-LABEL: func @func_with_ops(%arg0: f32) {
@@ -480,23 +480,23 @@ func @memref_cast(%arg0: memref<4xf32>, %arg1 : memref<?xf32>) {
 func @memref_view(%arg0 : index, %arg1 : index, %arg2 : index) {
   %0 = alloc() : memref<2048xi8>
   // Test two dynamic sizes and dynamic offset.
-  // CHECK: %{{.*}} = std.view %0[%arg0, %arg1][%arg2] : memref<2048xi8> to memref<?x?xf32, #[[VIEW_MAP2]]>
-  %1 = view %0[%arg0, %arg1][%arg2]
-    : memref<2048xi8> to memref<?x?xf32, (d0, d1)[s0, s1] -> (d0 * s0 + d1 + s1)>
+  // CHECK: %{{.*}} = std.view %0[%arg2][%arg0, %arg1] : memref<2048xi8> to memref<?x?xf32, #[[VIEW_MAP2]]>
+  %1 = view %0[%arg2][%arg0, %arg1]
+    : memref<2048xi8> to memref<?x?xf32, (d0, d1)[s0, s1] -> (d0 * s1 + d1 + s0)>
 
   // Test two dynamic sizes and static offset.
-  // CHECK: %{{.*}} = std.view %0[%arg0, %arg1][] : memref<2048xi8> to memref<?x?xf32, #[[VIEW_MAP3]]>
-  %2 = view %0[%arg0, %arg1][]
+  // CHECK: %{{.*}} = std.view %0[][%arg0, %arg1] : memref<2048xi8> to memref<?x?xf32, #[[VIEW_MAP3]]>
+  %2 = view %0[][%arg0, %arg1]
     : memref<2048xi8> to memref<?x?xf32, (d0, d1)[s0] -> (d0 * s0 + d1)>
 
   // Test one dynamic size and dynamic offset.
-  // CHECK: %{{.*}} = std.view %0[%arg1][%arg2] : memref<2048xi8> to memref<4x?xf32, #[[VIEW_MAP2]]>
-  %3 = view %0[%arg1][%arg2]
-    : memref<2048xi8> to memref<4x?xf32, (d0, d1)[s0, s1] -> (d0 * s0 + d1 + s1)>
+  // CHECK: %{{.*}} = std.view %0[%arg2][%arg1] : memref<2048xi8> to memref<4x?xf32, #[[VIEW_MAP2]]>
+  %3 = view %0[%arg2][%arg1]
+    : memref<2048xi8> to memref<4x?xf32, (d0, d1)[s0, s1] -> (d0 * s1 + d1 + s0)>
 
   // Test one dynamic size and static offset.
-  // CHECK: %{{.*}} = std.view %0[%arg0][] : memref<2048xi8> to memref<?x4xf32, #[[VIEW_MAP1]]>
-  %4 = view %0[%arg0][]
+  // CHECK: %{{.*}} = std.view %0[][%arg0] : memref<2048xi8> to memref<?x4xf32, #[[VIEW_MAP1]]>
+  %4 = view %0[][%arg0]
     : memref<2048xi8> to memref<?x4xf32, (d0, d1) -> (d0 * 4 + d1)>
 
   // Test static sizes and static offset.
```
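In `#[[VIEW_MAP2]]`, the symbols follow the new convention: the dynamic offset, when present, is now $s_0$, and the dynamic stride is $s_1$. Restating the map for `%3` above (this paraphrases the CHECK-DAG line, not new semantics), element $(d_0, d_1)$ of the `4x?` view sits at the flat f32 index

$$\mathrm{index}(d_0, d_1) = d_0 \cdot s_1 + d_1 + s_0,$$

where $s_0$ is bound to the offset operand `%arg2` and $s_1$ is the stride induced by the dynamic second dimension.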
```diff
diff --git a/mlir/test/IR/invalid-ops.mlir b/mlir/test/IR/invalid-ops.mlir
index 4d1d853dadb..ec38ecebb0e 100644
--- a/mlir/test/IR/invalid-ops.mlir
+++ b/mlir/test/IR/invalid-ops.mlir
@@ -907,7 +907,7 @@ func @invalid_splat(%v : f32) { // expected-note {{prior use here}}
 func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
   %0 = alloc() : memref<2048xi8>
   // expected-error@+1 {{incorrect number of operands for type}}
-  %1 = view %0[%arg0, %arg1][]
+  %1 = view %0[][%arg0, %arg1]
     : memref<2048xi8> to memref<?x?xf32, (d0, d1)[s0] -> (d0 * 4 + d1 + s0)>
   return
 }
@@ -917,7 +917,7 @@ func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
 func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
   %0 = alloc() : memref<2048xi8>
   // expected-error@+1 {{is not strided}}
-  %1 = view %0[%arg0, %arg1][]
+  %1 = view %0[][%arg0, %arg1]
     : memref<2048xi8> to memref<?x?xf32, (d0, d1)[s0] -> (d0, d1, s0)>
   return
 }
@@ -927,7 +927,7 @@ func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
 func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
   %0 = alloc() : memref<2048xf32>
   // expected-error@+1 {{must be 1D memref of 8-bit integer values}}
-  %1 = view %0[%arg0, %arg1][]
+  %1 = view %0[][%arg0, %arg1]
     : memref<2048xf32> to memref<?x?xf32, (d0, d1)[s0] -> (d0 * 4 + d1 + s0)>
   return
 }
@@ -937,7 +937,7 @@ func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
 func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
   %0 = alloc() : memref<2048xi8, (d0) -> (d0 floordiv 8, d0 mod 8)>
   // expected-error@+1 {{unsupported map for base memref}}
-  %1 = view %0[%arg0, %arg1][]
+  %1 = view %0[][%arg0, %arg1]
     : memref<2048xi8, (d0) -> (d0 floordiv 8, d0 mod 8)> to
       memref<?x?xf32, (d0, d1)[s0] -> (d0 * 4 + d1 + s0)>
   return
 }
@@ -948,7 +948,7 @@ func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) {
 func @invalid_view(%arg0 : index, %arg1 : index, %ar2 : index) {
   %0 = alloc() : memref<2048xi8, 2>
   // expected-error@+1 {{different memory spaces}}
-  %1 = view %0[%arg0, %arg1][]
+  %1 = view %0[][%arg0, %arg1]
     : memref<2048xi8, 2> to
       memref<?x?xf32, (d0, d1)[s0] -> (d0 * 4 + d1 + s0), 1>
   return
```
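For contrast with the first rejected case above, a form with a matching operand count would also pass the dynamic offset that the map's `s0` requires (a sketch; using `%arg2` as the offset operand is an assumption for illustration, not part of the patch):

```mlir
%0 = alloc() : memref<2048xi8>
// One offset operand for s0 plus one size operand per '?' dimension:
// three trailing operands in total, matching the result type.
%1 = view %0[%arg2][%arg0, %arg1]
  : memref<2048xi8> to memref<?x?xf32, (d0, d1)[s0] -> (d0 * 4 + d1 + s0)>
```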
```diff
diff --git a/mlir/test/Transforms/canonicalize.mlir b/mlir/test/Transforms/canonicalize.mlir
index 8ccf24061b9..134b96741c6 100644
--- a/mlir/test/Transforms/canonicalize.mlir
+++ b/mlir/test/Transforms/canonicalize.mlir
@@ -1,8 +1,8 @@
 // RUN: mlir-opt %s -pass-pipeline='func(canonicalize)' | FileCheck %s
 
-#TEST_VIEW_MAP0 = (d0, d1)[s0, s1] -> (d0 * s0 + d1 + s1)
-#TEST_VIEW_MAP1 = (d0, d1, d2)[s0, s1] -> (d0 * s0 + d1 * s1 + d2)
-#TEST_VIEW_MAP2 = (d0, d1)[s0, s1] -> (d0 * 4 + d1 + s1)
+#TEST_VIEW_MAP0 = (d0, d1)[s0, s1] -> (d0 * s1 + d1 + s0)
+#TEST_VIEW_MAP1 = (d0, d1, d2)[s0, s1] -> (d0 * s1 + d1 * s0 + d2)
+#TEST_VIEW_MAP2 = (d0, d1)[s0] -> (d0 * 4 + d1 + s0)
 
 // CHECK-DAG: #[[VIEW_MAP0:map[0-9]+]] = (d0, d1) -> (d0 * 11 + d1 + 15)
 // CHECK-DAG: #[[VIEW_MAP1:map[0-9]+]] = (d0, d1)[s0] -> (d0 * 11 + s0 + d1)
@@ -599,28 +599,31 @@ func @view(%arg0 : index) {
 
   // Test: fold constant sizes and offset, update map with static stride/offset.
   // CHECK: std.view %0[][] : memref<2048xi8> to memref<7x11xf32, #[[VIEW_MAP0]]>
-  %1 = view %0[%c7, %c11][%c15]
+  %1 = view %0[%c15][%c7, %c11]
     : memref<2048xi8> to memref<?x?xf32, #TEST_VIEW_MAP0>
 
+  // Test: fold constant sizes but not offset, update map with static stride.
   // Test that we do not a fold dynamic dim which is not produced by a constant.
-  // CHECK: std.view %0[][%arg0] : memref<2048xi8> to memref<7x11xf32, #[[VIEW_MAP1]]>
-  %2 = view %0[%c7, %c11][%arg0]
+  // CHECK: std.view %0[%arg0][] : memref<2048xi8> to memref<7x11xf32, #[[VIEW_MAP1]]>
+  %2 = view %0[%arg0][%c7, %c11]
     : memref<2048xi8> to memref<?x?xf32, #TEST_VIEW_MAP0>
 
+  // Test: fold constant offset but not sizes, update map with constant offset.
   // Test that we fold constant offset but not dynamic dims.
-  // CHECK: std.view %0[%arg0, %arg0][] : memref<2048xi8> to memref<?x?xf32, #[[VIEW_MAP2]]>
-  %3 = view %0[%arg0, %arg0][%c15]
+  // CHECK: std.view %0[][%arg0, %arg0] : memref<2048xi8> to memref<?x?xf32, #[[VIEW_MAP2]]>
+  %3 = view %0[%c15][%arg0, %arg0]
     : memref<2048xi8> to memref<?x?xf32, #TEST_VIEW_MAP0>
 
+  // Test: fold one constant dim, no offset, should update with constant
   // stride on dim 1, but leave dynamic stride on dim 0.
-  // CHECK: std.view %0[%arg0, %arg0][] : memref<2048xi8> to memref<?x?x7xf32, #[[VIEW_MAP3]]>
-  %4 = view %0[%arg0, %arg0, %c7][]
+  // CHECK: std.view %0[][%arg0, %arg0] : memref<2048xi8> to memref<?x?x7xf32, #[[VIEW_MAP3]]>
+  %4 = view %0[][%arg0, %arg0, %c7]
     : memref<2048xi8> to memref<?x?x?xf32, #TEST_VIEW_MAP1>
 
   // Test: preserve an existing static dim size while folding a dynamic
   // dimension and offset.
   // CHECK: std.view %0[][] : memref<2048xi8> to memref<7x4xf32, #[[VIEW_MAP4]]>
-  %5 = view %0[%c7][%c15]
+  %5 = view %0[%c15][%c7]
     : memref<2048xi8> to memref<?x4xf32, #TEST_VIEW_MAP2>
   return
```
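As a worked instance of the first fold above: with `%c7`, `%c11`, and `%c15` produced by constant ops, all three operands fold into the layout map (this restates the CHECK lines, with `#[[VIEW_MAP0]]` expanded):

```mlir
// Before canonicalization:
%1 = view %0[%c15][%c7, %c11]
  : memref<2048xi8> to memref<?x?xf32, (d0, d1)[s0, s1] -> (d0 * s1 + d1 + s0)>

// After: the 7x11 shape fixes the row stride at 11, and the constant
// offset 15 is folded into the map.
%1 = view %0[][]
  : memref<2048xi8> to memref<7x11xf32, (d0, d1) -> (d0 * 11 + d1 + 15)>
```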

